code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rotate_left3(nums):
if len(nums) < 3:
return 0
nums.append(nums[0])
del nums[0]
return nums
<|reserved_special_token_1|>
'''
Given an array of ints length 3, return an array with the elements "rotated
left" so {1, 2, 3} yields {2, 3, 1}.
rotate_left3([1, 2, 3]) → [2, 3, 1]
rotate_left3([5, 11, 9]) → [11, 9, 5]
rotate_left3([7, 0, 0]) → [0, 0, 7]
'''
#卡了很久,还是列表的基本操作不太熟
#参考:https://zhidao.baidu.com/question/1244520812319200859.html
def rotate_left3(nums):
if len(nums) < 3:
return 0
nums.append(nums[0])#是nums.append(),下面是del nums[index]
del nums[0]
return nums
|
flexible
|
{
"blob_id": "b7ebee3c96fd9cd3d8ddc69838363925085a944d",
"index": 1347,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef rotate_left3(nums):\n if len(nums) < 3:\n return 0\n nums.append(nums[0])\n del nums[0]\n return nums\n",
"step-3": "'''\nGiven an array of ints length 3, return an array with the elements \"rotated\nleft\" so {1, 2, 3} yields {2, 3, 1}.\n\nrotate_left3([1, 2, 3]) → [2, 3, 1]\nrotate_left3([5, 11, 9]) → [11, 9, 5]\nrotate_left3([7, 0, 0]) → [0, 0, 7]\n'''\n\n#卡了很久,还是列表的基本操作不太熟\n#参考:https://zhidao.baidu.com/question/1244520812319200859.html\ndef rotate_left3(nums):\n if len(nums) < 3:\n return 0\n nums.append(nums[0])#是nums.append(),下面是del nums[index]\n del nums[0]\n return nums\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 21:03:43 2019
@author: 00124175
"""
"""
读取txt文件
该文本中的分割符既有空格又有制表符('/t'),sep参数用'/s+',可以匹配任何空格。
"""
#header=None:没有每列的column name,可以自己设定
#encoding='gb2312':其他编码中文显示错误
#sep=',':用逗号来分隔每行的数据
#index_col=0:设置第1列数据作为index
import pandas as pd
data = pd.read_table("1206sjl.txt",header=None,encoding='gb2312',sep='|',skiprows=1)
data1 = pd.read_table("1206sjl.txt",header=None,encoding='gb2312',sep='|',nrows=1)
cols_name = data1.iloc[:,0:80]
mydata = data.iloc[:,0:80]#读所有的行,0-79列
cols_name = cols_name.values.tolist()#转换为list
mydata.columns = cols_name#加上列名称
mydata.rename(columns=lambda x: x.strip(' '),inplace=True)#去掉dataframe中的前后空格
mydata[['__lat,__deg','__lon,__deg']] = mydata[['__lat,__deg','__lon,__deg']].apply(pd.to_numeric)
my_need_data = mydata[(mydata['__lat,__deg']>39.14) & (mydata['__lat,__deg']<39.17)&(mydata['__lon,__deg']>117.51)&(mydata['__lon,__deg']<117.53)]
print(my_need_data.iloc[:,0:3])
my_need_data.to_csv("result_csv.csv", index=0)
|
normal
|
{
"blob_id": "ab760ec4cbb9f616f38b0f0f2221987460c6f618",
"index": 6492,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmydata.rename(columns=lambda x: x.strip(' '), inplace=True)\n<mask token>\nprint(my_need_data.iloc[:, 0:3])\nmy_need_data.to_csv('result_csv.csv', index=0)\n",
"step-3": "<mask token>\ndata = pd.read_table('1206sjl.txt', header=None, encoding='gb2312', sep='|',\n skiprows=1)\ndata1 = pd.read_table('1206sjl.txt', header=None, encoding='gb2312', sep=\n '|', nrows=1)\ncols_name = data1.iloc[:, 0:80]\nmydata = data.iloc[:, 0:80]\ncols_name = cols_name.values.tolist()\nmydata.columns = cols_name\nmydata.rename(columns=lambda x: x.strip(' '), inplace=True)\nmydata[['__lat,__deg', '__lon,__deg']] = mydata[['__lat,__deg', '__lon,__deg']\n ].apply(pd.to_numeric)\nmy_need_data = mydata[(mydata['__lat,__deg'] > 39.14) & (mydata[\n '__lat,__deg'] < 39.17) & (mydata['__lon,__deg'] > 117.51) & (mydata[\n '__lon,__deg'] < 117.53)]\nprint(my_need_data.iloc[:, 0:3])\nmy_need_data.to_csv('result_csv.csv', index=0)\n",
"step-4": "<mask token>\nimport pandas as pd\ndata = pd.read_table('1206sjl.txt', header=None, encoding='gb2312', sep='|',\n skiprows=1)\ndata1 = pd.read_table('1206sjl.txt', header=None, encoding='gb2312', sep=\n '|', nrows=1)\ncols_name = data1.iloc[:, 0:80]\nmydata = data.iloc[:, 0:80]\ncols_name = cols_name.values.tolist()\nmydata.columns = cols_name\nmydata.rename(columns=lambda x: x.strip(' '), inplace=True)\nmydata[['__lat,__deg', '__lon,__deg']] = mydata[['__lat,__deg', '__lon,__deg']\n ].apply(pd.to_numeric)\nmy_need_data = mydata[(mydata['__lat,__deg'] > 39.14) & (mydata[\n '__lat,__deg'] < 39.17) & (mydata['__lon,__deg'] > 117.51) & (mydata[\n '__lon,__deg'] < 117.53)]\nprint(my_need_data.iloc[:, 0:3])\nmy_need_data.to_csv('result_csv.csv', index=0)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 18 21:03:43 2019\n\n@author: 00124175\n\"\"\"\n\n\"\"\"\n读取txt文件\n该文本中的分割符既有空格又有制表符('/t'),sep参数用'/s+',可以匹配任何空格。\n\"\"\"\n#header=None:没有每列的column name,可以自己设定\n#encoding='gb2312':其他编码中文显示错误\n#sep=',':用逗号来分隔每行的数据\n#index_col=0:设置第1列数据作为index\nimport pandas as pd\ndata = pd.read_table(\"1206sjl.txt\",header=None,encoding='gb2312',sep='|',skiprows=1)\ndata1 = pd.read_table(\"1206sjl.txt\",header=None,encoding='gb2312',sep='|',nrows=1)\n\ncols_name = data1.iloc[:,0:80]\nmydata = data.iloc[:,0:80]#读所有的行,0-79列\ncols_name = cols_name.values.tolist()#转换为list\nmydata.columns = cols_name#加上列名称\nmydata.rename(columns=lambda x: x.strip(' '),inplace=True)#去掉dataframe中的前后空格\nmydata[['__lat,__deg','__lon,__deg']] = mydata[['__lat,__deg','__lon,__deg']].apply(pd.to_numeric)\n\nmy_need_data = mydata[(mydata['__lat,__deg']>39.14) & (mydata['__lat,__deg']<39.17)&(mydata['__lon,__deg']>117.51)&(mydata['__lon,__deg']<117.53)]\nprint(my_need_data.iloc[:,0:3])\nmy_need_data.to_csv(\"result_csv.csv\", index=0)\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from rest_framework import viewsets
from .models import *
from serializer import *
from django.http import HttpResponse
from django.views import View
from django.core import serializers
# Create your views here.
class ProyectoViewSet(viewsets.ModelViewSet):
queryset = Proyecto.objects.all()
serializer_class = ProyectoSerializer
class UsuarioViewSet(viewsets.ModelViewSet):
queryset = Usuario.objects.all()
serializer_class = UsuariosSerializer
class SistemaViewSet(viewsets.ModelViewSet):
queryset = Sistema.objects.all()
serializer_class = SistemaSerializer
class ProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = ProyectoSistema.objects.all()
serializer_class = ProyectoSistemaSerializer
class UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = UsuarioProyectoSistema.objects.all()
serializer_class = UsuarioProyectoSistemaSerializer
class ProyectoSistemaView(View):
def get(self, request):
data = ProyectoSistema.objects.all()
json = serializers.serialize('json', data)
return HttpResponse(json, content_type='application/json')
|
normal
|
{
"blob_id": "bedae2621bfcc64deb0d13d7cbce3cfb89720245",
"index": 4346,
"step-1": "<mask token>\n\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n",
"step-2": "<mask token>\n\n\nclass SistemaViewSet(viewsets.ModelViewSet):\n queryset = Sistema.objects.all()\n serializer_class = SistemaSerializer\n\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n",
"step-3": "<mask token>\n\n\nclass UsuarioViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n\n\nclass SistemaViewSet(viewsets.ModelViewSet):\n queryset = Sistema.objects.all()\n serializer_class = SistemaSerializer\n\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n",
"step-4": "<mask token>\n\n\nclass ProyectoViewSet(viewsets.ModelViewSet):\n queryset = Proyecto.objects.all()\n serializer_class = ProyectoSerializer\n\n\nclass UsuarioViewSet(viewsets.ModelViewSet):\n queryset = Usuario.objects.all()\n serializer_class = UsuariosSerializer\n\n\nclass SistemaViewSet(viewsets.ModelViewSet):\n queryset = Sistema.objects.all()\n serializer_class = SistemaSerializer\n\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n",
"step-5": "from rest_framework import viewsets\nfrom .models import *\nfrom serializer import *\nfrom django.http import HttpResponse\nfrom django.views import View\nfrom django.core import serializers\n# Create your views here.\n\nclass ProyectoViewSet(viewsets.ModelViewSet):\n queryset = Proyecto.objects.all()\n serializer_class = ProyectoSerializer\n\nclass UsuarioViewSet(viewsets.ModelViewSet):\n queryset = Usuario.objects.all()\n serializer_class = UsuariosSerializer\n\nclass SistemaViewSet(viewsets.ModelViewSet):\n queryset = Sistema.objects.all()\n serializer_class = SistemaSerializer\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n\n",
"step-ids": [
6,
8,
9,
12,
14
]
}
|
[
6,
8,
9,
12,
14
] |
"""
Writes day of the week and time to a file.
Script written for crontab tutorial.
Author: Jessica Yung 2016
"""
import time
filename = "record_time.txt"
# Records time in format Sun 10:00:00
current_time = time.strftime('%a %H:%M:%S')
# Append output to file. 'a' is append mode.
with open(filename, 'a') as handle:
# Write (Append) output to a line
handle.write(str(current_time))
# Newline to separate different lines of output
handle.write('\n')
|
normal
|
{
"blob_id": "1f0695f0e9745912d8ee3a87e6c9b1272e9ebbae",
"index": 218,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(filename, 'a') as handle:\n handle.write(str(current_time))\n handle.write('\\n')\n",
"step-3": "<mask token>\nfilename = 'record_time.txt'\ncurrent_time = time.strftime('%a %H:%M:%S')\nwith open(filename, 'a') as handle:\n handle.write(str(current_time))\n handle.write('\\n')\n",
"step-4": "<mask token>\nimport time\nfilename = 'record_time.txt'\ncurrent_time = time.strftime('%a %H:%M:%S')\nwith open(filename, 'a') as handle:\n handle.write(str(current_time))\n handle.write('\\n')\n",
"step-5": "\"\"\"\nWrites day of the week and time to a file.\n\nScript written for crontab tutorial.\n\nAuthor: Jessica Yung 2016\n\n\"\"\"\nimport time\n\nfilename = \"record_time.txt\"\n\n# Records time in format Sun 10:00:00\ncurrent_time = time.strftime('%a %H:%M:%S')\n\n# Append output to file. 'a' is append mode.\nwith open(filename, 'a') as handle:\n\t# Write (Append) output to a line\n handle.write(str(current_time))\n # Newline to separate different lines of output\n handle.write('\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import discord
from app.vars.client import client
from app.helpers import delete, getUser, getGuild
@client.command()
async def inviteInfo(ctx, link):
try:
await delete.byContext(ctx)
except:
pass
linkData = await client.fetch_invite(url=link)
if (linkData.inviter):
inviterData = await getUser.byID(linkData.inviter.id)
try:
guildData = await getGuild.byID(linkData.guild.id)
except:
guildData = linkData.guild
embed = discord.Embed(title="Invite information", colour=discord.Color.purple())
embed.set_thumbnail(url=guildData.icon_url)
fields = [
("ID", f"```{guildData.id}```", True),
("Name::", f"```{guildData.name}```", True),
("Description", f"```{guildData.description}```", True),
("Created in:", f'```{guildData.created_at.strftime("%d/%m/%Y")}```', True),
("Member Count:", f"```{int(linkData.approximate_member_count)}```", True),
("Link", f"```{linkData.url}```", True),
("\u200b", "\u200b", True),
]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
if (linkData.inviter):
embed.add_field(name="Inviter ID:", value=f"```{inviterData.id}```", inline=True)
embed.add_field(name="Inviter:", value=f"```{inviterData.name + '#' + inviterData.discriminator}```", inline=True)
embed.set_footer(text='Selfium (◔‿◔)')
await ctx.send(embed=embed)
|
normal
|
{
"blob_id": "b8f9633ab3110d00b2f0b82c78ad047fca0d3eee",
"index": 6999,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@client.command()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n linkData = await client.fetch_invite(url=link)\n if linkData.inviter:\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n embed = discord.Embed(title='Invite information', colour=discord.Color.\n purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [('ID', f'```{guildData.id}```', True), ('Name::',\n f'```{guildData.name}```', True), ('Description',\n f'```{guildData.description}```', True), ('Created in:',\n f\"```{guildData.created_at.strftime('%d/%m/%Y')}```\", True), (\n 'Member Count:', f'```{int(linkData.approximate_member_count)}```',\n True), ('Link', f'```{linkData.url}```', True), ('\\u200b', '\\u200b',\n True)]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n if linkData.inviter:\n embed.add_field(name='Inviter ID:', value=f'```{inviterData.id}```',\n inline=True)\n embed.add_field(name='Inviter:', value=\n f\"```{inviterData.name + '#' + inviterData.discriminator}```\",\n inline=True)\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)\n",
"step-3": "import discord\nfrom app.vars.client import client\nfrom app.helpers import delete, getUser, getGuild\n\n\n@client.command()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n linkData = await client.fetch_invite(url=link)\n if linkData.inviter:\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n embed = discord.Embed(title='Invite information', colour=discord.Color.\n purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [('ID', f'```{guildData.id}```', True), ('Name::',\n f'```{guildData.name}```', True), ('Description',\n f'```{guildData.description}```', True), ('Created in:',\n f\"```{guildData.created_at.strftime('%d/%m/%Y')}```\", True), (\n 'Member Count:', f'```{int(linkData.approximate_member_count)}```',\n True), ('Link', f'```{linkData.url}```', True), ('\\u200b', '\\u200b',\n True)]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n if linkData.inviter:\n embed.add_field(name='Inviter ID:', value=f'```{inviterData.id}```',\n inline=True)\n embed.add_field(name='Inviter:', value=\n f\"```{inviterData.name + '#' + inviterData.discriminator}```\",\n inline=True)\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)\n",
"step-4": "import discord\nfrom app.vars.client import client\nfrom app.helpers import delete, getUser, getGuild\n\n@client.command()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n\n linkData = await client.fetch_invite(url=link)\n if (linkData.inviter):\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n\n embed = discord.Embed(title=\"Invite information\", colour=discord.Color.purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [\n (\"ID\", f\"```{guildData.id}```\", True),\n (\"Name::\", f\"```{guildData.name}```\", True),\n (\"Description\", f\"```{guildData.description}```\", True),\n (\"Created in:\", f'```{guildData.created_at.strftime(\"%d/%m/%Y\")}```', True),\n (\"Member Count:\", f\"```{int(linkData.approximate_member_count)}```\", True), \n (\"Link\", f\"```{linkData.url}```\", True),\n (\"\\u200b\", \"\\u200b\", True),\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n \n if (linkData.inviter):\n embed.add_field(name=\"Inviter ID:\", value=f\"```{inviterData.id}```\", inline=True)\n embed.add_field(name=\"Inviter:\", value=f\"```{inviterData.name + '#' + inviterData.discriminator}```\", inline=True)\n\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def dac_voltage_set_handle(params):
help_info = ('dac set(<channel>,<value>)$\r\n \t channel(' +
help_str +
')\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\r\n'
)
""" params init """
""" help """
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
""" parametr analysis """
if len(params) != 2:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param length error')
channel = params[0]
if channel not in dac_list:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'channel parameter error')
volt_value = float(params[1])
if channel == 'psu3_voltage' or channel == 'psu2_voltage':
if volt_value < 0 or volt_value > 10000:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'],
'param voltage value error' + str(volt_value))
elif channel == 'base_board':
if volt_value < 0 or volt_value > 3300:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'],
'param voltage value error' + str(volt_value))
elif volt_value < 0 or volt_value > 5000:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param voltage value error' +
str(volt_value))
ret = Xavier.call('eval', test_base_board_name, 'voltage_set', channel,
volt_value)
if ret == False:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_execute_failure'], 'execute error')
return Utility.handle_done()
def dac_5761_write_register_handle(params):
help_info = (
'ad5761 register write(<addr>,<data>)$\r\n \t addr:register address $\r\n \t data:2byte data\r\n'
)
""" params init """
""" help """
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
""" parametr analysis """
if len(params) != 2:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param length error')
addr = int(params[0], 16)
data = int(params[1], 16)
ret = Xavier.call('eval', test_base_board_name, 'ad5761_write_register',
addr, data)
if ret == False:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_execute_failure'], 'execute error')
return Utility.handle_done()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('/opt/seeing/app/')
<|reserved_special_token_0|>
global agv
<|reserved_special_token_0|>
if agv in xavier_module:
Xavier = xavier_module[agv]
<|reserved_special_token_0|>
global batt_value
<|reserved_special_token_0|>
global vbus_value
<|reserved_special_token_0|>
for i in dac_list:
help_str = help_str + i + ',\r\n\t '
def dac_voltage_set_handle(params):
help_info = ('dac set(<channel>,<value>)$\r\n \t channel(' +
help_str +
')\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\r\n'
)
""" params init """
""" help """
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
""" parametr analysis """
if len(params) != 2:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param length error')
channel = params[0]
if channel not in dac_list:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'channel parameter error')
volt_value = float(params[1])
if channel == 'psu3_voltage' or channel == 'psu2_voltage':
if volt_value < 0 or volt_value > 10000:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'],
'param voltage value error' + str(volt_value))
elif channel == 'base_board':
if volt_value < 0 or volt_value > 3300:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'],
'param voltage value error' + str(volt_value))
elif volt_value < 0 or volt_value > 5000:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param voltage value error' +
str(volt_value))
ret = Xavier.call('eval', test_base_board_name, 'voltage_set', channel,
volt_value)
if ret == False:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_execute_failure'], 'execute error')
return Utility.handle_done()
def dac_5761_write_register_handle(params):
help_info = (
'ad5761 register write(<addr>,<data>)$\r\n \t addr:register address $\r\n \t data:2byte data\r\n'
)
""" params init """
""" help """
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
""" parametr analysis """
if len(params) != 2:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param length error')
addr = int(params[0], 16)
data = int(params[1], 16)
ret = Xavier.call('eval', test_base_board_name, 'ad5761_write_register',
addr, data)
if ret == False:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_execute_failure'], 'execute error')
return Utility.handle_done()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('/opt/seeing/app/')
<|reserved_special_token_0|>
global agv
agv = sys.argv[1]
Xavier = Xavier1
xavier_module = {'tcp:7801': Xavier1, 'tcp:7802': Xavier2}
if agv in xavier_module:
Xavier = xavier_module[agv]
test_base_board_name = 'zynq'
global batt_value
batt_value = {'current': 1, 'voltage': 1}
global vbus_value
vbus_value = {'current': 1, 'voltage': 1}
dac_list = ['psu1_ocp', 'psu1_ovp', 'psu1_ocp_ad5601', 'psu1_ovp_ad5601',
'psu1_current', 'psu1_voltage', 'psu2_ocp', 'psu2_ovp',
'psu2_ocp_ad5601', 'psu2_ovp_ad5601', 'psu2_current', 'psu3_ocp',
'psu3_ocp_ad5601', 'psu3_ovp', 'psu2_voltage', 'psu3_ovp_ad5601',
'psu3_current', 'psu3_voltage', 'base_board']
help_str = ''
for i in dac_list:
help_str = help_str + i + ',\r\n\t '
def dac_voltage_set_handle(params):
help_info = ('dac set(<channel>,<value>)$\r\n \t channel(' +
help_str +
')\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\r\n'
)
""" params init """
""" help """
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
""" parametr analysis """
if len(params) != 2:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param length error')
channel = params[0]
if channel not in dac_list:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'channel parameter error')
volt_value = float(params[1])
if channel == 'psu3_voltage' or channel == 'psu2_voltage':
if volt_value < 0 or volt_value > 10000:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'],
'param voltage value error' + str(volt_value))
elif channel == 'base_board':
if volt_value < 0 or volt_value > 3300:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'],
'param voltage value error' + str(volt_value))
elif volt_value < 0 or volt_value > 5000:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param voltage value error' +
str(volt_value))
ret = Xavier.call('eval', test_base_board_name, 'voltage_set', channel,
volt_value)
if ret == False:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_execute_failure'], 'execute error')
return Utility.handle_done()
def dac_5761_write_register_handle(params):
help_info = (
'ad5761 register write(<addr>,<data>)$\r\n \t addr:register address $\r\n \t data:2byte data\r\n'
)
""" params init """
""" help """
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
""" parametr analysis """
if len(params) != 2:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param length error')
addr = int(params[0], 16)
data = int(params[1], 16)
ret = Xavier.call('eval', test_base_board_name, 'ad5761_write_register',
addr, data)
if ret == False:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_execute_failure'], 'execute error')
return Utility.handle_done()
<|reserved_special_token_1|>
import re
import time
import sys
import command.server.handle_utility as Utility
from ee.common import logger
from ee.common import xavier as Xavier1
sys.path.append('/opt/seeing/app/')
from b31_bp import xavier1 as Xavier2
global agv
agv = sys.argv[1]
Xavier = Xavier1
xavier_module = {'tcp:7801': Xavier1, 'tcp:7802': Xavier2}
if agv in xavier_module:
Xavier = xavier_module[agv]
test_base_board_name = 'zynq'
global batt_value
batt_value = {'current': 1, 'voltage': 1}
global vbus_value
vbus_value = {'current': 1, 'voltage': 1}
dac_list = ['psu1_ocp', 'psu1_ovp', 'psu1_ocp_ad5601', 'psu1_ovp_ad5601',
'psu1_current', 'psu1_voltage', 'psu2_ocp', 'psu2_ovp',
'psu2_ocp_ad5601', 'psu2_ovp_ad5601', 'psu2_current', 'psu3_ocp',
'psu3_ocp_ad5601', 'psu3_ovp', 'psu2_voltage', 'psu3_ovp_ad5601',
'psu3_current', 'psu3_voltage', 'base_board']
help_str = ''
for i in dac_list:
help_str = help_str + i + ',\r\n\t '
def dac_voltage_set_handle(params):
help_info = ('dac set(<channel>,<value>)$\r\n \t channel(' +
help_str +
')\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\r\n'
)
""" params init """
""" help """
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
""" parametr analysis """
if len(params) != 2:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param length error')
channel = params[0]
if channel not in dac_list:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'channel parameter error')
volt_value = float(params[1])
if channel == 'psu3_voltage' or channel == 'psu2_voltage':
if volt_value < 0 or volt_value > 10000:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'],
'param voltage value error' + str(volt_value))
elif channel == 'base_board':
if volt_value < 0 or volt_value > 3300:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'],
'param voltage value error' + str(volt_value))
elif volt_value < 0 or volt_value > 5000:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param voltage value error' +
str(volt_value))
ret = Xavier.call('eval', test_base_board_name, 'voltage_set', channel,
volt_value)
if ret == False:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_execute_failure'], 'execute error')
return Utility.handle_done()
def dac_5761_write_register_handle(params):
help_info = (
'ad5761 register write(<addr>,<data>)$\r\n \t addr:register address $\r\n \t data:2byte data\r\n'
)
""" params init """
""" help """
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
""" parametr analysis """
if len(params) != 2:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_parameter_invalid'], 'param length error')
addr = int(params[0], 16)
data = int(params[1], 16)
ret = Xavier.call('eval', test_base_board_name, 'ad5761_write_register',
addr, data)
if ret == False:
return Utility.handle_error(Utility.handle_errorno[
'handle_errno_execute_failure'], 'execute error')
return Utility.handle_done()
<|reserved_special_token_1|>
#-*- coding: UTF-8 -*-
import re
import time
import sys
import command.server.handle_utility as Utility
from ee.common import logger
from ee.common import xavier as Xavier1
sys.path.append('/opt/seeing/app/')
from b31_bp import xavier1 as Xavier2
global agv
agv=sys.argv[1]
Xavier=Xavier1
xavier_module = {"tcp:7801":Xavier1, "tcp:7802":Xavier2}
if agv in xavier_module:
Xavier=xavier_module[agv]
test_base_board_name = "zynq"
global batt_value
batt_value={"current":1,"voltage":1,}#current unit mA,mV
global vbus_value
vbus_value={"current":1,"voltage":1,}#current unit mA,mV
dac_list=[ "psu1_ocp" , "psu1_ovp", "psu1_ocp_ad5601" , "psu1_ovp_ad5601", "psu1_current", "psu1_voltage",
"psu2_ocp", "psu2_ovp","psu2_ocp_ad5601", "psu2_ovp_ad5601","psu2_current", "psu3_ocp","psu3_ocp_ad5601","psu3_ovp", "psu2_voltage","psu3_ovp_ad5601", "psu3_current" ,"psu3_voltage", "base_board"]
help_str=''
for i in dac_list:
help_str=help_str+i+',\r\n\t '
# global calibration_var
# def _calibration_init():
# global calibration_var
# """从eeprom读取数据的程序"""
# _calibration_init()#这个函数模块一
#
def dac_voltage_set_handle(params):
help_info = "dac set(<channel>,<value>)$\r\n\
\t channel("+help_str+")\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\r\n"
''' params init '''
''' help '''
if Utility.is_ask_for_help(params) is True:
return Utility.handle_done(help_info)
''' parametr analysis '''
if len(params)!=2:
return Utility.handle_error(Utility.handle_errorno["handle_errno_parameter_invalid"],\
"param length error" )
channel=params[0]
if channel not in dac_list:
return Utility.handle_error(Utility.handle_errorno["handle_errno_parameter_invalid"] ,\
"channel parameter error" )
volt_value=float(params[1])
if channel=="psu3_voltage" or channel=="psu2_voltage" :
if volt_value<0 or volt_value>10000:
return Utility.handle_error(Utility.handle_errorno['handle_errno_parameter_invalid'],\
"param voltage value error" + str(volt_value))
elif channel=="base_board" :
if volt_value<0 or volt_value>3300:
return Utility.handle_error(Utility.handle_errorno['handle_errno_parameter_invalid'],\
"param voltage value error" + str(volt_value))
else:
if volt_value<0 or volt_value>5000:
return Utility.handle_error(Utility.handle_errorno['handle_errno_parameter_invalid'],\
"param voltage value error" + str(volt_value))
ret=Xavier.call("eval",test_base_board_name,"voltage_set",channel,volt_value)
if ret==False:
return Utility.handle_error(Utility.handle_errorno['handle_errno_execute_failure'],\
"execute error")
return Utility.handle_done()
def dac_5761_write_register_handle(params):
    """Handle the "ad5761 register write" command.

    ``params[0]`` is the register address and ``params[1]`` the 2-byte
    value to write, both given as hexadecimal strings.  Returns a
    ``Utility.handle_done`` / ``Utility.handle_error`` result.
    """
    help_info = "ad5761 register write(<addr>,<data>)$\r\n\
    \t addr:register address $\r\n\
    \t data:2byte data\r\n"
    # A help request short-circuits all validation.
    if Utility.is_ask_for_help(params) is True:
        return Utility.handle_done(help_info)
    # --- parameter analysis ---
    if len(params) != 2:
        return Utility.handle_error(
            Utility.handle_errorno["handle_errno_parameter_invalid"],
            "param length error")
    register_addr = int(params[0], 16)
    register_data = int(params[1], 16)
    # Xavier.call returns False on failure; compare explicitly to keep
    # that contract (other falsy return values are treated as success).
    if Xavier.call("eval", test_base_board_name, "ad5761_write_register",
                   register_addr, register_data) == False:
        return Utility.handle_error(
            Utility.handle_errorno['handle_errno_execute_failure'],
            "execute error")
    return Utility.handle_done()
|
flexible
|
{
"blob_id": "10e1756dc1d6c7b6b7e3569de78e9fa4cdfb0d7e",
"index": 7136,
"step-1": "<mask token>\n\n\ndef dac_voltage_set_handle(params):\n help_info = ('dac set(<channel>,<value>)$\\r\\n \\t channel(' +\n help_str +\n ')\\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\\r\\n'\n )\n \"\"\" params init \"\"\"\n \"\"\" help \"\"\"\n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \"\"\" parametr analysis \"\"\"\n if len(params) != 2:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param length error')\n channel = params[0]\n if channel not in dac_list:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'channel parameter error')\n volt_value = float(params[1])\n if channel == 'psu3_voltage' or channel == 'psu2_voltage':\n if volt_value < 0 or volt_value > 10000:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], \n 'param voltage value error' + str(volt_value))\n elif channel == 'base_board':\n if volt_value < 0 or volt_value > 3300:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], \n 'param voltage value error' + str(volt_value))\n elif volt_value < 0 or volt_value > 5000:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param voltage value error' +\n str(volt_value))\n ret = Xavier.call('eval', test_base_board_name, 'voltage_set', channel,\n volt_value)\n if ret == False:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_execute_failure'], 'execute error')\n return Utility.handle_done()\n\n\ndef dac_5761_write_register_handle(params):\n help_info = (\n 'ad5761 register write(<addr>,<data>)$\\r\\n \\t addr:register address $\\r\\n \\t data:2byte data\\r\\n'\n )\n \"\"\" params init \"\"\"\n \"\"\" help \"\"\"\n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \"\"\" parametr analysis \"\"\"\n if len(params) != 2:\n return 
Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param length error')\n addr = int(params[0], 16)\n data = int(params[1], 16)\n ret = Xavier.call('eval', test_base_board_name, 'ad5761_write_register',\n addr, data)\n if ret == False:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_execute_failure'], 'execute error')\n return Utility.handle_done()\n",
"step-2": "<mask token>\nsys.path.append('/opt/seeing/app/')\n<mask token>\nglobal agv\n<mask token>\nif agv in xavier_module:\n Xavier = xavier_module[agv]\n<mask token>\nglobal batt_value\n<mask token>\nglobal vbus_value\n<mask token>\nfor i in dac_list:\n help_str = help_str + i + ',\\r\\n\\t '\n\n\ndef dac_voltage_set_handle(params):\n help_info = ('dac set(<channel>,<value>)$\\r\\n \\t channel(' +\n help_str +\n ')\\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\\r\\n'\n )\n \"\"\" params init \"\"\"\n \"\"\" help \"\"\"\n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \"\"\" parametr analysis \"\"\"\n if len(params) != 2:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param length error')\n channel = params[0]\n if channel not in dac_list:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'channel parameter error')\n volt_value = float(params[1])\n if channel == 'psu3_voltage' or channel == 'psu2_voltage':\n if volt_value < 0 or volt_value > 10000:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], \n 'param voltage value error' + str(volt_value))\n elif channel == 'base_board':\n if volt_value < 0 or volt_value > 3300:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], \n 'param voltage value error' + str(volt_value))\n elif volt_value < 0 or volt_value > 5000:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param voltage value error' +\n str(volt_value))\n ret = Xavier.call('eval', test_base_board_name, 'voltage_set', channel,\n volt_value)\n if ret == False:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_execute_failure'], 'execute error')\n return Utility.handle_done()\n\n\ndef dac_5761_write_register_handle(params):\n help_info = (\n 'ad5761 register 
write(<addr>,<data>)$\\r\\n \\t addr:register address $\\r\\n \\t data:2byte data\\r\\n'\n )\n \"\"\" params init \"\"\"\n \"\"\" help \"\"\"\n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \"\"\" parametr analysis \"\"\"\n if len(params) != 2:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param length error')\n addr = int(params[0], 16)\n data = int(params[1], 16)\n ret = Xavier.call('eval', test_base_board_name, 'ad5761_write_register',\n addr, data)\n if ret == False:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_execute_failure'], 'execute error')\n return Utility.handle_done()\n",
"step-3": "<mask token>\nsys.path.append('/opt/seeing/app/')\n<mask token>\nglobal agv\nagv = sys.argv[1]\nXavier = Xavier1\nxavier_module = {'tcp:7801': Xavier1, 'tcp:7802': Xavier2}\nif agv in xavier_module:\n Xavier = xavier_module[agv]\ntest_base_board_name = 'zynq'\nglobal batt_value\nbatt_value = {'current': 1, 'voltage': 1}\nglobal vbus_value\nvbus_value = {'current': 1, 'voltage': 1}\ndac_list = ['psu1_ocp', 'psu1_ovp', 'psu1_ocp_ad5601', 'psu1_ovp_ad5601',\n 'psu1_current', 'psu1_voltage', 'psu2_ocp', 'psu2_ovp',\n 'psu2_ocp_ad5601', 'psu2_ovp_ad5601', 'psu2_current', 'psu3_ocp',\n 'psu3_ocp_ad5601', 'psu3_ovp', 'psu2_voltage', 'psu3_ovp_ad5601',\n 'psu3_current', 'psu3_voltage', 'base_board']\nhelp_str = ''\nfor i in dac_list:\n help_str = help_str + i + ',\\r\\n\\t '\n\n\ndef dac_voltage_set_handle(params):\n help_info = ('dac set(<channel>,<value>)$\\r\\n \\t channel(' +\n help_str +\n ')\\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\\r\\n'\n )\n \"\"\" params init \"\"\"\n \"\"\" help \"\"\"\n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \"\"\" parametr analysis \"\"\"\n if len(params) != 2:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param length error')\n channel = params[0]\n if channel not in dac_list:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'channel parameter error')\n volt_value = float(params[1])\n if channel == 'psu3_voltage' or channel == 'psu2_voltage':\n if volt_value < 0 or volt_value > 10000:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], \n 'param voltage value error' + str(volt_value))\n elif channel == 'base_board':\n if volt_value < 0 or volt_value > 3300:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], \n 'param voltage value error' + str(volt_value))\n elif volt_value < 0 or volt_value > 
5000:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param voltage value error' +\n str(volt_value))\n ret = Xavier.call('eval', test_base_board_name, 'voltage_set', channel,\n volt_value)\n if ret == False:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_execute_failure'], 'execute error')\n return Utility.handle_done()\n\n\ndef dac_5761_write_register_handle(params):\n help_info = (\n 'ad5761 register write(<addr>,<data>)$\\r\\n \\t addr:register address $\\r\\n \\t data:2byte data\\r\\n'\n )\n \"\"\" params init \"\"\"\n \"\"\" help \"\"\"\n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \"\"\" parametr analysis \"\"\"\n if len(params) != 2:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param length error')\n addr = int(params[0], 16)\n data = int(params[1], 16)\n ret = Xavier.call('eval', test_base_board_name, 'ad5761_write_register',\n addr, data)\n if ret == False:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_execute_failure'], 'execute error')\n return Utility.handle_done()\n",
"step-4": "import re\nimport time\nimport sys\nimport command.server.handle_utility as Utility\nfrom ee.common import logger\nfrom ee.common import xavier as Xavier1\nsys.path.append('/opt/seeing/app/')\nfrom b31_bp import xavier1 as Xavier2\nglobal agv\nagv = sys.argv[1]\nXavier = Xavier1\nxavier_module = {'tcp:7801': Xavier1, 'tcp:7802': Xavier2}\nif agv in xavier_module:\n Xavier = xavier_module[agv]\ntest_base_board_name = 'zynq'\nglobal batt_value\nbatt_value = {'current': 1, 'voltage': 1}\nglobal vbus_value\nvbus_value = {'current': 1, 'voltage': 1}\ndac_list = ['psu1_ocp', 'psu1_ovp', 'psu1_ocp_ad5601', 'psu1_ovp_ad5601',\n 'psu1_current', 'psu1_voltage', 'psu2_ocp', 'psu2_ovp',\n 'psu2_ocp_ad5601', 'psu2_ovp_ad5601', 'psu2_current', 'psu3_ocp',\n 'psu3_ocp_ad5601', 'psu3_ovp', 'psu2_voltage', 'psu3_ovp_ad5601',\n 'psu3_current', 'psu3_voltage', 'base_board']\nhelp_str = ''\nfor i in dac_list:\n help_str = help_str + i + ',\\r\\n\\t '\n\n\ndef dac_voltage_set_handle(params):\n help_info = ('dac set(<channel>,<value>)$\\r\\n \\t channel(' +\n help_str +\n ')\\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\\r\\n'\n )\n \"\"\" params init \"\"\"\n \"\"\" help \"\"\"\n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \"\"\" parametr analysis \"\"\"\n if len(params) != 2:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param length error')\n channel = params[0]\n if channel not in dac_list:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'channel parameter error')\n volt_value = float(params[1])\n if channel == 'psu3_voltage' or channel == 'psu2_voltage':\n if volt_value < 0 or volt_value > 10000:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], \n 'param voltage value error' + str(volt_value))\n elif channel == 'base_board':\n if volt_value < 0 or volt_value > 3300:\n return 
Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], \n 'param voltage value error' + str(volt_value))\n elif volt_value < 0 or volt_value > 5000:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param voltage value error' +\n str(volt_value))\n ret = Xavier.call('eval', test_base_board_name, 'voltage_set', channel,\n volt_value)\n if ret == False:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_execute_failure'], 'execute error')\n return Utility.handle_done()\n\n\ndef dac_5761_write_register_handle(params):\n help_info = (\n 'ad5761 register write(<addr>,<data>)$\\r\\n \\t addr:register address $\\r\\n \\t data:2byte data\\r\\n'\n )\n \"\"\" params init \"\"\"\n \"\"\" help \"\"\"\n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \"\"\" parametr analysis \"\"\"\n if len(params) != 2:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_parameter_invalid'], 'param length error')\n addr = int(params[0], 16)\n data = int(params[1], 16)\n ret = Xavier.call('eval', test_base_board_name, 'ad5761_write_register',\n addr, data)\n if ret == False:\n return Utility.handle_error(Utility.handle_errorno[\n 'handle_errno_execute_failure'], 'execute error')\n return Utility.handle_done()\n",
"step-5": "#-*- coding: UTF-8 -*-\nimport re\nimport time\nimport sys\nimport command.server.handle_utility as Utility\nfrom ee.common import logger\nfrom ee.common import xavier as Xavier1\nsys.path.append('/opt/seeing/app/')\nfrom b31_bp import xavier1 as Xavier2\n\nglobal agv\nagv=sys.argv[1]\nXavier=Xavier1\nxavier_module = {\"tcp:7801\":Xavier1, \"tcp:7802\":Xavier2}\nif agv in xavier_module:\n Xavier=xavier_module[agv]\n \ntest_base_board_name = \"zynq\"\n\nglobal batt_value\nbatt_value={\"current\":1,\"voltage\":1,}#current unit mA,mV\n\nglobal vbus_value\nvbus_value={\"current\":1,\"voltage\":1,}#current unit mA,mV\n\ndac_list=[ \"psu1_ocp\" , \"psu1_ovp\", \"psu1_ocp_ad5601\" , \"psu1_ovp_ad5601\", \"psu1_current\", \"psu1_voltage\",\n \"psu2_ocp\", \"psu2_ovp\",\"psu2_ocp_ad5601\", \"psu2_ovp_ad5601\",\"psu2_current\", \"psu3_ocp\",\"psu3_ocp_ad5601\",\"psu3_ovp\", \"psu2_voltage\",\"psu3_ovp_ad5601\", \"psu3_current\" ,\"psu3_voltage\", \"base_board\"]\nhelp_str=''\nfor i in dac_list:\n help_str=help_str+i+',\\r\\n\\t '\n\n# global calibration_var \n# def _calibration_init():\n# global calibration_var \n# \"\"\"从eeprom读取数据的程序\"\"\"\n# _calibration_init()#这个函数模块一\n# \n\ndef dac_voltage_set_handle(params):\n help_info = \"dac set(<channel>,<value>)$\\r\\n\\\n \\t channel(\"+help_str+\")\\tvalue: (if ad5761: (0~10000) unit:mv,else :(0~5000) unit:mv) $\\r\\n\"\n ''' params init ''' \n ''' help ''' \n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \n ''' parametr analysis '''\n if len(params)!=2:\n return Utility.handle_error(Utility.handle_errorno[\"handle_errno_parameter_invalid\"],\\\n \"param length error\" ) \n channel=params[0]\n if channel not in dac_list:\n return Utility.handle_error(Utility.handle_errorno[\"handle_errno_parameter_invalid\"] ,\\\n \"channel parameter error\" ) \n volt_value=float(params[1])\n if channel==\"psu3_voltage\" or channel==\"psu2_voltage\" :\n if volt_value<0 or volt_value>10000:\n 
return Utility.handle_error(Utility.handle_errorno['handle_errno_parameter_invalid'],\\\n \"param voltage value error\" + str(volt_value)) \n elif channel==\"base_board\" :\n if volt_value<0 or volt_value>3300:\n return Utility.handle_error(Utility.handle_errorno['handle_errno_parameter_invalid'],\\\n \"param voltage value error\" + str(volt_value)) \n else:\n if volt_value<0 or volt_value>5000:\n return Utility.handle_error(Utility.handle_errorno['handle_errno_parameter_invalid'],\\\n \"param voltage value error\" + str(volt_value)) \n ret=Xavier.call(\"eval\",test_base_board_name,\"voltage_set\",channel,volt_value)\n if ret==False:\n return Utility.handle_error(Utility.handle_errorno['handle_errno_execute_failure'],\\\n \"execute error\") \n return Utility.handle_done()\ndef dac_5761_write_register_handle(params):\n help_info = \"ad5761 register write(<addr>,<data>)$\\r\\n\\\n \\t addr:register address $\\r\\n\\\n \\t data:2byte data\\r\\n\"\n ''' params init ''' \n ''' help ''' \n if Utility.is_ask_for_help(params) is True:\n return Utility.handle_done(help_info)\n \n ''' parametr analysis '''\n if len(params)!=2:\n return Utility.handle_error(Utility.handle_errorno[\"handle_errno_parameter_invalid\"],\\\n \"param length error\" ) \n addr=int(params[0],16) \n data=int(params[1],16) \n ret=Xavier.call(\"eval\",test_base_board_name,\"ad5761_write_register\",addr,data)\n if ret==False:\n return Utility.handle_error(Utility.handle_errorno['handle_errno_execute_failure'],\\\n \"execute error\") \n return Utility.handle_done()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# coding=utf-8
# oscm_app/cart/models
# django imports
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
# OSCM imports
from ...constants import CARTS, CART_STATUSES, DEFAULT_CART_STATUS
from ...utils import get_attr
from ..cart_manager import CartQuerySet
from .cart_item import CartItem
class Cart(models.Model):

    """
    Shopping cart of a single user.

    A cart groups :class:`CartItem` rows under a project name and tracks
    its lifecycle through :attr:`status`.
    """

    # Owner of the cart; choices are limited to users holding the default role.
    owner = models.ForeignKey(
        get_attr('AUTH_USER_MODEL'),
        blank=False,
        related_name='carts',
        verbose_name=_("oscm_admin_ownerOfCart"),
        help_text=_('oscm_admin_helpTextOwnerOfCart'),
        limit_choices_to={'role': get_attr('DEFAULT_ROLE')},
    )

    # Name of the project this cart belongs to.
    project_name = models.CharField(
        verbose_name=_('oscm_admin_projectNameOfCart'),
        help_text=_('oscm_admin_helpTextProjectNameOfCart'),
        max_length=250,
        blank=False,
        null=False
    )

    # Creation date, set once when the row is first saved.
    creation_date = models.DateTimeField(
        verbose_name=_('oscm_admin_creationDateOfCart'),
        auto_now_add=True,
    )
    # Last edit date, refreshed on every save.
    last_edit_date = models.DateTimeField(
        verbose_name=_('oscm_admin_lastEditDateOfCart'),
        auto_now=True,
    )
    # Due date requested by the cart owner.
    requested_due_date = models.DateTimeField(
        verbose_name=_('oscm_admin_requestedDueDateOfCart'),
        help_text=_('oscm_admin_helpTextRequestedDueDateOfCart'),
    )

    # Default parameter for the status attribute.
    DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)
    # Status choices retrieved from the settings file.
    CART_STATUSES = get_attr(CART_STATUSES)
    # Lifecycle status of the cart.
    # NOTE: ``max_length`` is not a valid IntegerField option (Django ignores
    # it and reports it in system checks), so it has been removed.
    status = models.IntegerField(
        verbose_name=_('oscm_admin_statusOfCart'),
        default=DEFAULT_CART_STATUS,
        choices=CART_STATUSES,
        help_text=_('oscm_admin_helpTextStatusOfCart'),
    )
    # Short description about the cart.
    description = models.TextField(
        verbose_name=_("oscm_admin_descriptionOfCart"),
        blank=True,
        help_text=_('oscm_admin_helpTextDescriptionOfCart'),
    )

    class Meta:
        ordering = ["status", "creation_date", ]
        db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)
        verbose_name = _('oscm_admin_headerOfCart')
        verbose_name_plural = _('oscm_admin_headerOfCarts')

    objects = CartQuerySet.as_manager()

    def __str__(self):
        """
        Display the status, the owner, the project name, the number of
        distinct cart items and the total amount.
        """
        return _(
            "cart (status: %(status)s, owner: %(owner)s, project name: "
            "%(project_name)s, number of cart items: %(nb_cart_items)d, "
            "total amount: %(total_amount)d)"
        ) % {
            # get_status_display() maps the stored value to its label;
            # indexing CART_STATUSES by the raw value only worked when the
            # choices happened to be the positional sequence 0..n-1.
            'status': self.get_status_display(),
            'owner': self.owner,
            'project_name': self.project_name,
            'nb_cart_items': self.nb_cart_items,
            'total_amount': self.total_amount,
        }

    def get_cart_items(self):
        """
        Retrieve the queryset of all cart items for this cart.
        """
        return CartItem.objects.filter(cart=self)

    @property
    def nb_cart_items(self):
        """
        Number of distinct cart items (not summed quantities).
        """
        return self.get_cart_items().count()

    @property
    def total_amount(self):
        """
        Total price over all cart items of this cart (0 when empty).
        """
        return sum(item.total_price for item in self.get_cart_items())

    @property
    def is_empty(self):
        """
        True when this cart is unsaved or contains no cart items.
        """
        return self.id is None or self.nb_cart_items == 0

    def get_absolute_url(self):
        """Return the URL of this cart's detail view."""
        return reverse(
            'oscm:cart',
            kwargs={'pk': self.pk})

    def get_delete_url(self):
        """Return the URL of this cart's delete view."""
        return reverse(
            'oscm:delete_cart',
            kwargs={'pk': self.pk})
|
normal
|
{
"blob_id": "ae0ccbb9b0a2c61d9ee9615ba8d0c1a186a81c34",
"index": 3177,
"step-1": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['status', 'creation_date']\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n <mask token>\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n name and the number of cart items.\n \"\"\"\n return _(\n 'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'\n ) % {'status': self.CART_STATUSES[self.status][1], 'owner':\n self.owner, 'project_name': self.project_name, 'nb_cart_items':\n self.nb_cart_items, 'total_amount': self.total_amount}\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n <mask token>\n <mask token>\n\n def get_absolute_url(self):\n return reverse('oscm:cart', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('oscm:delete_cart', kwargs={'pk': self.pk})\n",
"step-2": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['status', 'creation_date']\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n <mask token>\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n name and the number of cart items.\n \"\"\"\n return _(\n 'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'\n ) % {'status': self.CART_STATUSES[self.status][1], 'owner':\n self.owner, 'project_name': self.project_name, 'nb_cart_items':\n self.nb_cart_items, 'total_amount': self.total_amount}\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n\n @property\n def total_amount(self):\n \"\"\"\n Retrieves the total amount of cart items for a given cart.\n \"\"\"\n total_amount = 0\n for cart_item in self.get_cart_items():\n total_amount += cart_item.total_price\n return total_amount\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this cart is empty.\n \"\"\"\n return self.id is None or self.nb_cart_items == 0\n\n def get_absolute_url(self):\n return reverse('oscm:cart', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('oscm:delete_cart', kwargs={'pk': self.pk})\n",
"step-3": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n owner = models.ForeignKey(get_attr('AUTH_USER_MODEL'), blank=False,\n related_name='carts', verbose_name=_('oscm_admin_ownerOfCart'),\n help_text=_('oscm_admin_helpTextOwnerOfCart'), limit_choices_to={\n 'role': get_attr('DEFAULT_ROLE')})\n project_name = models.CharField(verbose_name=_(\n 'oscm_admin_projectNameOfCart'), help_text=_(\n 'oscm_admin_helpTextProjectNameOfCart'), max_length=250, blank=\n False, null=False)\n creation_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_creationDateOfCart'), auto_now_add=True)\n last_edit_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_lastEditDateOfCart'), auto_now=True)\n requested_due_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_requestedDueDateOfCart'), help_text=_(\n 'oscm_admin_helpTextRequestedDueDateOfCart'))\n DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)\n CART_STATUSES = get_attr(CART_STATUSES)\n status = models.IntegerField(verbose_name=_('oscm_admin_statusOfCart'),\n max_length=32, default=DEFAULT_CART_STATUS, choices=CART_STATUSES,\n help_text=_('oscm_admin_helpTextStatusOfCart'))\n description = models.TextField(verbose_name=_(\n 'oscm_admin_descriptionOfCart'), blank=True, help_text=_(\n 'oscm_admin_helpTextDescriptionOfCart'))\n\n\n class Meta:\n ordering = ['status', 'creation_date']\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n objects = CartQuerySet.as_manager()\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n name and the number of cart items.\n \"\"\"\n return _(\n 'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'\n ) % {'status': self.CART_STATUSES[self.status][1], 'owner':\n self.owner, 'project_name': self.project_name, 'nb_cart_items':\n 
self.nb_cart_items, 'total_amount': self.total_amount}\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n\n @property\n def total_amount(self):\n \"\"\"\n Retrieves the total amount of cart items for a given cart.\n \"\"\"\n total_amount = 0\n for cart_item in self.get_cart_items():\n total_amount += cart_item.total_price\n return total_amount\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this cart is empty.\n \"\"\"\n return self.id is None or self.nb_cart_items == 0\n\n def get_absolute_url(self):\n return reverse('oscm:cart', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('oscm:delete_cart', kwargs={'pk': self.pk})\n",
"step-4": "from django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom ...constants import CARTS, CART_STATUSES, DEFAULT_CART_STATUS\nfrom ...utils import get_attr\nfrom ..cart_manager import CartQuerySet\nfrom .cart_item import CartItem\n\n\nclass Cart(models.Model):\n \"\"\"\n This class is used to represent the Cart for the users.\n \"\"\"\n owner = models.ForeignKey(get_attr('AUTH_USER_MODEL'), blank=False,\n related_name='carts', verbose_name=_('oscm_admin_ownerOfCart'),\n help_text=_('oscm_admin_helpTextOwnerOfCart'), limit_choices_to={\n 'role': get_attr('DEFAULT_ROLE')})\n project_name = models.CharField(verbose_name=_(\n 'oscm_admin_projectNameOfCart'), help_text=_(\n 'oscm_admin_helpTextProjectNameOfCart'), max_length=250, blank=\n False, null=False)\n creation_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_creationDateOfCart'), auto_now_add=True)\n last_edit_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_lastEditDateOfCart'), auto_now=True)\n requested_due_date = models.DateTimeField(verbose_name=_(\n 'oscm_admin_requestedDueDateOfCart'), help_text=_(\n 'oscm_admin_helpTextRequestedDueDateOfCart'))\n DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)\n CART_STATUSES = get_attr(CART_STATUSES)\n status = models.IntegerField(verbose_name=_('oscm_admin_statusOfCart'),\n max_length=32, default=DEFAULT_CART_STATUS, choices=CART_STATUSES,\n help_text=_('oscm_admin_helpTextStatusOfCart'))\n description = models.TextField(verbose_name=_(\n 'oscm_admin_descriptionOfCart'), blank=True, help_text=_(\n 'oscm_admin_helpTextDescriptionOfCart'))\n\n\n class Meta:\n ordering = ['status', 'creation_date']\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n objects = CartQuerySet.as_manager()\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n 
name and the number of cart items.\n \"\"\"\n return _(\n 'cart (status: %(status)s, owner: %(owner)s, project name: %(project_name)s, number of cart items: %(nb_cart_items)d, total amount: %(total_amount)d)'\n ) % {'status': self.CART_STATUSES[self.status][1], 'owner':\n self.owner, 'project_name': self.project_name, 'nb_cart_items':\n self.nb_cart_items, 'total_amount': self.total_amount}\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n\n @property\n def total_amount(self):\n \"\"\"\n Retrieves the total amount of cart items for a given cart.\n \"\"\"\n total_amount = 0\n for cart_item in self.get_cart_items():\n total_amount += cart_item.total_price\n return total_amount\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this cart is empty.\n \"\"\"\n return self.id is None or self.nb_cart_items == 0\n\n def get_absolute_url(self):\n return reverse('oscm:cart', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('oscm:delete_cart', kwargs={'pk': self.pk})\n",
"step-5": "# coding=utf-8\n# oscm_app/cart/models\n\n# django imports\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n# OSCM imports\nfrom ...constants import CARTS, CART_STATUSES, DEFAULT_CART_STATUS\nfrom ...utils import get_attr\nfrom ..cart_manager import CartQuerySet\nfrom .cart_item import CartItem\n\n\nclass Cart(models.Model):\n\n \"\"\"\n This class is used to represent the Cart for the users.\n \"\"\"\n\n # Owner of the cart\n owner = models.ForeignKey(\n get_attr('AUTH_USER_MODEL'),\n blank=False,\n related_name='carts',\n verbose_name=_(\"oscm_admin_ownerOfCart\"),\n help_text=_('oscm_admin_helpTextOwnerOfCart'),\n limit_choices_to={'role': get_attr('DEFAULT_ROLE')},\n )\n\n # Project name\n project_name = models.CharField(\n verbose_name=_('oscm_admin_projectNameOfCart'),\n help_text=_('oscm_admin_helpTextProjectNameOfCart'),\n max_length=250,\n blank=False,\n null=False\n )\n\n # Creation date\n creation_date = models.DateTimeField(\n verbose_name=_('oscm_admin_creationDateOfCart'),\n auto_now_add=True,\n )\n # Last edit date\n last_edit_date = models.DateTimeField(\n verbose_name=_('oscm_admin_lastEditDateOfCart'),\n auto_now=True,\n )\n # Requested due date\n requested_due_date = models.DateTimeField(\n verbose_name=_('oscm_admin_requestedDueDateOfCart'),\n help_text=_('oscm_admin_helpTextRequestedDueDateOfCart'),\n )\n\n # Default parameter for the status attribute\n DEFAULT_CART_STATUS = get_attr(DEFAULT_CART_STATUS)\n # Retrieved the different statuses from the settings file\n CART_STATUSES = get_attr(CART_STATUSES)\n # Status\n status = models.IntegerField(\n verbose_name=_('oscm_admin_statusOfCart'),\n max_length=32,\n default=DEFAULT_CART_STATUS,\n choices=CART_STATUSES,\n help_text=_('oscm_admin_helpTextStatusOfCart'),\n )\n # Short description about the cart\n description = models.TextField(\n verbose_name=_(\"oscm_admin_descriptionOfCart\"),\n 
blank=True,\n help_text=_('oscm_admin_helpTextDescriptionOfCart'),\n )\n # Item count (not equal to quantity, but distinct item count)\n\n class Meta:\n ordering = [\"status\", \"creation_date\", ]\n db_table = '%s_%s' % (get_attr('APP_NAME'), CARTS)\n verbose_name = _('oscm_admin_headerOfCart')\n verbose_name_plural = _('oscm_admin_headerOfCarts')\n\n objects = CartQuerySet.as_manager()\n\n def __str__(self):\n \"\"\"\n Displays the status, the owner, the project\n name and the number of cart items.\n \"\"\"\n return _(\n \"cart (status: %(status)s, owner: %(owner)s, project name: \"\n \"%(project_name)s, number of cart items: %(nb_cart_items)d, \"\n \"total amount: %(total_amount)d)\"\n ) % {\n 'status': self.CART_STATUSES[self.status][1],\n 'owner': self.owner,\n 'project_name': self.project_name,\n 'nb_cart_items': self.nb_cart_items,\n 'total_amount': self.total_amount,\n }\n\n def get_cart_items(self):\n \"\"\"\n Retrieves all cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self)\n\n @property\n def nb_cart_items(self):\n \"\"\"\n Retrieves the number of distinct cart items for a given cart.\n \"\"\"\n return CartItem.objects.filter(cart=self).count()\n\n @property\n def total_amount(self):\n \"\"\"\n Retrieves the total amount of cart items for a given cart.\n \"\"\"\n total_amount = 0\n for cart_item in self.get_cart_items():\n total_amount += cart_item.total_price\n return total_amount\n\n @property\n def is_empty(self):\n \"\"\"\n Test if this cart is empty.\n \"\"\"\n return self.id is None or self.nb_cart_items == 0\n\n def get_absolute_url(self):\n return reverse(\n 'oscm:cart',\n kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse(\n 'oscm:delete_cart',\n kwargs={'pk': self.pk})\n",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
class CPU(DistantEnum):
k8 = 'k8'
piii = 'piii'
darwin = 'darwin'
freebsd = 'freebsd'
armeabi = 'armeabi-v7a'
arm = 'arm'
aarch64 = 'aarch64'
x64_windows = 'x64_windows'
x64_windows_msvc = 'x64_windows_msvc'
s390x = 's390x'
ppc = 'ppc'
ppc64 = 'ppc64'
class CompilationMode(DistantEnum):
fastbuild = 'fastbuild'
dbg = 'dbg'
opt = 'opt'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DistantEnum(Enum):
<|reserved_special_token_0|>
class CPU(DistantEnum):
k8 = 'k8'
piii = 'piii'
darwin = 'darwin'
freebsd = 'freebsd'
armeabi = 'armeabi-v7a'
arm = 'arm'
aarch64 = 'aarch64'
x64_windows = 'x64_windows'
x64_windows_msvc = 'x64_windows_msvc'
s390x = 's390x'
ppc = 'ppc'
ppc64 = 'ppc64'
class CompilationMode(DistantEnum):
fastbuild = 'fastbuild'
dbg = 'dbg'
opt = 'opt'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
EXIT_CODES = ['SUCCESS', 'BUILD_FAILURE', 'PARSING_FAILURE',
'COMMAND_LINE_ERROR', 'TESTS_FAILED', 'PARTIAL_ANALYSIS_FAILURE',
'NO_TESTS_FOUND', 'RUN_FAILURE', 'ANALYSIS_FAILURE', 'INTERRUPTED',
'LOCK_HELD_NOBLOCK_FOR_LOCK', 'REMOTE_ENVIRONMENTAL_ERROR', 'OOM_ERROR',
'REMOTE_ERROR', 'LOCAL_ENVIRONMENT_ERROR', 'BLAZE_INTERNAL_ERROR',
'PUBLISH_ERROR', 'PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR']
class DistantEnum(Enum):
def __str__(self):
return str(self.value)
class CPU(DistantEnum):
k8 = 'k8'
piii = 'piii'
darwin = 'darwin'
freebsd = 'freebsd'
armeabi = 'armeabi-v7a'
arm = 'arm'
aarch64 = 'aarch64'
x64_windows = 'x64_windows'
x64_windows_msvc = 'x64_windows_msvc'
s390x = 's390x'
ppc = 'ppc'
ppc64 = 'ppc64'
class CompilationMode(DistantEnum):
fastbuild = 'fastbuild'
dbg = 'dbg'
opt = 'opt'
<|reserved_special_token_1|>
from enum import Enum
EXIT_CODES = ['SUCCESS', 'BUILD_FAILURE', 'PARSING_FAILURE',
'COMMAND_LINE_ERROR', 'TESTS_FAILED', 'PARTIAL_ANALYSIS_FAILURE',
'NO_TESTS_FOUND', 'RUN_FAILURE', 'ANALYSIS_FAILURE', 'INTERRUPTED',
'LOCK_HELD_NOBLOCK_FOR_LOCK', 'REMOTE_ENVIRONMENTAL_ERROR', 'OOM_ERROR',
'REMOTE_ERROR', 'LOCAL_ENVIRONMENT_ERROR', 'BLAZE_INTERNAL_ERROR',
'PUBLISH_ERROR', 'PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR']
class DistantEnum(Enum):
def __str__(self):
return str(self.value)
class CPU(DistantEnum):
k8 = 'k8'
piii = 'piii'
darwin = 'darwin'
freebsd = 'freebsd'
armeabi = 'armeabi-v7a'
arm = 'arm'
aarch64 = 'aarch64'
x64_windows = 'x64_windows'
x64_windows_msvc = 'x64_windows_msvc'
s390x = 's390x'
ppc = 'ppc'
ppc64 = 'ppc64'
class CompilationMode(DistantEnum):
fastbuild = 'fastbuild'
dbg = 'dbg'
opt = 'opt'
<|reserved_special_token_1|>
from enum import Enum
EXIT_CODES = [
"SUCCESS",
"BUILD_FAILURE",
"PARSING_FAILURE",
"COMMAND_LINE_ERROR",
"TESTS_FAILED",
"PARTIAL_ANALYSIS_FAILURE",
"NO_TESTS_FOUND",
"RUN_FAILURE",
"ANALYSIS_FAILURE",
"INTERRUPTED",
"LOCK_HELD_NOBLOCK_FOR_LOCK",
"REMOTE_ENVIRONMENTAL_ERROR",
"OOM_ERROR",
"REMOTE_ERROR",
"LOCAL_ENVIRONMENT_ERROR",
"BLAZE_INTERNAL_ERROR",
"PUBLISH_ERROR",
"PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR"
]
class DistantEnum(Enum):
def __str__(self):
return str(self.value)
class CPU(DistantEnum):
k8 = "k8"
piii = "piii"
darwin = "darwin"
freebsd = "freebsd"
armeabi = "armeabi-v7a"
arm = "arm"
aarch64 = "aarch64"
x64_windows = "x64_windows"
x64_windows_msvc = "x64_windows_msvc"
s390x = "s390x"
ppc = "ppc"
ppc64 = "ppc64"
class CompilationMode(DistantEnum):
fastbuild = "fastbuild"
dbg = "dbg"
opt = "opt"
|
flexible
|
{
"blob_id": "5e86e97281b9d18a06efc62b20f5399611e3510d",
"index": 8000,
"step-1": "<mask token>\n\n\nclass CPU(DistantEnum):\n k8 = 'k8'\n piii = 'piii'\n darwin = 'darwin'\n freebsd = 'freebsd'\n armeabi = 'armeabi-v7a'\n arm = 'arm'\n aarch64 = 'aarch64'\n x64_windows = 'x64_windows'\n x64_windows_msvc = 'x64_windows_msvc'\n s390x = 's390x'\n ppc = 'ppc'\n ppc64 = 'ppc64'\n\n\nclass CompilationMode(DistantEnum):\n fastbuild = 'fastbuild'\n dbg = 'dbg'\n opt = 'opt'\n",
"step-2": "<mask token>\n\n\nclass DistantEnum(Enum):\n <mask token>\n\n\nclass CPU(DistantEnum):\n k8 = 'k8'\n piii = 'piii'\n darwin = 'darwin'\n freebsd = 'freebsd'\n armeabi = 'armeabi-v7a'\n arm = 'arm'\n aarch64 = 'aarch64'\n x64_windows = 'x64_windows'\n x64_windows_msvc = 'x64_windows_msvc'\n s390x = 's390x'\n ppc = 'ppc'\n ppc64 = 'ppc64'\n\n\nclass CompilationMode(DistantEnum):\n fastbuild = 'fastbuild'\n dbg = 'dbg'\n opt = 'opt'\n",
"step-3": "<mask token>\nEXIT_CODES = ['SUCCESS', 'BUILD_FAILURE', 'PARSING_FAILURE',\n 'COMMAND_LINE_ERROR', 'TESTS_FAILED', 'PARTIAL_ANALYSIS_FAILURE',\n 'NO_TESTS_FOUND', 'RUN_FAILURE', 'ANALYSIS_FAILURE', 'INTERRUPTED',\n 'LOCK_HELD_NOBLOCK_FOR_LOCK', 'REMOTE_ENVIRONMENTAL_ERROR', 'OOM_ERROR',\n 'REMOTE_ERROR', 'LOCAL_ENVIRONMENT_ERROR', 'BLAZE_INTERNAL_ERROR',\n 'PUBLISH_ERROR', 'PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR']\n\n\nclass DistantEnum(Enum):\n\n def __str__(self):\n return str(self.value)\n\n\nclass CPU(DistantEnum):\n k8 = 'k8'\n piii = 'piii'\n darwin = 'darwin'\n freebsd = 'freebsd'\n armeabi = 'armeabi-v7a'\n arm = 'arm'\n aarch64 = 'aarch64'\n x64_windows = 'x64_windows'\n x64_windows_msvc = 'x64_windows_msvc'\n s390x = 's390x'\n ppc = 'ppc'\n ppc64 = 'ppc64'\n\n\nclass CompilationMode(DistantEnum):\n fastbuild = 'fastbuild'\n dbg = 'dbg'\n opt = 'opt'\n",
"step-4": "from enum import Enum\nEXIT_CODES = ['SUCCESS', 'BUILD_FAILURE', 'PARSING_FAILURE',\n 'COMMAND_LINE_ERROR', 'TESTS_FAILED', 'PARTIAL_ANALYSIS_FAILURE',\n 'NO_TESTS_FOUND', 'RUN_FAILURE', 'ANALYSIS_FAILURE', 'INTERRUPTED',\n 'LOCK_HELD_NOBLOCK_FOR_LOCK', 'REMOTE_ENVIRONMENTAL_ERROR', 'OOM_ERROR',\n 'REMOTE_ERROR', 'LOCAL_ENVIRONMENT_ERROR', 'BLAZE_INTERNAL_ERROR',\n 'PUBLISH_ERROR', 'PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR']\n\n\nclass DistantEnum(Enum):\n\n def __str__(self):\n return str(self.value)\n\n\nclass CPU(DistantEnum):\n k8 = 'k8'\n piii = 'piii'\n darwin = 'darwin'\n freebsd = 'freebsd'\n armeabi = 'armeabi-v7a'\n arm = 'arm'\n aarch64 = 'aarch64'\n x64_windows = 'x64_windows'\n x64_windows_msvc = 'x64_windows_msvc'\n s390x = 's390x'\n ppc = 'ppc'\n ppc64 = 'ppc64'\n\n\nclass CompilationMode(DistantEnum):\n fastbuild = 'fastbuild'\n dbg = 'dbg'\n opt = 'opt'\n",
"step-5": "from enum import Enum\n\nEXIT_CODES = [\n \"SUCCESS\", \n \"BUILD_FAILURE\", \n \"PARSING_FAILURE\", \n \"COMMAND_LINE_ERROR\", \n \"TESTS_FAILED\",\n \"PARTIAL_ANALYSIS_FAILURE\",\n \"NO_TESTS_FOUND\",\n \"RUN_FAILURE\",\n \"ANALYSIS_FAILURE\",\n \"INTERRUPTED\",\n \"LOCK_HELD_NOBLOCK_FOR_LOCK\",\n \"REMOTE_ENVIRONMENTAL_ERROR\",\n \"OOM_ERROR\",\n \"REMOTE_ERROR\",\n \"LOCAL_ENVIRONMENT_ERROR\",\n \"BLAZE_INTERNAL_ERROR\",\n \"PUBLISH_ERROR\",\n \"PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR\"\n ]\n\nclass DistantEnum(Enum):\n def __str__(self):\n return str(self.value)\n\nclass CPU(DistantEnum):\n k8 = \"k8\"\n piii = \"piii\"\n darwin = \"darwin\"\n freebsd = \"freebsd\"\n armeabi = \"armeabi-v7a\"\n arm = \"arm\"\n aarch64 = \"aarch64\"\n x64_windows = \"x64_windows\"\n x64_windows_msvc = \"x64_windows_msvc\"\n s390x = \"s390x\"\n ppc = \"ppc\"\n ppc64 = \"ppc64\"\n\nclass CompilationMode(DistantEnum):\n fastbuild = \"fastbuild\"\n dbg = \"dbg\"\n opt = \"opt\"\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
from datapackage_pipelines.wrapper import ingest, spew
params, datapackage, res_iter = ingest()
columns = params['columns']
for resource in datapackage['resources']:
fields = resource.get('schema', {}).get('fields')
if fields is not None:
fields = [field for field in fields if field['name'] not in columns]
resource['schema']['fields'] = fields
def process_resources(_res_iter):
for rows in _res_iter:
def process_rows(_rows):
for row in _rows:
for column in columns:
if column in row:
del row[column]
yield row
yield process_rows(rows)
spew(datapackage, process_resources(res_iter))
|
normal
|
{
"blob_id": "17b3fb44d9e7a09fe3b807b47bdc0248b6960634",
"index": 4022,
"step-1": "<mask token>\n\n\ndef process_resources(_res_iter):\n for rows in _res_iter:\n\n def process_rows(_rows):\n for row in _rows:\n for column in columns:\n if column in row:\n del row[column]\n yield row\n yield process_rows(rows)\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor resource in datapackage['resources']:\n fields = resource.get('schema', {}).get('fields')\n if fields is not None:\n fields = [field for field in fields if field['name'] not in columns]\n resource['schema']['fields'] = fields\n\n\ndef process_resources(_res_iter):\n for rows in _res_iter:\n\n def process_rows(_rows):\n for row in _rows:\n for column in columns:\n if column in row:\n del row[column]\n yield row\n yield process_rows(rows)\n\n\nspew(datapackage, process_resources(res_iter))\n",
"step-3": "<mask token>\nparams, datapackage, res_iter = ingest()\ncolumns = params['columns']\nfor resource in datapackage['resources']:\n fields = resource.get('schema', {}).get('fields')\n if fields is not None:\n fields = [field for field in fields if field['name'] not in columns]\n resource['schema']['fields'] = fields\n\n\ndef process_resources(_res_iter):\n for rows in _res_iter:\n\n def process_rows(_rows):\n for row in _rows:\n for column in columns:\n if column in row:\n del row[column]\n yield row\n yield process_rows(rows)\n\n\nspew(datapackage, process_resources(res_iter))\n",
"step-4": "from datapackage_pipelines.wrapper import ingest, spew\nparams, datapackage, res_iter = ingest()\ncolumns = params['columns']\nfor resource in datapackage['resources']:\n fields = resource.get('schema', {}).get('fields')\n if fields is not None:\n fields = [field for field in fields if field['name'] not in columns]\n resource['schema']['fields'] = fields\n\n\ndef process_resources(_res_iter):\n for rows in _res_iter:\n\n def process_rows(_rows):\n for row in _rows:\n for column in columns:\n if column in row:\n del row[column]\n yield row\n yield process_rows(rows)\n\n\nspew(datapackage, process_resources(res_iter))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/env python
# This file just executes its arguments, except that also adds OUT_DIR to the
# environ. This is for compatibility with cargo.
import subprocess
import sys
import os
os.environ["OUT_DIR"] = os.path.abspath(".")
assert os.path.isdir(os.environ["OUT_DIR"])
sys.exit(subprocess.call(sys.argv[1:], env=os.environ))
|
normal
|
{
"blob_id": "be238268b9fdd565f3cb0770839789b702940ef9",
"index": 8248,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nassert os.path.isdir(os.environ['OUT_DIR'])\nsys.exit(subprocess.call(sys.argv[1:], env=os.environ))\n",
"step-3": "<mask token>\nos.environ['OUT_DIR'] = os.path.abspath('.')\nassert os.path.isdir(os.environ['OUT_DIR'])\nsys.exit(subprocess.call(sys.argv[1:], env=os.environ))\n",
"step-4": "import subprocess\nimport sys\nimport os\nos.environ['OUT_DIR'] = os.path.abspath('.')\nassert os.path.isdir(os.environ['OUT_DIR'])\nsys.exit(subprocess.call(sys.argv[1:], env=os.environ))\n",
"step-5": "#!/usr/bin/env python\n# This file just executes its arguments, except that also adds OUT_DIR to the\n# environ. This is for compatibility with cargo.\nimport subprocess\nimport sys\nimport os\n\nos.environ[\"OUT_DIR\"] = os.path.abspath(\".\")\nassert os.path.isdir(os.environ[\"OUT_DIR\"])\nsys.exit(subprocess.call(sys.argv[1:], env=os.environ))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_entities(corpus):
doc = nlp(corpus)
entities = {}
for ent in doc.ents:
entity_type = ent.label_
entity_name = ent.text
values = entities.get(entity_type, set())
values.add(entity_name)
entities[entity_type] = values
return {key: list(val) for key, val in entities.items()}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
nlp = spacy.load('en_core_web_lg')
def find_entities(corpus):
doc = nlp(corpus)
entities = {}
for ent in doc.ents:
entity_type = ent.label_
entity_name = ent.text
values = entities.get(entity_type, set())
values.add(entity_name)
entities[entity_type] = values
return {key: list(val) for key, val in entities.items()}
<|reserved_special_token_1|>
import spacy
nlp = spacy.load('en_core_web_lg')
def find_entities(corpus):
doc = nlp(corpus)
entities = {}
for ent in doc.ents:
entity_type = ent.label_
entity_name = ent.text
values = entities.get(entity_type, set())
values.add(entity_name)
entities[entity_type] = values
return {key: list(val) for key, val in entities.items()}
<|reserved_special_token_1|>
import spacy
nlp = spacy.load("en_core_web_lg")
def find_entities(corpus):
doc = nlp(corpus)
entities = {}
for ent in doc.ents:
entity_type = ent.label_
entity_name = ent.text
values = entities.get(entity_type, set())
values.add(entity_name)
entities[entity_type] = values
return {key: list(val) for key, val in entities.items()}
|
flexible
|
{
"blob_id": "3a0bf031b76d2df03cdb5b37861cb8942307709c",
"index": 7601,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_entities(corpus):\n doc = nlp(corpus)\n entities = {}\n for ent in doc.ents:\n entity_type = ent.label_\n entity_name = ent.text\n values = entities.get(entity_type, set())\n values.add(entity_name)\n entities[entity_type] = values\n return {key: list(val) for key, val in entities.items()}\n",
"step-3": "<mask token>\nnlp = spacy.load('en_core_web_lg')\n\n\ndef find_entities(corpus):\n doc = nlp(corpus)\n entities = {}\n for ent in doc.ents:\n entity_type = ent.label_\n entity_name = ent.text\n values = entities.get(entity_type, set())\n values.add(entity_name)\n entities[entity_type] = values\n return {key: list(val) for key, val in entities.items()}\n",
"step-4": "import spacy\nnlp = spacy.load('en_core_web_lg')\n\n\ndef find_entities(corpus):\n doc = nlp(corpus)\n entities = {}\n for ent in doc.ents:\n entity_type = ent.label_\n entity_name = ent.text\n values = entities.get(entity_type, set())\n values.add(entity_name)\n entities[entity_type] = values\n return {key: list(val) for key, val in entities.items()}\n",
"step-5": "import spacy\n\nnlp = spacy.load(\"en_core_web_lg\")\n\n\ndef find_entities(corpus):\n doc = nlp(corpus)\n entities = {}\n\n for ent in doc.ents:\n entity_type = ent.label_\n entity_name = ent.text\n\n values = entities.get(entity_type, set())\n values.add(entity_name)\n entities[entity_type] = values\n\n return {key: list(val) for key, val in entities.items()}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import Any, List
__all__: List[str]
record: Any
recarray: Any
format_parser: Any
fromarrays: Any
fromrecords: Any
fromstring: Any
fromfile: Any
array: Any
|
normal
|
{
"blob_id": "2e1ad83bcd16f59338032f8ad5ca8ebd74e92200",
"index": 6664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__: List[str]\nrecord: Any\nrecarray: Any\nformat_parser: Any\nfromarrays: Any\nfromrecords: Any\nfromstring: Any\nfromfile: Any\narray: Any\n",
"step-3": "from typing import Any, List\n__all__: List[str]\nrecord: Any\nrecarray: Any\nformat_parser: Any\nfromarrays: Any\nfromrecords: Any\nfromstring: Any\nfromfile: Any\narray: Any\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
from fw.api import dadata_proxy
from flask import current_app
from fw.cache.cache_wrapper import CacheWrapper
cache = CacheWrapper()
def dadata_suggest(method, data):
return dadata_proxy.dadata_suggest(method, data)
def dadata_clean(method, data):
return dadata_proxy.dadata_clean(method, data)
def get_detailed_address(address):
from fw.utils.address_utils import get_detailed_address as _get_detailed_address
return _get_detailed_address(address)
def dadata_standardize_address(address):
from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address
return _dadata_standardize_address(address)
def get_ifns_by_address(address, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address
return _get_ifns_by_address(address, service_nalog_ru_url)
def get_ifns_by_code(tax_office, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code
return _get_ifns_by_code(tax_office, service_nalog_ru_url)
def get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number, internal_ifns_service, logger):
from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots
return _get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number, internal_ifns_service, logger)
def book_ifns(person_data, company_data, internal_ifns_number, internal_ifns_service, dt, logger):
from services.ifns.ifns_manager import book_ifns as _book_ifns
return _book_ifns(person_data, company_data, internal_ifns_number, internal_ifns_service, dt, logger)
def get_registration_ifns(service_nalog_ru_url, address_ifns=None):
from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns
return _get_registration_ifns(service_nalog_ru_url, address_ifns=address_ifns)
def get_ifns_registrations(name, company_type='ooo', date_from=None, date_to=None,
service=None, ifns=None, service_nalog_ru_url=None, logger=None):
from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations
return _get_ifns_registrations(name, company_type=company_type, date_from=date_from, date_to=date_to,
service=service, ifns=ifns, service_nalog_ru_url=service_nalog_ru_url, logger=logger)
def check_car_policy(policy_series, policy_number, timeout=20.0):
from services.car_assurance.integration import check_car_policy as _check_car_policy
return _check_car_policy(policy_series, policy_number, timeout=timeout)
|
normal
|
{
"blob_id": "af4d2380f92ea636594695e5ad4ba766d6874dd3",
"index": 1355,
"step-1": "<mask token>\n\n\ndef dadata_clean(method, data):\n return dadata_proxy.dadata_clean(method, data)\n\n\ndef get_detailed_address(address):\n from fw.utils.address_utils import get_detailed_address as _get_detailed_address\n return _get_detailed_address(address)\n\n\ndef dadata_standardize_address(address):\n from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address\n return _dadata_standardize_address(address)\n\n\ndef get_ifns_by_address(address, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address\n return _get_ifns_by_address(address, service_nalog_ru_url)\n\n\n<mask token>\n\n\ndef get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number,\n internal_ifns_service, logger):\n from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots\n return _get_nalog_ru_time_slots(person_data, company_data,\n internal_ifns_number, internal_ifns_service, logger)\n\n\ndef book_ifns(person_data, company_data, internal_ifns_number,\n internal_ifns_service, dt, logger):\n from services.ifns.ifns_manager import book_ifns as _book_ifns\n return _book_ifns(person_data, company_data, internal_ifns_number,\n internal_ifns_service, dt, logger)\n\n\ndef get_registration_ifns(service_nalog_ru_url, address_ifns=None):\n from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns\n return _get_registration_ifns(service_nalog_ru_url, address_ifns=\n address_ifns)\n\n\ndef get_ifns_registrations(name, company_type='ooo', date_from=None,\n date_to=None, service=None, ifns=None, service_nalog_ru_url=None,\n logger=None):\n from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations\n return _get_ifns_registrations(name, company_type=company_type,\n date_from=date_from, date_to=date_to, service=service, ifns=ifns,\n service_nalog_ru_url=service_nalog_ru_url, logger=logger)\n\n\ndef 
check_car_policy(policy_series, policy_number, timeout=20.0):\n from services.car_assurance.integration import check_car_policy as _check_car_policy\n return _check_car_policy(policy_series, policy_number, timeout=timeout)\n",
"step-2": "<mask token>\n\n\ndef dadata_suggest(method, data):\n return dadata_proxy.dadata_suggest(method, data)\n\n\ndef dadata_clean(method, data):\n return dadata_proxy.dadata_clean(method, data)\n\n\ndef get_detailed_address(address):\n from fw.utils.address_utils import get_detailed_address as _get_detailed_address\n return _get_detailed_address(address)\n\n\ndef dadata_standardize_address(address):\n from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address\n return _dadata_standardize_address(address)\n\n\ndef get_ifns_by_address(address, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address\n return _get_ifns_by_address(address, service_nalog_ru_url)\n\n\ndef get_ifns_by_code(tax_office, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code\n return _get_ifns_by_code(tax_office, service_nalog_ru_url)\n\n\ndef get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number,\n internal_ifns_service, logger):\n from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots\n return _get_nalog_ru_time_slots(person_data, company_data,\n internal_ifns_number, internal_ifns_service, logger)\n\n\ndef book_ifns(person_data, company_data, internal_ifns_number,\n internal_ifns_service, dt, logger):\n from services.ifns.ifns_manager import book_ifns as _book_ifns\n return _book_ifns(person_data, company_data, internal_ifns_number,\n internal_ifns_service, dt, logger)\n\n\ndef get_registration_ifns(service_nalog_ru_url, address_ifns=None):\n from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns\n return _get_registration_ifns(service_nalog_ru_url, address_ifns=\n address_ifns)\n\n\ndef get_ifns_registrations(name, company_type='ooo', date_from=None,\n date_to=None, service=None, ifns=None, service_nalog_ru_url=None,\n logger=None):\n from services.ifns.ifns_manager 
import get_ifns_registrations as _get_ifns_registrations\n return _get_ifns_registrations(name, company_type=company_type,\n date_from=date_from, date_to=date_to, service=service, ifns=ifns,\n service_nalog_ru_url=service_nalog_ru_url, logger=logger)\n\n\ndef check_car_policy(policy_series, policy_number, timeout=20.0):\n from services.car_assurance.integration import check_car_policy as _check_car_policy\n return _check_car_policy(policy_series, policy_number, timeout=timeout)\n",
"step-3": "<mask token>\ncache = CacheWrapper()\n\n\ndef dadata_suggest(method, data):\n return dadata_proxy.dadata_suggest(method, data)\n\n\ndef dadata_clean(method, data):\n return dadata_proxy.dadata_clean(method, data)\n\n\ndef get_detailed_address(address):\n from fw.utils.address_utils import get_detailed_address as _get_detailed_address\n return _get_detailed_address(address)\n\n\ndef dadata_standardize_address(address):\n from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address\n return _dadata_standardize_address(address)\n\n\ndef get_ifns_by_address(address, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address\n return _get_ifns_by_address(address, service_nalog_ru_url)\n\n\ndef get_ifns_by_code(tax_office, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code\n return _get_ifns_by_code(tax_office, service_nalog_ru_url)\n\n\ndef get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number,\n internal_ifns_service, logger):\n from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots\n return _get_nalog_ru_time_slots(person_data, company_data,\n internal_ifns_number, internal_ifns_service, logger)\n\n\ndef book_ifns(person_data, company_data, internal_ifns_number,\n internal_ifns_service, dt, logger):\n from services.ifns.ifns_manager import book_ifns as _book_ifns\n return _book_ifns(person_data, company_data, internal_ifns_number,\n internal_ifns_service, dt, logger)\n\n\ndef get_registration_ifns(service_nalog_ru_url, address_ifns=None):\n from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns\n return _get_registration_ifns(service_nalog_ru_url, address_ifns=\n address_ifns)\n\n\ndef get_ifns_registrations(name, company_type='ooo', date_from=None,\n date_to=None, service=None, ifns=None, service_nalog_ru_url=None,\n logger=None):\n from 
services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations\n return _get_ifns_registrations(name, company_type=company_type,\n date_from=date_from, date_to=date_to, service=service, ifns=ifns,\n service_nalog_ru_url=service_nalog_ru_url, logger=logger)\n\n\ndef check_car_policy(policy_series, policy_number, timeout=20.0):\n from services.car_assurance.integration import check_car_policy as _check_car_policy\n return _check_car_policy(policy_series, policy_number, timeout=timeout)\n",
"step-4": "from fw.api import dadata_proxy\nfrom flask import current_app\nfrom fw.cache.cache_wrapper import CacheWrapper\ncache = CacheWrapper()\n\n\ndef dadata_suggest(method, data):\n return dadata_proxy.dadata_suggest(method, data)\n\n\ndef dadata_clean(method, data):\n return dadata_proxy.dadata_clean(method, data)\n\n\ndef get_detailed_address(address):\n from fw.utils.address_utils import get_detailed_address as _get_detailed_address\n return _get_detailed_address(address)\n\n\ndef dadata_standardize_address(address):\n from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address\n return _dadata_standardize_address(address)\n\n\ndef get_ifns_by_address(address, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address\n return _get_ifns_by_address(address, service_nalog_ru_url)\n\n\ndef get_ifns_by_code(tax_office, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code\n return _get_ifns_by_code(tax_office, service_nalog_ru_url)\n\n\ndef get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number,\n internal_ifns_service, logger):\n from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots\n return _get_nalog_ru_time_slots(person_data, company_data,\n internal_ifns_number, internal_ifns_service, logger)\n\n\ndef book_ifns(person_data, company_data, internal_ifns_number,\n internal_ifns_service, dt, logger):\n from services.ifns.ifns_manager import book_ifns as _book_ifns\n return _book_ifns(person_data, company_data, internal_ifns_number,\n internal_ifns_service, dt, logger)\n\n\ndef get_registration_ifns(service_nalog_ru_url, address_ifns=None):\n from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns\n return _get_registration_ifns(service_nalog_ru_url, address_ifns=\n address_ifns)\n\n\ndef get_ifns_registrations(name, company_type='ooo', 
date_from=None,\n date_to=None, service=None, ifns=None, service_nalog_ru_url=None,\n logger=None):\n from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations\n return _get_ifns_registrations(name, company_type=company_type,\n date_from=date_from, date_to=date_to, service=service, ifns=ifns,\n service_nalog_ru_url=service_nalog_ru_url, logger=logger)\n\n\ndef check_car_policy(policy_series, policy_number, timeout=20.0):\n from services.car_assurance.integration import check_car_policy as _check_car_policy\n return _check_car_policy(policy_series, policy_number, timeout=timeout)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom fw.api import dadata_proxy\nfrom flask import current_app\n\nfrom fw.cache.cache_wrapper import CacheWrapper\n\ncache = CacheWrapper()\n\n\ndef dadata_suggest(method, data):\n return dadata_proxy.dadata_suggest(method, data)\n\n\ndef dadata_clean(method, data):\n return dadata_proxy.dadata_clean(method, data)\n\n\ndef get_detailed_address(address):\n from fw.utils.address_utils import get_detailed_address as _get_detailed_address\n\n return _get_detailed_address(address)\n\n\ndef dadata_standardize_address(address):\n from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address\n\n return _dadata_standardize_address(address)\n\n\ndef get_ifns_by_address(address, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address\n\n return _get_ifns_by_address(address, service_nalog_ru_url)\n\n\ndef get_ifns_by_code(tax_office, service_nalog_ru_url):\n from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code\n\n return _get_ifns_by_code(tax_office, service_nalog_ru_url)\n\n\ndef get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number, internal_ifns_service, logger):\n from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots\n\n return _get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number, internal_ifns_service, logger)\n\n\ndef book_ifns(person_data, company_data, internal_ifns_number, internal_ifns_service, dt, logger):\n from services.ifns.ifns_manager import book_ifns as _book_ifns\n\n return _book_ifns(person_data, company_data, internal_ifns_number, internal_ifns_service, dt, logger)\n\n\ndef get_registration_ifns(service_nalog_ru_url, address_ifns=None):\n from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns\n\n return _get_registration_ifns(service_nalog_ru_url, address_ifns=address_ifns)\n\n\ndef 
get_ifns_registrations(name, company_type='ooo', date_from=None, date_to=None,\n service=None, ifns=None, service_nalog_ru_url=None, logger=None):\n from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations\n\n return _get_ifns_registrations(name, company_type=company_type, date_from=date_from, date_to=date_to,\n service=service, ifns=ifns, service_nalog_ru_url=service_nalog_ru_url, logger=logger)\n\ndef check_car_policy(policy_series, policy_number, timeout=20.0):\n from services.car_assurance.integration import check_car_policy as _check_car_policy\n return _check_car_policy(policy_series, policy_number, timeout=timeout)\n\n\n",
"step-ids": [
9,
11,
12,
13,
14
]
}
|
[
9,
11,
12,
13,
14
] |
import numpy as np
import os
pwd = os.path.dirname(os.path.realpath(__file__))
train_data = np.load(os.path.join(pwd, 'purchase2_train.npy'), allow_pickle
=True)
test_data = np.load(os.path.join(pwd, 'purchase2_test.npy'), allow_pickle=True)
train_data = train_data.reshape((1,))[0]
test_data = test_data.reshape((1,))[0]
X_train = train_data['X'].astype(np.float32)
X_test = test_data['X'].astype(np.float32)
y_train = train_data['y'].astype(np.int64)
y_test = test_data['y'].astype(np.int64)
def load(indices, category='train'):
if category == 'train':
if max(indices) < len(X_train) and max(indices) < len(y_train):
return X_train[indices], y_train[indices]
else:
l = np.array([a for a in indices if a < len(X_train) and a <
len(y_train)], np.int64)
return X_train[l], y_train[l]
elif category == 'test':
return X_test[indices], y_test[indices]
|
normal
|
{
"blob_id": "8c364a518ab615803ea99520e90ee1dd24d37a8c",
"index": 2524,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef load(indices, category='train'):\n if category == 'train':\n if max(indices) < len(X_train) and max(indices) < len(y_train):\n return X_train[indices], y_train[indices]\n else:\n l = np.array([a for a in indices if a < len(X_train) and a <\n len(y_train)], np.int64)\n return X_train[l], y_train[l]\n elif category == 'test':\n return X_test[indices], y_test[indices]\n",
"step-3": "<mask token>\npwd = os.path.dirname(os.path.realpath(__file__))\ntrain_data = np.load(os.path.join(pwd, 'purchase2_train.npy'), allow_pickle\n =True)\ntest_data = np.load(os.path.join(pwd, 'purchase2_test.npy'), allow_pickle=True)\ntrain_data = train_data.reshape((1,))[0]\ntest_data = test_data.reshape((1,))[0]\nX_train = train_data['X'].astype(np.float32)\nX_test = test_data['X'].astype(np.float32)\ny_train = train_data['y'].astype(np.int64)\ny_test = test_data['y'].astype(np.int64)\n\n\ndef load(indices, category='train'):\n if category == 'train':\n if max(indices) < len(X_train) and max(indices) < len(y_train):\n return X_train[indices], y_train[indices]\n else:\n l = np.array([a for a in indices if a < len(X_train) and a <\n len(y_train)], np.int64)\n return X_train[l], y_train[l]\n elif category == 'test':\n return X_test[indices], y_test[indices]\n",
"step-4": "import numpy as np\nimport os\npwd = os.path.dirname(os.path.realpath(__file__))\ntrain_data = np.load(os.path.join(pwd, 'purchase2_train.npy'), allow_pickle\n =True)\ntest_data = np.load(os.path.join(pwd, 'purchase2_test.npy'), allow_pickle=True)\ntrain_data = train_data.reshape((1,))[0]\ntest_data = test_data.reshape((1,))[0]\nX_train = train_data['X'].astype(np.float32)\nX_test = test_data['X'].astype(np.float32)\ny_train = train_data['y'].astype(np.int64)\ny_test = test_data['y'].astype(np.int64)\n\n\ndef load(indices, category='train'):\n if category == 'train':\n if max(indices) < len(X_train) and max(indices) < len(y_train):\n return X_train[indices], y_train[indices]\n else:\n l = np.array([a for a in indices if a < len(X_train) and a <\n len(y_train)], np.int64)\n return X_train[l], y_train[l]\n elif category == 'test':\n return X_test[indices], y_test[indices]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Demo script: build two base-cabinet modules on a shared skeleton and render them.
from models import Person, Skeleton, Base_dolni, Dolen_vrata

# Two owners; only the second (slightly shorter, 590-deep) skeleton is used below.
first_owner = Person("Stoian")
second_owner = Person("Ivanov")

# NOTE(review): this first skeleton is never referenced again — kept in case
# Skeleton() has construction side effects; confirm before removing.
spare_skeleton = Skeleton(first_owner, 900, 600, 2, 18, 28, 40)
cabinet_skeleton = Skeleton(second_owner, 900, 590, 2, 18, 28, 40)

# Plain base module next to the sink.
sink_base = Base_dolni(cabinet_skeleton, 550)
sink_base.set_description("dolen do mivkata")
sink_base.rendModul()

# Second base module, fitted with two doors.
two_door_base = Dolen_vrata(cabinet_skeleton, 400, 2)
two_door_base.set_description("долен втори с 2 врати")
two_door_base.rendModul()
|
normal
|
{
"blob_id": "3d10f8810594303beb0ccabce3497de86149b2e5",
"index": 6666,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndol_001.set_description('dolen do mivkata')\ndol_001.rendModul()\n<mask token>\ndol_002.set_description('долен втори с 2 врати')\ndol_002.rendModul()\n",
"step-3": "<mask token>\nst = Person('Stoian')\nStoian = Person('Ivanov')\ndolni = Skeleton(st, 900, 600, 2, 18, 28, 40)\ndolni_st = Skeleton(Stoian, 900, 590, 2, 18, 28, 40)\ndol_001 = Base_dolni(dolni_st, 550)\ndol_001.set_description('dolen do mivkata')\ndol_001.rendModul()\ndol_002 = Dolen_vrata(dolni_st, 400, 2)\ndol_002.set_description('долен втори с 2 врати')\ndol_002.rendModul()\n",
"step-4": "from models import Person\nfrom models import Skeleton\nfrom models import Base_dolni\nfrom models import Dolen_vrata\nst = Person('Stoian')\nStoian = Person('Ivanov')\ndolni = Skeleton(st, 900, 600, 2, 18, 28, 40)\ndolni_st = Skeleton(Stoian, 900, 590, 2, 18, 28, 40)\ndol_001 = Base_dolni(dolni_st, 550)\ndol_001.set_description('dolen do mivkata')\ndol_001.rendModul()\ndol_002 = Dolen_vrata(dolni_st, 400, 2)\ndol_002.set_description('долен втори с 2 врати')\ndol_002.rendModul()\n",
"step-5": "from models import Person\nfrom models import Skeleton\nfrom models import Base_dolni\nfrom models import Dolen_vrata\n\nst = Person(\"Stoian\")\nStoian = Person(\"Ivanov\")\n\ndolni = Skeleton(st, 900, 600, 2, 18, 28, 40)\ndolni_st = Skeleton(Stoian, 900, 590, 2, 18, 28, 40)\n\ndol_001 = Base_dolni(dolni_st, 550)\ndol_001.set_description(\"dolen do mivkata\")\ndol_001.rendModul()\n\ndol_002 = Dolen_vrata(dolni_st, 400, 2)\ndol_002.set_description(\"долен втори с 2 врати\")\ndol_002.rendModul()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Ниже на четырёх языках программирования записана программа, которая вводит натуральное число 𝑥,
выполняет преобразования, а затем выводит результат. Укажите наименьшее значение 𝑥,
при вводе которого программа выведет число 10.
Тупо вручную ввёл. Крч 9. Хз, как на экзамене делать))
"""
x = int(input())
a = 3 * x + 23
b = 3 * x - 17
while a != b:
if a > b:
a -= b
else:
b -= a
print(a)
print('---')
number = 7
while number < 100:
x = number
a = 3 * x + 23
b = 3 * x - 17
while a != b:
if a > b:
a -= b
else:
b -= a
if a == 10:
print(x)
x += 1
|
normal
|
{
"blob_id": "181e9ac4acf0e69576716f3589359736bfbd9bef",
"index": 2380,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile a != b:\n if a > b:\n a -= b\n else:\n b -= a\nprint(a)\nprint('---')\n<mask token>\nwhile number < 100:\n x = number\n a = 3 * x + 23\n b = 3 * x - 17\n while a != b:\n if a > b:\n a -= b\n else:\n b -= a\n if a == 10:\n print(x)\n x += 1\n",
"step-3": "<mask token>\nx = int(input())\na = 3 * x + 23\nb = 3 * x - 17\nwhile a != b:\n if a > b:\n a -= b\n else:\n b -= a\nprint(a)\nprint('---')\nnumber = 7\nwhile number < 100:\n x = number\n a = 3 * x + 23\n b = 3 * x - 17\n while a != b:\n if a > b:\n a -= b\n else:\n b -= a\n if a == 10:\n print(x)\n x += 1\n",
"step-4": "\"\"\"\nНиже на четырёх языках программирования записана программа, которая вводит натуральное число 𝑥,\nвыполняет преобразования, а затем выводит результат. Укажите наименьшее значение 𝑥,\nпри вводе которого программа выведет число 10.\n\nТупо вручную ввёл. Крч 9. Хз, как на экзамене делать))\n\"\"\"\nx = int(input())\na = 3 * x + 23\nb = 3 * x - 17\nwhile a != b:\n if a > b:\n a -= b\n else:\n b -= a\nprint(a)\nprint('---')\nnumber = 7\nwhile number < 100:\n x = number\n a = 3 * x + 23\n b = 3 * x - 17\n while a != b:\n if a > b:\n a -= b\n else:\n b -= a\n if a == 10:\n print(x)\n x += 1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TestView(BaseView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestView(BaseView):
<|reserved_special_token_0|>
template_name = 'test/music-1.html'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestView(BaseView):
"""
测试页面
"""
template_name = 'test/music-1.html'
<|reserved_special_token_1|>
from core.views import BaseView
class TestView(BaseView):
"""
测试页面
"""
template_name = 'test/music-1.html'
<|reserved_special_token_1|>
# -*-coding:utf-8 -*-
#
# Created on 2016-04-01
# __ __
# - /__) _ /__) __/
# / / ( (/ / ( /
# /
from core.views import BaseView
class TestView(BaseView):
"""
测试页面
"""
# template_name = 'test/blog-1.html'
template_name = 'test/music-1.html'
|
flexible
|
{
"blob_id": "dc2b074d7d0e87105b2479bb60b46c73dce6c069",
"index": 6113,
"step-1": "<mask token>\n\n\nclass TestView(BaseView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestView(BaseView):\n <mask token>\n template_name = 'test/music-1.html'\n",
"step-3": "<mask token>\n\n\nclass TestView(BaseView):\n \"\"\"\n 测试页面\n \"\"\"\n template_name = 'test/music-1.html'\n",
"step-4": "from core.views import BaseView\n\n\nclass TestView(BaseView):\n \"\"\"\n 测试页面\n \"\"\"\n template_name = 'test/music-1.html'\n",
"step-5": "# -*-coding:utf-8 -*-\n#\n# Created on 2016-04-01\n# __ __\n# - /__) _ /__) __/\n# / / ( (/ / ( /\n# /\n\nfrom core.views import BaseView\n\n\nclass TestView(BaseView):\n \"\"\"\n 测试页面\n \"\"\"\n\n # template_name = 'test/blog-1.html'\n template_name = 'test/music-1.html'\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.http import request
from restapp.ExcelSheet import *
'''ApiHomeDict={}
class LoadDict():
e = ExcelSheetAll()
ApiHomeDict = e.apiHomeDict()
print ApiHomeDict
class ReturnApi:
def returnDict(self):
return ApiHomeDict'''
'''if "ApiDictionary" in request.session:
print 'Dictioanry is already stored in session'
else:
e=ExcelSheetAll()
ApiHomeDict=e.apiHomeDict()
request.session['ApiDictionary'] = ApiHomeDict
#print ApiHomeDict
print request.session['ApiDictionary']'''
|
normal
|
{
"blob_id": "ff924b803a875d3f6201baa2c1251a6c5b8cde61",
"index": 5903,
"step-1": "<mask token>\n",
"step-2": "from django.http import request\nfrom restapp.ExcelSheet import *\n<mask token>\n",
"step-3": "from django.http import request\r\nfrom restapp.ExcelSheet import *\r\n\r\n\r\n'''ApiHomeDict={}\r\nclass LoadDict():\r\n e = ExcelSheetAll()\r\n ApiHomeDict = e.apiHomeDict()\r\n print ApiHomeDict\r\nclass ReturnApi:\r\n def returnDict(self):\r\n return ApiHomeDict'''\r\n'''if \"ApiDictionary\" in request.session:\r\n print 'Dictioanry is already stored in session'\r\nelse:\r\n e=ExcelSheetAll()\r\n ApiHomeDict=e.apiHomeDict()\r\n request.session['ApiDictionary'] = ApiHomeDict\r\n #print ApiHomeDict\r\nprint request.session['ApiDictionary']'''\r\n\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from enum import Enum
from roll.input import Input
from roll.network import Server, Client
from assets.game_projects.fighter.src.game_properties import GameProperties
from assets.game_projects.fighter.src.network_message import NetworkMessage
class InputBuffer:
    """
    Per-frame store of game inputs for one player.

    The game state pulls input data from here; network messages also feed
    the buffer with inputs received from the remote player.  Entries older
    than ``frame_limit`` frames are evicted as new frames are polled.
    """

    class Value(Enum):
        LEFT = "l"
        RIGHT = "r"
        UP = "u"
        DOWN = "d"
        WEAK_PUNCH = "wp"

    def __init__(self, left_action_name: str, right_action_name: str,
                 weak_punch_action_name: str, frame_limit=12):
        # Maps frame number -> list of serialized input values for that frame.
        self._inputs = {}
        self._frame_limit = frame_limit
        self.left_action_name = left_action_name
        self.right_action_name = right_action_name
        self.weak_punch_action_name = weak_punch_action_name

    def __str__(self):
        return str(self._inputs)

    def __repr__(self):
        return str(self._inputs)

    @property
    def values(self) -> list:
        # NOTE: returns the live dict view, not a copy.
        return self._inputs.values()

    def add_input(self, input, frame: int) -> None:
        """Record an InputBuffer.Value's serialized form under *frame*."""
        self._inputs.setdefault(frame, []).append(input.value)

    def get_inputs(self) -> dict:
        return self._inputs

    def get_frame_inputs(self, frame: int) -> list:
        return self._inputs.get(frame, [])

    def is_empty(self) -> bool:
        return not self._inputs

    def clear(self):
        self._inputs.clear()

    def poll_client_inputs(self, frame: int) -> None:
        """Sample the local input device and record pressed actions for *frame*."""
        # Left wins over right when both are held; punch is independent.
        if Input.is_action_pressed(action_name=self.left_action_name):
            self.add_input(self.Value.LEFT, frame)
        elif Input.is_action_pressed(action_name=self.right_action_name):
            self.add_input(self.Value.RIGHT, frame)
        if Input.is_action_pressed(action_name=self.weak_punch_action_name):
            self.add_input(self.Value.WEAK_PUNCH, frame)
        # Evict the frame that just fell out of the rolling window.
        self._inputs.pop(frame - self._frame_limit, None)
class OutgoingNetworkInputBuffer(InputBuffer):
    """Input buffer that also ships locally-polled inputs to the remote peer."""

    def __init__(self, left_action_name: str, right_action_name: str,
                 weak_punch_action_name: str, frame_limit=12):
        super().__init__(
            left_action_name=left_action_name,
            right_action_name=right_action_name,
            weak_punch_action_name=weak_punch_action_name,
            frame_limit=frame_limit,
        )
        self.game_properties = GameProperties()

    def poll_client_inputs(self, frame: int) -> None:
        """Poll local inputs, then send this frame's inputs over the network."""
        super().poll_client_inputs(frame=frame)
        inputs_for_frame = self.get_frame_inputs(frame=frame)
        if not inputs_for_frame:
            return
        # Same wire format either way; only the transport endpoint differs.
        message = f"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=inputs_for_frame)}"
        if self.game_properties.is_server:
            Server.send_message_to_all_clients(message=message)
        else:
            Client.send_message_to_server(message=message)
class IncomingNetworkInputBuffer(InputBuffer):
    """Input buffer fed by network messages from the remote player."""

    def __init__(self, frame_limit=12):
        # Action names are irrelevant here: inputs arrive over the wire,
        # never from a local device.
        super().__init__(
            left_action_name="",
            right_action_name="",
            weak_punch_action_name="",
            frame_limit=frame_limit,
        )
        self.game_properties = GameProperties()

    def add_input(self, input: str, frame: int) -> None:
        # Unlike the base class, *input* is already the serialized string.
        self._inputs.setdefault(frame, []).append(input)

    def poll_client_inputs(self, frame: int) -> None:
        # TODO: proper input prediction when remote data has not arrived yet.
        if not self.game_properties.has_received_network_inputs:
            pass
        # Evict the frame that just fell out of the rolling window.
        self._inputs.pop(frame - self._frame_limit, None)
|
normal
|
{
"blob_id": "4789546128263bd298f8f5827734f8402747b9ac",
"index": 67,
"step-1": "<mask token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n <mask token>\n <mask token>\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n",
"step-2": "<mask token>\n\n\nclass InputBuffer:\n <mask token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n <mask token>\n\n def __str__(self):\n return f'{self._inputs}'\n <mask token>\n\n @property\n def values(self) ->list:\n return self._inputs.values()\n <mask token>\n <mask token>\n <mask token>\n\n def is_empty(self) ->bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n <mask token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n",
"step-3": "<mask token>\n\n\nclass InputBuffer:\n <mask token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n <mask token>\n\n @property\n def values(self) ->list:\n return self._inputs.values()\n <mask token>\n <mask token>\n <mask token>\n\n def is_empty(self) ->bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n <mask token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n 
else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n",
"step-4": "<mask token>\n\n\nclass InputBuffer:\n <mask token>\n\n\n class Value(Enum):\n LEFT = 'l'\n RIGHT = 'r'\n UP = 'u'\n DOWN = 'd'\n WEAK_PUNCH = 'wp'\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f'{self._inputs}'\n\n def __repr__(self):\n return f'{self._inputs}'\n\n @property\n def values(self) ->list:\n return self._inputs.values()\n <mask token>\n <mask token>\n\n def get_frame_inputs(self, frame: int) ->list:\n return self._inputs.get(frame, [])\n\n def is_empty(self) ->bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n <mask token>\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, left_action_name: str, right_action_name: str,\n weak_punch_action_name: str, frame_limit=12):\n super().__init__(left_action_name=left_action_name,\n right_action_name=right_action_name, weak_punch_action_name=\n weak_punch_action_name, frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) ->None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n else:\n Client.send_message_to_server(message=\n f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n\n def __init__(self, frame_limit=12):\n super().__init__(left_action_name='', right_action_name='',\n weak_punch_action_name='', frame_limit=frame_limit)\n self.game_properties = GameProperties()\n\n def 
add_input(self, input: str, frame: int) ->None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) ->None:\n if not self.game_properties.has_received_network_inputs:\n pass\n self._inputs.pop(frame - self._frame_limit, None)\n",
"step-5": "from enum import Enum\n\nfrom roll.input import Input\nfrom roll.network import Server, Client\n\nfrom assets.game_projects.fighter.src.game_properties import GameProperties\nfrom assets.game_projects.fighter.src.network_message import NetworkMessage\n\n\nclass InputBuffer:\n \"\"\"\n Responsible for collecting game input from both players. The game state will pull data from here if needed.\n Network messages will also update the input buffer when receiving data from the opposite player\n \"\"\"\n\n class Value(Enum):\n LEFT = \"l\"\n RIGHT = \"r\"\n UP = \"u\"\n DOWN = \"d\"\n WEAK_PUNCH = \"wp\"\n\n def __init__(\n self,\n left_action_name: str,\n right_action_name: str,\n weak_punch_action_name: str,\n frame_limit=12,\n ):\n self._inputs = {}\n self.left_action_name = left_action_name\n self.right_action_name = right_action_name\n self.weak_punch_action_name = weak_punch_action_name\n self._frame_limit = frame_limit\n\n def __str__(self):\n return f\"{self._inputs}\"\n\n def __repr__(self):\n return f\"{self._inputs}\"\n\n @property\n def values(self) -> list:\n return self._inputs.values()\n\n def add_input(self, input, frame: int) -> None:\n if frame in self._inputs:\n self._inputs[frame].append(input.value)\n else:\n self._inputs[frame] = [input.value]\n\n def get_inputs(self) -> dict:\n return self._inputs\n\n def get_frame_inputs(self, frame: int) -> list:\n return self._inputs.get(frame, [])\n\n def is_empty(self) -> bool:\n return len(self._inputs) == 0\n\n def clear(self):\n self._inputs.clear()\n\n def poll_client_inputs(self, frame: int) -> None:\n if Input.is_action_pressed(action_name=self.left_action_name):\n self.add_input(input=InputBuffer.Value.LEFT, frame=frame)\n elif Input.is_action_pressed(action_name=self.right_action_name):\n self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)\n if Input.is_action_pressed(action_name=self.weak_punch_action_name):\n self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)\n\n 
self._inputs.pop(frame - self._frame_limit, None)\n\n\nclass OutgoingNetworkInputBuffer(InputBuffer):\n def __init__(\n self,\n left_action_name: str,\n right_action_name: str,\n weak_punch_action_name: str,\n frame_limit=12,\n ):\n super().__init__(\n left_action_name=left_action_name,\n right_action_name=right_action_name,\n weak_punch_action_name=weak_punch_action_name,\n frame_limit=frame_limit,\n )\n self.game_properties = GameProperties()\n\n def poll_client_inputs(self, frame: int) -> None:\n super().poll_client_inputs(frame=frame)\n frame_inputs = self.get_frame_inputs(frame=frame)\n if frame_inputs:\n if self.game_properties.is_server:\n Server.send_message_to_all_clients(\n message=f\"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}\"\n )\n else:\n Client.send_message_to_server(\n message=f\"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}\"\n )\n\n\nclass IncomingNetworkInputBuffer(InputBuffer):\n def __init__(self, frame_limit=12):\n super().__init__(\n left_action_name=\"\",\n right_action_name=\"\",\n weak_punch_action_name=\"\",\n frame_limit=frame_limit,\n )\n self.game_properties = GameProperties()\n\n def add_input(self, input: str, frame: int) -> None:\n if frame in self._inputs:\n self._inputs[frame].append(input)\n else:\n self._inputs[frame] = [input]\n\n def poll_client_inputs(self, frame: int) -> None:\n # TODO: Proper prediction\n if not self.game_properties.has_received_network_inputs:\n pass\n\n self._inputs.pop(frame - self._frame_limit, None)\n",
"step-ids": [
5,
12,
13,
15,
21
]
}
|
[
5,
12,
13,
15,
21
] |
#!/usr/bin/env python3
"""Count '#' cells hit while descending a grid on a fixed diagonal slope."""


def count_trees(rows, right=3):
    """Return how many rows have a '#' at column ``row_index * right``,
    wrapping around each row's width (the grid repeats horizontally).

    ``right`` generalizes the slope; the default 3 matches the original
    hard-coded behavior.
    """
    # Generator expression: no throwaway list, booleans sum as 0/1.
    return sum(
        row[line_number * right % len(row)] == '#'
        for line_number, row in enumerate(rows)
    )


if __name__ == "__main__":
    # `with` closes the file deterministically instead of leaking the handle.
    with open('input.txt') as f:
        print(count_trees(f.read().splitlines()))
|
normal
|
{
"blob_id": "b2fecadbd99edb89379f82a935aa1622f043eeac",
"index": 9099,
"step-1": "<mask token>\n",
"step-2": "print(sum([(row[lineNumber * 3 % len(row)] == '#') for lineNumber, row in\n enumerate(open('input.txt').read().splitlines())]))\n",
"step-3": "#!/usr/bin/env python3\n\nprint(sum([row[lineNumber * 3 % len(row)] == '#' for lineNumber, row in enumerate(open('input.txt').read().splitlines())]))",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register.filter(name='phone_number')
def phone_number(number):
first = number[0:3]
second = number[3:6]
third = number[6:10]
return '(' + first + ')' + ' ' + second + '-' + third
<|reserved_special_token_1|>
<|reserved_special_token_0|>
register = template.Library()
@register.filter(name='phone_number')
def phone_number(number):
first = number[0:3]
second = number[3:6]
third = number[6:10]
return '(' + first + ')' + ' ' + second + '-' + third
<|reserved_special_token_1|>
from django import template
register = template.Library()
@register.filter(name='phone_number')
def phone_number(number):
first = number[0:3]
second = number[3:6]
third = number[6:10]
return '(' + first + ')' + ' ' + second + '-' + third
<|reserved_special_token_1|>
from django import template
register = template.Library()
@register.filter(name='phone_number')
def phone_number(number): # Convert a 10 character string into (xxx) xxx-xxxx.
first = number[0:3]
second = number[3:6]
third = number[6:10]
return '(' + first + ')' + ' ' + second + '-' + third
|
flexible
|
{
"blob_id": "5e79a8a8fe79aac900fc0c2ff1caaa73ea08ada2",
"index": 5697,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@register.filter(name='phone_number')\ndef phone_number(number):\n first = number[0:3]\n second = number[3:6]\n third = number[6:10]\n return '(' + first + ')' + ' ' + second + '-' + third\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\n@register.filter(name='phone_number')\ndef phone_number(number):\n first = number[0:3]\n second = number[3:6]\n third = number[6:10]\n return '(' + first + ')' + ' ' + second + '-' + third\n",
"step-4": "from django import template\nregister = template.Library()\n\n\n@register.filter(name='phone_number')\ndef phone_number(number):\n first = number[0:3]\n second = number[3:6]\n third = number[6:10]\n return '(' + first + ')' + ' ' + second + '-' + third\n",
"step-5": "from django import template\n\nregister = template.Library()\n\n\n@register.filter(name='phone_number')\ndef phone_number(number): # Convert a 10 character string into (xxx) xxx-xxxx.\n\tfirst = number[0:3]\n\tsecond = number[3:6]\n\tthird = number[6:10]\n\treturn '(' + first + ')' + ' ' + second + '-' + third\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tf.disable_v2_behavior()
<|reserved_special_token_0|>
print('Loading video {video_path}...'.format(video_path=video_path))
if not os.path.exists(video_path):
print('File does not exist. Exited.')
exit()
<|reserved_special_token_0|>
with tf.Session() as sess:
sess.run(model.pretrained())
cap = cv2.VideoCapture(video_path)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width_scale = 1
height_scale = 1
if use_original_video_size_as_output_size:
width_scale = width / img_size
height_scale = height / img_size
def drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=
width_scale, height_scale=height_scale):
point1 = int(pt1[0] * width_scale), int(pt1[1] * height_scale)
point2 = int(pt2[0] * width_scale), int(pt2[1] * height_scale)
return cv2.rectangle(img, point1, point2, color, thickness)
def drawTextCV2(img, text, pt, font, font_scale, color, lineType,
width_scale=width_scale, height_scale=height_scale):
pt = int(pt[0] * width_scale), int(pt[1] * height_scale)
cv2.putText(img, text, pt, font, font_scale, color, lineType)
def drawCircleCV2(img, center, radius, color, thickness, width_scale=
width_scale, height_scale=height_scale):
center = int(center[0] * width_scale), int(center[1] * height_scale)
cv2.circle(img, center, radius, color, thickness)
print('Loaded {video_path}. Width: {width}, Height: {height}'.format(
video_path=video_path, width=width, height=height))
skipped_frames_counter = 0
while cap.isOpened():
ret, frame = cap.read()
if ret == False:
print('Error reading frame. cap.read() returned {ret}'.format(ret))
img = cv2.resize(frame, (img_size, img_size))
output_img = frame if use_original_video_size_as_output_size else img
tracker_rects = []
if skipped_frames_counter == skip_frames:
print('[DETECTING]')
trackers = []
skipped_frames_counter = 0
np_img = np.array(img).reshape(-1, img_size, img_size, 3)
start_time = time.time()
predictions = sess.run(model.preds, {inputs: model.preprocess(
np_img)})
print('Detection took %s seconds' % (time.time() - start_time))
detections = model.get_boxes(predictions, np_img.shape[1:3])
np_detections = np.array(detections)
for class_index in classes.keys():
local_count = 0
class_name = classes[class_index]
for i in range(len(np_detections[class_index])):
box = np_detections[class_index][i]
if np_detections[class_index][i][4] >= confidence_level:
print('Detected ', class_name,
' with confidence of ', np_detections[
class_index][i][4])
local_count += 1
startX, startY, endX, endY = box[0], box[1], box[2
], box[3]
drawRectangleCV2(output_img, (startX, startY), (
endX, endY), (0, 255, 0), 1)
drawTextCV2(output_img, class_name, (startX, startY
), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(int(startX), int(startY), int
(endX), int(endY))
tracker.start_track(img, rect)
trackers.append(tracker)
print(class_name, ' : ', local_count)
else:
print('[TRACKING]')
skipped_frames_counter += 1
for tracker in trackers:
tracker.update(img)
pos = tracker.get_position()
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
tracker_rects.append((startX, startY, endX, endY))
drawRectangleCV2(output_img, (startX, startY), (endX, endY),
(255, 0, 0), 1)
objects = ct.update(tracker_rects)
for objectID, centroid in objects.items():
to = trackableObjects.get(objectID, None)
if to is None:
to = TrackableObject(objectID, centroid)
else:
to.centroids.append(centroid)
if not to.counted:
total += 1
to.counted = True
trackableObjects[objectID] = to
object_id = 'ID {}'.format(objectID)
drawTextCV2(output_img, object_id, (centroid[0] - 10, centroid[
1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0,
255, 0), -1)
total_str = 'Total counted: ' + str(total)
drawTextCV2(output_img, total_str, (10, 30), cv2.
FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imshow(video_name, output_img)
key = cv2.waitKey(1) & 255
if key == ord('q'):
break
elif key == ord('p'):
cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
print('Exited')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tf.disable_v2_behavior()
img_size = 416
inputs = tf.placeholder(tf.float32, [None, img_size, img_size, 3])
model = nets.YOLOv3COCO(inputs, nets.Darknet19)
ct = CentroidTracker(maxDisappeared=5, maxDistance=50)
trackers = []
trackableObjects = {}
skip_frames = 10
confidence_level = 0.4
total = 0
use_original_video_size_as_output_size = True
video_path = os.getcwd() + '/videos/M6 Motorway Traffic - Short version.mp4'
video_name = os.path.basename(video_path)
print('Loading video {video_path}...'.format(video_path=video_path))
if not os.path.exists(video_path):
print('File does not exist. Exited.')
exit()
all_classes = ['person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'sofa', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tvmonitor',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
classes = {(1): 'bicycle', (2): 'car', (3): 'motorbike', (5): 'bus', (7):
'truck'}
with tf.Session() as sess:
sess.run(model.pretrained())
cap = cv2.VideoCapture(video_path)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width_scale = 1
height_scale = 1
if use_original_video_size_as_output_size:
width_scale = width / img_size
height_scale = height / img_size
def drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=
width_scale, height_scale=height_scale):
point1 = int(pt1[0] * width_scale), int(pt1[1] * height_scale)
point2 = int(pt2[0] * width_scale), int(pt2[1] * height_scale)
return cv2.rectangle(img, point1, point2, color, thickness)
def drawTextCV2(img, text, pt, font, font_scale, color, lineType,
width_scale=width_scale, height_scale=height_scale):
pt = int(pt[0] * width_scale), int(pt[1] * height_scale)
cv2.putText(img, text, pt, font, font_scale, color, lineType)
def drawCircleCV2(img, center, radius, color, thickness, width_scale=
width_scale, height_scale=height_scale):
center = int(center[0] * width_scale), int(center[1] * height_scale)
cv2.circle(img, center, radius, color, thickness)
print('Loaded {video_path}. Width: {width}, Height: {height}'.format(
video_path=video_path, width=width, height=height))
skipped_frames_counter = 0
while cap.isOpened():
ret, frame = cap.read()
if ret == False:
print('Error reading frame. cap.read() returned {ret}'.format(ret))
img = cv2.resize(frame, (img_size, img_size))
output_img = frame if use_original_video_size_as_output_size else img
tracker_rects = []
if skipped_frames_counter == skip_frames:
print('[DETECTING]')
trackers = []
skipped_frames_counter = 0
np_img = np.array(img).reshape(-1, img_size, img_size, 3)
start_time = time.time()
predictions = sess.run(model.preds, {inputs: model.preprocess(
np_img)})
print('Detection took %s seconds' % (time.time() - start_time))
detections = model.get_boxes(predictions, np_img.shape[1:3])
np_detections = np.array(detections)
for class_index in classes.keys():
local_count = 0
class_name = classes[class_index]
for i in range(len(np_detections[class_index])):
box = np_detections[class_index][i]
if np_detections[class_index][i][4] >= confidence_level:
print('Detected ', class_name,
' with confidence of ', np_detections[
class_index][i][4])
local_count += 1
startX, startY, endX, endY = box[0], box[1], box[2
], box[3]
drawRectangleCV2(output_img, (startX, startY), (
endX, endY), (0, 255, 0), 1)
drawTextCV2(output_img, class_name, (startX, startY
), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(int(startX), int(startY), int
(endX), int(endY))
tracker.start_track(img, rect)
trackers.append(tracker)
print(class_name, ' : ', local_count)
else:
print('[TRACKING]')
skipped_frames_counter += 1
for tracker in trackers:
tracker.update(img)
pos = tracker.get_position()
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
tracker_rects.append((startX, startY, endX, endY))
drawRectangleCV2(output_img, (startX, startY), (endX, endY),
(255, 0, 0), 1)
objects = ct.update(tracker_rects)
for objectID, centroid in objects.items():
to = trackableObjects.get(objectID, None)
if to is None:
to = TrackableObject(objectID, centroid)
else:
to.centroids.append(centroid)
if not to.counted:
total += 1
to.counted = True
trackableObjects[objectID] = to
object_id = 'ID {}'.format(objectID)
drawTextCV2(output_img, object_id, (centroid[0] - 10, centroid[
1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0,
255, 0), -1)
total_str = 'Total counted: ' + str(total)
drawTextCV2(output_img, total_str, (10, 30), cv2.
FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imshow(video_name, output_img)
key = cv2.waitKey(1) & 255
if key == ord('q'):
break
elif key == ord('p'):
cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
print('Exited')
<|reserved_special_token_1|>
from tracking.centroidtracker import CentroidTracker
from tracking.trackableobject import TrackableObject
import tensornets as nets
import cv2
import numpy as np
import time
import dlib
import tensorflow.compat.v1 as tf
import os

# The TF1 graph/session API is used below, so TF2 eager behavior is disabled.
tf.disable_v2_behavior()

# YOLOv3 expects a fixed 416x416 input image.
img_size = 416
inputs = tf.placeholder(tf.float32, [None, img_size, img_size, 3])
model = nets.YOLOv3COCO(inputs, nets.Darknet19)

ct = CentroidTracker(maxDisappeared=5, maxDistance=50)
trackers = []            # active dlib correlation trackers
trackableObjects = {}    # objectID -> TrackableObject
skip_frames = 10         # number of frames to track between detection passes
confidence_level = 0.4   # minimum confidence for a detection to be kept
total = 0                # running count of unique counted objects
# When True, annotations are drawn on the original-size frame instead of the
# 416x416 network input (detection still runs on the 416x416 image).
use_original_video_size_as_output_size = True

video_path = os.getcwd() + '/videos/M6 Motorway Traffic - Short version.mp4'
video_name = os.path.basename(video_path)
print('Loading video {video_path}...'.format(video_path=video_path))
if not os.path.exists(video_path):
    print('File does not exist. Exited.')
    exit()

# The 80 COCO class names, in YOLOv3 output order.
all_classes = ['person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
    'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
    'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
    'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
    'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
    'sofa', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tvmonitor',
    'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
    'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
    'scissors', 'teddy bear', 'hair drier', 'toothbrush']
# Classes of interest, keyed by their index in the YOLOv3 class list.
classes = {1: 'bicycle', 2: 'car', 3: 'motorbike', 5: 'bus', 7: 'truck'}

with tf.Session() as sess:
    sess.run(model.pretrained())
    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Scale factors from network coordinates (416x416) to output coordinates.
    width_scale = 1
    height_scale = 1
    if use_original_video_size_as_output_size:
        width_scale = width / img_size
        height_scale = height / img_size

    def drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=width_scale, height_scale=height_scale):
        # Draw a rectangle, rescaling both corner points to output size.
        point1 = (int(pt1[0] * width_scale), int(pt1[1] * height_scale))
        point2 = (int(pt2[0] * width_scale), int(pt2[1] * height_scale))
        return cv2.rectangle(img, point1, point2, color, thickness)

    def drawTextCV2(img, text, pt, font, font_scale, color, lineType, width_scale=width_scale, height_scale=height_scale):
        # Draw text, rescaling its anchor point to output size.
        pt = (int(pt[0] * width_scale), int(pt[1] * height_scale))
        cv2.putText(img, text, pt, font, font_scale, color, lineType)

    def drawCircleCV2(img, center, radius, color, thickness, width_scale=width_scale, height_scale=height_scale):
        # Draw a circle, rescaling its center point to output size.
        center = (int(center[0] * width_scale), int(center[1] * height_scale))
        cv2.circle(img, center, radius, color, thickness)

    print('Loaded {video_path}. Width: {width}, Height: {height}'.format(video_path=video_path, width=width, height=height))

    skipped_frames_counter = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # End of stream or read failure: stop instead of crashing on a
            # None frame in cv2.resize below.  (The original passed `ret`
            # positionally to a named placeholder, which raised KeyError,
            # and never broke out of the loop.)
            print('Error reading frame. cap.read() returned {ret}'.format(ret=ret))
            break
        # YOLOv3 input must be 416x416.
        img = cv2.resize(frame, (img_size, img_size))
        # Annotations (boxes, labels, centroids) are drawn on this image.
        output_img = frame if use_original_video_size_as_output_size else img
        tracker_rects = []
        if skipped_frames_counter == skip_frames:
            # Run a fresh detection pass and rebuild the trackers.
            print('[DETECTING]')
            trackers = []
            skipped_frames_counter = 0
            np_img = np.array(img).reshape(-1, img_size, img_size, 3)
            start_time = time.time()
            predictions = sess.run(model.preds, {inputs: model.preprocess(np_img)})
            print('Detection took %s seconds' % (time.time() - start_time))
            # model.get_boxes returns, per class, a list of detections of the
            # form [x1, y1, x2, y2, confidence].
            detections = model.get_boxes(predictions, np_img.shape[1:3])
            np_detections = np.array(detections)
            for class_index in classes.keys():
                local_count = 0
                class_name = classes[class_index]
                for i in range(len(np_detections[class_index])):
                    box = np_detections[class_index][i]
                    if np_detections[class_index][i][4] >= confidence_level:
                        print('Detected ', class_name, ' with confidence of ', np_detections[class_index][i][4])
                        local_count += 1
                        startX, startY, endX, endY = box[0], box[1], box[2], box[3]
                        drawRectangleCV2(output_img, (startX, startY), (endX, endY), (0, 255, 0), 1)
                        drawTextCV2(output_img, class_name, (startX, startY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                        # Start a dlib correlation tracker on the detected box
                        # so the object can be followed on skipped frames.
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))
                        tracker.start_track(img, rect)
                        trackers.append(tracker)
                print(class_name, ' : ', local_count)
        else:
            # Between detection passes, just update the existing trackers.
            print('[TRACKING]')
            skipped_frames_counter += 1
            for tracker in trackers:
                tracker.update(img)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                tracker_rects.append((startX, startY, endX, endY))
                drawRectangleCV2(output_img, (startX, startY), (endX, endY), (255, 0, 0), 1)
        # Associate previous object centroids with the current rectangles.
        objects = ct.update(tracker_rects)
        for objectID, centroid in objects.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                to.centroids.append(centroid)
            # Count each tracked object exactly once.
            if not to.counted:
                total += 1
                to.counted = True
            trackableObjects[objectID] = to
            object_id = 'ID {}'.format(objectID)
            drawTextCV2(output_img, object_id, (centroid[0] - 10, centroid[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0, 255, 0), -1)
        total_str = 'Total counted: ' + str(total)
        drawTextCV2(output_img, total_str, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        cv2.imshow(video_name, output_img)
        key = cv2.waitKey(1) & 255
        if key == ord('q'):
            break           # quit
        elif key == ord('p'):
            cv2.waitKey(0)  # pause until any key is pressed
cap.release()
cv2.destroyAllWindows()
print('Exited')
<|reserved_special_token_1|>
from tracking.centroidtracker import CentroidTracker
from tracking.trackableobject import TrackableObject
import tensornets as nets
import cv2
import numpy as np
import time
import dlib
import tensorflow.compat.v1 as tf
import os

# The TF1 graph/session API is used below, so TF2 eager behavior is disabled.
# For 'disable_v2_behavior' see https://github.com/theislab/scgen/issues/14
tf.disable_v2_behavior()

# Image size must be '416x416' as YoloV3 network expects that specific image size as input
img_size = 416
inputs = tf.placeholder(tf.float32, [None, img_size, img_size, 3])
model = nets.YOLOv3COCO(inputs, nets.Darknet19)

ct = CentroidTracker(maxDisappeared=5, maxDistance=50)  # Look into 'CentroidTracker' for further info about parameters
trackers = []            # List of all dlib trackers
trackableObjects = {}    # Dictionary of trackable objects containing object's ID and its' corresponding centroid/s
skip_frames = 10         # Number of frames to track between detection passes
confidence_level = 0.40  # The minimum confidence level of a detection
total = 0                # Total number of detected objects from classes of interest
# Shows original video as output instead of the 416x416 network input.
# (Detection still happens at 416x416; only the drawing surface changes.)
use_original_video_size_as_output_size = True

video_path = os.getcwd() + "/videos/M6 Motorway Traffic - Short version.mp4"
video_name = os.path.basename(video_path)

print("Loading video {video_path}...".format(video_path=video_path))
if not os.path.exists(video_path):
    print("File does not exist. Exited.")
    exit()

# From https://github.com/experiencor/keras-yolo3/blob/master/yolo3_one_file_to_detect_them_all.py#L389
# YoloV3 detects the 80 COCO classes listed below, in this order.
all_classes = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
                "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
                "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
                "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
                "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
                "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
                "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
                "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
                "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
                "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]

# Classes of interest (with their corresponding indexes for easier looping)
classes = {1: 'bicycle', 2: 'car', 3: 'motorbike', 5: 'bus', 7: 'truck'}

with tf.Session() as sess:
    sess.run(model.pretrained())

    cap = cv2.VideoCapture(video_path)

    # Get video size (just for log purposes)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Scale used for output window size and net size
    width_scale = 1
    height_scale = 1

    if use_original_video_size_as_output_size:
        width_scale = width / img_size
        height_scale = height / img_size

    def drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=width_scale, height_scale=height_scale):
        # Draw a rectangle, rescaling both corner points to output size.
        point1 = (int(pt1[0] * width_scale), int(pt1[1] * height_scale))
        point2 = (int(pt2[0] * width_scale), int(pt2[1] * height_scale))
        return cv2.rectangle(img, point1, point2, color, thickness)

    def drawTextCV2(img, text, pt, font, font_scale, color, lineType, width_scale=width_scale, height_scale=height_scale):
        # Draw text, rescaling its anchor point to output size.
        pt = (int(pt[0] * width_scale), int(pt[1] * height_scale))
        cv2.putText(img, text, pt, font, font_scale, color, lineType)

    def drawCircleCV2(img, center, radius, color, thickness, width_scale=width_scale, height_scale=height_scale):
        # Draw a circle, rescaling its center point to output size.
        center = (int(center[0] * width_scale), int(center[1] * height_scale))
        cv2.circle(img, center, radius, color, thickness)

    # Python 3.5.6 does not support f-strings (next line would be a syntax error)
    #print(f"Loaded {video_path}. Width: {width}, Height: {height}")
    print("Loaded {video_path}. Width: {width}, Height: {height}".format(video_path=video_path, width=width, height=height))

    skipped_frames_counter = 0

    while(cap.isOpened()):
        ret, frame = cap.read()

        if not ret:
            # End of stream or read failure: stop instead of crashing on a
            # None frame in cv2.resize below.  (The original passed `ret`
            # positionally to a named placeholder, which raised KeyError,
            # and never broke out of the loop.)
            print("Error reading frame. cap.read() returned {ret}".format(ret=ret))
            break

        # Frame must be resized to 'img_size' (because that's what YoloV3 accepts as input)
        img = cv2.resize(frame, (img_size, img_size))

        # Output image is used for drawing annotations (tracking rectangles and detected classes) on the image
        output_img = frame if use_original_video_size_as_output_size else img

        tracker_rects = []

        if skipped_frames_counter == skip_frames:
            # Detecting happens after the number of frames specified by 'skip_frames' has passed
            print("[DETECTING]")

            trackers = []
            skipped_frames_counter = 0  # reset counter

            np_img = np.array(img).reshape(-1, img_size, img_size, 3)

            start_time = time.time()
            predictions = sess.run(model.preds, {inputs: model.preprocess(np_img)})
            print("Detection took %s seconds" % (time.time() - start_time))

            # model.get_boxes returns an 80 element array containing information about detected classes
            # each element contains a list of detected boxes, confidence level ...
            detections = model.get_boxes(predictions, np_img.shape[1:3])
            np_detections = np.array(detections)

            # Loop only through classes we are interested in
            for class_index in classes.keys():
                local_count = 0
                class_name = classes[class_index]

                # Loop through detected infos of a class we are interested in
                for i in range(len(np_detections[class_index])):
                    box = np_detections[class_index][i]

                    if np_detections[class_index][i][4] >= confidence_level:
                        print("Detected ", class_name, " with confidence of ", np_detections[class_index][i][4])
                        local_count += 1
                        startX, startY, endX, endY = box[0], box[1], box[2], box[3]
                        drawRectangleCV2(output_img, (startX, startY), (endX, endY), (0, 255, 0), 1)
                        drawTextCV2(output_img, class_name, (startX, startY), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 1)

                        # Construct a dlib rectangle object from the bounding box coordinates and then start the dlib correlation
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))
                        tracker.start_track(img, rect)

                        # Add the tracker to our list of trackers so we can utilize it during skip frames
                        trackers.append(tracker)

                # Write the total number of detected objects for a given class on this frame
                print(class_name, " : ", local_count)
        else:
            # If detection is not happening then track previously detected objects (if any)
            print("[TRACKING]")
            skipped_frames_counter += 1  # Increase the number of frames for which we did not use detection

            # Loop through trackers, update each of them and display their rectangle
            for tracker in trackers:
                tracker.update(img)
                pos = tracker.get_position()

                # Unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # Add the bounding box coordinates to the tracking rectangles list
                tracker_rects.append((startX, startY, endX, endY))

                # Draw tracking rectangles
                drawRectangleCV2(output_img, (startX, startY), (endX, endY), (255, 0, 0), 1)

        # Use the centroid tracker to associate the (1) old object centroids with (2) the newly computed object centroids
        objects = ct.update(tracker_rects)

        # Loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # Check to see if a trackable object exists for the current object ID
            to = trackableObjects.get(objectID, None)

            if to is None:
                # If there is no existing trackable object, create one
                to = TrackableObject(objectID, centroid)
            else:
                to.centroids.append(centroid)

            # If the object has not been counted, count it and mark it as counted
            if not to.counted:
                total += 1
                to.counted = True

            # Store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # Draw both the ID of the object and the centroid of the object on the output frame
            object_id = "ID {}".format(objectID)
            drawTextCV2(output_img, object_id, (centroid[0] - 10, centroid[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0, 255, 0), -1)

        # Display the total count so far
        total_str = "Total counted: " + str(total)
        drawTextCV2(output_img, total_str, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # Display the current frame (with all annotations drawn up to this point)
        cv2.imshow(video_name, output_img)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):     # QUIT (exits)
            break
        elif key == ord('p'):
            cv2.waitKey(0)      # PAUSE (Enter any key to continue)

cap.release()
cv2.destroyAllWindows()
print("Exited")
|
flexible
|
{
"blob_id": "7b01e81c3e31e0a315ee01f36bf1b1f7384a9d10",
"index": 3597,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntf.disable_v2_behavior()\n<mask token>\nprint('Loading video {video_path}...'.format(video_path=video_path))\nif not os.path.exists(video_path):\n print('File does not exist. Exited.')\n exit()\n<mask token>\nwith tf.Session() as sess:\n sess.run(model.pretrained())\n cap = cv2.VideoCapture(video_path)\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n width_scale = 1\n height_scale = 1\n if use_original_video_size_as_output_size:\n width_scale = width / img_size\n height_scale = height / img_size\n\n def drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=\n width_scale, height_scale=height_scale):\n point1 = int(pt1[0] * width_scale), int(pt1[1] * height_scale)\n point2 = int(pt2[0] * width_scale), int(pt2[1] * height_scale)\n return cv2.rectangle(img, point1, point2, color, thickness)\n\n def drawTextCV2(img, text, pt, font, font_scale, color, lineType,\n width_scale=width_scale, height_scale=height_scale):\n pt = int(pt[0] * width_scale), int(pt[1] * height_scale)\n cv2.putText(img, text, pt, font, font_scale, color, lineType)\n\n def drawCircleCV2(img, center, radius, color, thickness, width_scale=\n width_scale, height_scale=height_scale):\n center = int(center[0] * width_scale), int(center[1] * height_scale)\n cv2.circle(img, center, radius, color, thickness)\n print('Loaded {video_path}. Width: {width}, Height: {height}'.format(\n video_path=video_path, width=width, height=height))\n skipped_frames_counter = 0\n while cap.isOpened():\n ret, frame = cap.read()\n if ret == False:\n print('Error reading frame. 
cap.read() returned {ret}'.format(ret))\n img = cv2.resize(frame, (img_size, img_size))\n output_img = frame if use_original_video_size_as_output_size else img\n tracker_rects = []\n if skipped_frames_counter == skip_frames:\n print('[DETECTING]')\n trackers = []\n skipped_frames_counter = 0\n np_img = np.array(img).reshape(-1, img_size, img_size, 3)\n start_time = time.time()\n predictions = sess.run(model.preds, {inputs: model.preprocess(\n np_img)})\n print('Detection took %s seconds' % (time.time() - start_time))\n detections = model.get_boxes(predictions, np_img.shape[1:3])\n np_detections = np.array(detections)\n for class_index in classes.keys():\n local_count = 0\n class_name = classes[class_index]\n for i in range(len(np_detections[class_index])):\n box = np_detections[class_index][i]\n if np_detections[class_index][i][4] >= confidence_level:\n print('Detected ', class_name,\n ' with confidence of ', np_detections[\n class_index][i][4])\n local_count += 1\n startX, startY, endX, endY = box[0], box[1], box[2\n ], box[3]\n drawRectangleCV2(output_img, (startX, startY), (\n endX, endY), (0, 255, 0), 1)\n drawTextCV2(output_img, class_name, (startX, startY\n ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\n tracker = dlib.correlation_tracker()\n rect = dlib.rectangle(int(startX), int(startY), int\n (endX), int(endY))\n tracker.start_track(img, rect)\n trackers.append(tracker)\n print(class_name, ' : ', local_count)\n else:\n print('[TRACKING]')\n skipped_frames_counter += 1\n for tracker in trackers:\n tracker.update(img)\n pos = tracker.get_position()\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n tracker_rects.append((startX, startY, endX, endY))\n drawRectangleCV2(output_img, (startX, startY), (endX, endY),\n (255, 0, 0), 1)\n objects = ct.update(tracker_rects)\n for objectID, centroid in objects.items():\n to = trackableObjects.get(objectID, None)\n if to is None:\n to = 
TrackableObject(objectID, centroid)\n else:\n to.centroids.append(centroid)\n if not to.counted:\n total += 1\n to.counted = True\n trackableObjects[objectID] = to\n object_id = 'ID {}'.format(objectID)\n drawTextCV2(output_img, object_id, (centroid[0] - 10, centroid[\n 1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0, \n 255, 0), -1)\n total_str = 'Total counted: ' + str(total)\n drawTextCV2(output_img, total_str, (10, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n cv2.imshow(video_name, output_img)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n elif key == ord('p'):\n cv2.waitKey(0)\ncap.release()\ncv2.destroyAllWindows()\nprint('Exited')\n",
"step-3": "<mask token>\ntf.disable_v2_behavior()\nimg_size = 416\ninputs = tf.placeholder(tf.float32, [None, img_size, img_size, 3])\nmodel = nets.YOLOv3COCO(inputs, nets.Darknet19)\nct = CentroidTracker(maxDisappeared=5, maxDistance=50)\ntrackers = []\ntrackableObjects = {}\nskip_frames = 10\nconfidence_level = 0.4\ntotal = 0\nuse_original_video_size_as_output_size = True\nvideo_path = os.getcwd() + '/videos/M6 Motorway Traffic - Short version.mp4'\nvideo_name = os.path.basename(video_path)\nprint('Loading video {video_path}...'.format(video_path=video_path))\nif not os.path.exists(video_path):\n print('File does not exist. Exited.')\n exit()\nall_classes = ['person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n 'sofa', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tvmonitor',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush']\nclasses = {(1): 'bicycle', (2): 'car', (3): 'motorbike', (5): 'bus', (7):\n 'truck'}\nwith tf.Session() as sess:\n sess.run(model.pretrained())\n cap = cv2.VideoCapture(video_path)\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n width_scale = 1\n height_scale = 1\n if use_original_video_size_as_output_size:\n width_scale = width / img_size\n height_scale = height / img_size\n\n def 
drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=\n width_scale, height_scale=height_scale):\n point1 = int(pt1[0] * width_scale), int(pt1[1] * height_scale)\n point2 = int(pt2[0] * width_scale), int(pt2[1] * height_scale)\n return cv2.rectangle(img, point1, point2, color, thickness)\n\n def drawTextCV2(img, text, pt, font, font_scale, color, lineType,\n width_scale=width_scale, height_scale=height_scale):\n pt = int(pt[0] * width_scale), int(pt[1] * height_scale)\n cv2.putText(img, text, pt, font, font_scale, color, lineType)\n\n def drawCircleCV2(img, center, radius, color, thickness, width_scale=\n width_scale, height_scale=height_scale):\n center = int(center[0] * width_scale), int(center[1] * height_scale)\n cv2.circle(img, center, radius, color, thickness)\n print('Loaded {video_path}. Width: {width}, Height: {height}'.format(\n video_path=video_path, width=width, height=height))\n skipped_frames_counter = 0\n while cap.isOpened():\n ret, frame = cap.read()\n if ret == False:\n print('Error reading frame. 
cap.read() returned {ret}'.format(ret))\n img = cv2.resize(frame, (img_size, img_size))\n output_img = frame if use_original_video_size_as_output_size else img\n tracker_rects = []\n if skipped_frames_counter == skip_frames:\n print('[DETECTING]')\n trackers = []\n skipped_frames_counter = 0\n np_img = np.array(img).reshape(-1, img_size, img_size, 3)\n start_time = time.time()\n predictions = sess.run(model.preds, {inputs: model.preprocess(\n np_img)})\n print('Detection took %s seconds' % (time.time() - start_time))\n detections = model.get_boxes(predictions, np_img.shape[1:3])\n np_detections = np.array(detections)\n for class_index in classes.keys():\n local_count = 0\n class_name = classes[class_index]\n for i in range(len(np_detections[class_index])):\n box = np_detections[class_index][i]\n if np_detections[class_index][i][4] >= confidence_level:\n print('Detected ', class_name,\n ' with confidence of ', np_detections[\n class_index][i][4])\n local_count += 1\n startX, startY, endX, endY = box[0], box[1], box[2\n ], box[3]\n drawRectangleCV2(output_img, (startX, startY), (\n endX, endY), (0, 255, 0), 1)\n drawTextCV2(output_img, class_name, (startX, startY\n ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\n tracker = dlib.correlation_tracker()\n rect = dlib.rectangle(int(startX), int(startY), int\n (endX), int(endY))\n tracker.start_track(img, rect)\n trackers.append(tracker)\n print(class_name, ' : ', local_count)\n else:\n print('[TRACKING]')\n skipped_frames_counter += 1\n for tracker in trackers:\n tracker.update(img)\n pos = tracker.get_position()\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n tracker_rects.append((startX, startY, endX, endY))\n drawRectangleCV2(output_img, (startX, startY), (endX, endY),\n (255, 0, 0), 1)\n objects = ct.update(tracker_rects)\n for objectID, centroid in objects.items():\n to = trackableObjects.get(objectID, None)\n if to is None:\n to = 
TrackableObject(objectID, centroid)\n else:\n to.centroids.append(centroid)\n if not to.counted:\n total += 1\n to.counted = True\n trackableObjects[objectID] = to\n object_id = 'ID {}'.format(objectID)\n drawTextCV2(output_img, object_id, (centroid[0] - 10, centroid[\n 1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0, \n 255, 0), -1)\n total_str = 'Total counted: ' + str(total)\n drawTextCV2(output_img, total_str, (10, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n cv2.imshow(video_name, output_img)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n elif key == ord('p'):\n cv2.waitKey(0)\ncap.release()\ncv2.destroyAllWindows()\nprint('Exited')\n",
"step-4": "from tracking.centroidtracker import CentroidTracker\nfrom tracking.trackableobject import TrackableObject\nimport tensornets as nets\nimport cv2\nimport numpy as np\nimport time\nimport dlib\nimport tensorflow.compat.v1 as tf\nimport os\ntf.disable_v2_behavior()\nimg_size = 416\ninputs = tf.placeholder(tf.float32, [None, img_size, img_size, 3])\nmodel = nets.YOLOv3COCO(inputs, nets.Darknet19)\nct = CentroidTracker(maxDisappeared=5, maxDistance=50)\ntrackers = []\ntrackableObjects = {}\nskip_frames = 10\nconfidence_level = 0.4\ntotal = 0\nuse_original_video_size_as_output_size = True\nvideo_path = os.getcwd() + '/videos/M6 Motorway Traffic - Short version.mp4'\nvideo_name = os.path.basename(video_path)\nprint('Loading video {video_path}...'.format(video_path=video_path))\nif not os.path.exists(video_path):\n print('File does not exist. Exited.')\n exit()\nall_classes = ['person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n 'sofa', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tvmonitor',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush']\nclasses = {(1): 'bicycle', (2): 'car', (3): 'motorbike', (5): 'bus', (7):\n 'truck'}\nwith tf.Session() as sess:\n sess.run(model.pretrained())\n cap = cv2.VideoCapture(video_path)\n width = 
int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n width_scale = 1\n height_scale = 1\n if use_original_video_size_as_output_size:\n width_scale = width / img_size\n height_scale = height / img_size\n\n def drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=\n width_scale, height_scale=height_scale):\n point1 = int(pt1[0] * width_scale), int(pt1[1] * height_scale)\n point2 = int(pt2[0] * width_scale), int(pt2[1] * height_scale)\n return cv2.rectangle(img, point1, point2, color, thickness)\n\n def drawTextCV2(img, text, pt, font, font_scale, color, lineType,\n width_scale=width_scale, height_scale=height_scale):\n pt = int(pt[0] * width_scale), int(pt[1] * height_scale)\n cv2.putText(img, text, pt, font, font_scale, color, lineType)\n\n def drawCircleCV2(img, center, radius, color, thickness, width_scale=\n width_scale, height_scale=height_scale):\n center = int(center[0] * width_scale), int(center[1] * height_scale)\n cv2.circle(img, center, radius, color, thickness)\n print('Loaded {video_path}. Width: {width}, Height: {height}'.format(\n video_path=video_path, width=width, height=height))\n skipped_frames_counter = 0\n while cap.isOpened():\n ret, frame = cap.read()\n if ret == False:\n print('Error reading frame. 
cap.read() returned {ret}'.format(ret))\n img = cv2.resize(frame, (img_size, img_size))\n output_img = frame if use_original_video_size_as_output_size else img\n tracker_rects = []\n if skipped_frames_counter == skip_frames:\n print('[DETECTING]')\n trackers = []\n skipped_frames_counter = 0\n np_img = np.array(img).reshape(-1, img_size, img_size, 3)\n start_time = time.time()\n predictions = sess.run(model.preds, {inputs: model.preprocess(\n np_img)})\n print('Detection took %s seconds' % (time.time() - start_time))\n detections = model.get_boxes(predictions, np_img.shape[1:3])\n np_detections = np.array(detections)\n for class_index in classes.keys():\n local_count = 0\n class_name = classes[class_index]\n for i in range(len(np_detections[class_index])):\n box = np_detections[class_index][i]\n if np_detections[class_index][i][4] >= confidence_level:\n print('Detected ', class_name,\n ' with confidence of ', np_detections[\n class_index][i][4])\n local_count += 1\n startX, startY, endX, endY = box[0], box[1], box[2\n ], box[3]\n drawRectangleCV2(output_img, (startX, startY), (\n endX, endY), (0, 255, 0), 1)\n drawTextCV2(output_img, class_name, (startX, startY\n ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\n tracker = dlib.correlation_tracker()\n rect = dlib.rectangle(int(startX), int(startY), int\n (endX), int(endY))\n tracker.start_track(img, rect)\n trackers.append(tracker)\n print(class_name, ' : ', local_count)\n else:\n print('[TRACKING]')\n skipped_frames_counter += 1\n for tracker in trackers:\n tracker.update(img)\n pos = tracker.get_position()\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n tracker_rects.append((startX, startY, endX, endY))\n drawRectangleCV2(output_img, (startX, startY), (endX, endY),\n (255, 0, 0), 1)\n objects = ct.update(tracker_rects)\n for objectID, centroid in objects.items():\n to = trackableObjects.get(objectID, None)\n if to is None:\n to = 
TrackableObject(objectID, centroid)\n else:\n to.centroids.append(centroid)\n if not to.counted:\n total += 1\n to.counted = True\n trackableObjects[objectID] = to\n object_id = 'ID {}'.format(objectID)\n drawTextCV2(output_img, object_id, (centroid[0] - 10, centroid[\n 1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0, \n 255, 0), -1)\n total_str = 'Total counted: ' + str(total)\n drawTextCV2(output_img, total_str, (10, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n cv2.imshow(video_name, output_img)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n elif key == ord('p'):\n cv2.waitKey(0)\ncap.release()\ncv2.destroyAllWindows()\nprint('Exited')\n",
"step-5": "from tracking.centroidtracker import CentroidTracker\nfrom tracking.trackableobject import TrackableObject\nimport tensornets as nets\nimport cv2\nimport numpy as np\nimport time\nimport dlib\nimport tensorflow.compat.v1 as tf\nimport os\n\n# For 'disable_v2_behavior' see https://github.com/theislab/scgen/issues/14\ntf.disable_v2_behavior() \n\n# Image size must be '416x416' as YoloV3 network expects that specific image size as input\nimg_size = 416\ninputs = tf.placeholder(tf.float32, [None, img_size, img_size, 3])\nmodel = nets.YOLOv3COCO(inputs, nets.Darknet19)\n\nct = CentroidTracker(maxDisappeared=5, maxDistance=50) # Look into 'CentroidTracker' for further info about parameters\ntrackers = [] # List of all dlib trackers\ntrackableObjects = {} # Dictionary of trackable objects containing object's ID and its' corresponding centroid/s\nskip_frames = 10 # Numbers of frames to skip from detecting\nconfidence_level = 0.40 # The confidence level of a detection\ntotal = 0 # Total number of detected objects from classes of interest\nuse_original_video_size_as_output_size = True # Shows original video as output and not the 416x416 image that is used as yolov3 input (NOTE: Detection still happens with 416x416 img size but the output is displayed in original video size if this parameter is True)\n\nvideo_path = os.getcwd() + \"/videos/M6 Motorway Traffic - Short version.mp4\"\nvideo_name = os.path.basename(video_path)\n\nprint(\"Loading video {video_path}...\".format(video_path=video_path))\nif not os.path.exists(video_path):\n print(\"File does not exist. 
Exited.\")\n exit()\n\n# From https://github.com/experiencor/keras-yolo3/blob/master/yolo3_one_file_to_detect_them_all.py#L389\n# YoloV3 detects 80 classes represented below\nall_classes = [\"person\", \"bicycle\", \"car\", \"motorbike\", \"aeroplane\", \"bus\", \"train\", \"truck\", \\\n \"boat\", \"traffic light\", \"fire hydrant\", \"stop sign\", \"parking meter\", \"bench\", \\\n \"bird\", \"cat\", \"dog\", \"horse\", \"sheep\", \"cow\", \"elephant\", \"bear\", \"zebra\", \"giraffe\", \\\n \"backpack\", \"umbrella\", \"handbag\", \"tie\", \"suitcase\", \"frisbee\", \"skis\", \"snowboard\", \\\n \"sports ball\", \"kite\", \"baseball bat\", \"baseball glove\", \"skateboard\", \"surfboard\", \\\n \"tennis racket\", \"bottle\", \"wine glass\", \"cup\", \"fork\", \"knife\", \"spoon\", \"bowl\", \"banana\", \\\n \"apple\", \"sandwich\", \"orange\", \"broccoli\", \"carrot\", \"hot dog\", \"pizza\", \"donut\", \"cake\", \\\n \"chair\", \"sofa\", \"pottedplant\", \"bed\", \"diningtable\", \"toilet\", \"tvmonitor\", \"laptop\", \"mouse\", \\\n \"remote\", \"keyboard\", \"cell phone\", \"microwave\", \"oven\", \"toaster\", \"sink\", \"refrigerator\", \\\n \"book\", \"clock\", \"vase\", \"scissors\", \"teddy bear\", \"hair drier\", \"toothbrush\"]\n\n# Classes of interest (with their corresponding indexes for easier looping)\nclasses = { 1 : 'bicycle', 2 : 'car', 3 : 'motorbike', 5 : 'bus', 7 : 'truck' }\n\nwith tf.Session() as sess:\n sess.run(model.pretrained())\n cap = cv2.VideoCapture(video_path)\n\n # Get video size (just for log purposes)\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Scale used for output window size and net size\n width_scale = 1\n height_scale = 1\n \n if use_original_video_size_as_output_size:\n width_scale = width / img_size\n height_scale = height / img_size\n\n def drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=width_scale, height_scale=height_scale):\n point1 = 
(int(pt1[0] * width_scale), int(pt1[1] * height_scale))\n point2 = (int(pt2[0] * width_scale), int(pt2[1] * height_scale))\n return cv2.rectangle(img, point1, point2, color, thickness)\n \n def drawTextCV2(img, text, pt, font, font_scale, color, lineType, width_scale=width_scale, height_scale=height_scale):\n pt = (int(pt[0] * width_scale), int(pt[1] * height_scale))\n cv2.putText(img, text, pt, font, font_scale, color, lineType)\n \n def drawCircleCV2(img, center, radius, color, thickness, width_scale=width_scale, height_scale=height_scale):\n center = (int(center[0] * width_scale), int(center[1] * height_scale))\n cv2.circle(img, center, radius, color, thickness)\n\n # Python 3.5.6 does not support f-strings (next line will generate syntax error)\n #print(f\"Loaded {video_path}. Width: {width}, Height: {height}\")\n print(\"Loaded {video_path}. Width: {width}, Height: {height}\".format(video_path=video_path, width=width, height=height))\n \n skipped_frames_counter = 0\n\n while(cap.isOpened()):\n ret, frame = cap.read()\n if ret == False:\n print(\"Error reading frame. 
cap.read() returned {ret}\".format(ret))\n \n # Frame must be resized to 'img_size' (because that's what YoloV3 accepts as input)\n img = cv2.resize(frame, (img_size, img_size))\n # Output image is used for drawing annotations (tracking rectangles and detected classes) on the image\n output_img = frame if use_original_video_size_as_output_size else img\n \n tracker_rects = []\n\n if skipped_frames_counter == skip_frames:\n \n # Detecting happens after number of frames have passes specified by 'skip_frames' variable value\n print(\"[DETECTING]\")\n \n trackers = []\n skipped_frames_counter = 0 # reset counter\n \n np_img = np.array(img).reshape(-1, img_size, img_size, 3)\n\n start_time=time.time()\n predictions = sess.run(model.preds, {inputs: model.preprocess(np_img)})\n print(\"Detection took %s seconds\" % (time.time() - start_time)) \n\n # model.get_boxes returns a 80 element array containing information about detected classes \n # each element contains a list of detected boxes, confidence level ...\n detections = model.get_boxes(predictions, np_img.shape[1:3])\n np_detections = np.array(detections)\n\n # Loop only through classes we are interested in\n for class_index in classes.keys():\n local_count = 0\n class_name = classes[class_index]\n\n # Loop through detected infos of a class we are interested in\n for i in range(len(np_detections[class_index])):\n box = np_detections[class_index][i] \n\n if np_detections[class_index][i][4] >= confidence_level:\n print(\"Detected \", class_name, \" with confidence of \", np_detections[class_index][i][4])\n\n local_count += 1\n startX, startY, endX, endY = box[0], box[1], box[2], box[3]\n \n drawRectangleCV2(output_img, (startX, startY), (endX, endY), (0, 255, 0), 1)\n drawTextCV2(output_img, class_name, (startX, startY), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 1)\n\n # Construct a dlib rectangle object from the bounding box coordinates and then start the dlib correlation\n tracker = dlib.correlation_tracker()\n rect 
= dlib.rectangle(int(startX), int(startY), int(endX), int(endY))\n tracker.start_track(img, rect)\n\n # Add the tracker to our list of trackers so we can utilize it during skip frames\n trackers.append(tracker)\n\n # Write the total number of detected objects for a given class on this frame\n print(class_name,\" : \", local_count)\n else:\n # If detection is not happening then track previously detected objects (if any)\n print(\"[TRACKING]\")\n\n skipped_frames_counter += 1 # Increase the number frames for which we did not use detection\n\n # Loop through tracker, update each of them and display their rectangle\n for tracker in trackers:\n tracker.update(img)\n pos = tracker.get_position()\n\n\t\t\t # Unpack the position object\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n \n # Add the bounding box coordinates to the tracking rectangles list\n tracker_rects.append((startX, startY, endX, endY))\n \n # Draw tracking rectangles\n drawRectangleCV2(output_img, (startX, startY), (endX, endY), (255, 0, 0), 1)\n\n\n\n # Use the centroid tracker to associate the (1) old object centroids with (2) the newly computed object centroids\n objects = ct.update(tracker_rects)\n\n # Loop over the tracked objects\n for (objectID, centroid) in objects.items():\n # Check to see if a trackable object exists for the current object ID\n to = trackableObjects.get(objectID, None)\n\n if to is None:\n # If there is no existing trackable object, create one\n to = TrackableObject(objectID, centroid)\n else:\n to.centroids.append(centroid)\n\n # If the object has not been counted, count it and mark it as counted\n if not to.counted:\n total += 1\n to.counted = True\n\n # Store the trackable object in our dictionary\n trackableObjects[objectID] = to\n\n # Draw both the ID of the object and the centroid of the object on the output frame\n object_id = \"ID {}\".format(objectID)\n drawTextCV2(output_img, object_id, (centroid[0] - 10, 
centroid[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0, 255, 0), -1)\n\n # Display the total count so far\n total_str = \"Total counted: \" + str(total)\n drawTextCV2(output_img, total_str, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n\n # Display the current frame (with all annotations drawn up to this point)\n cv2.imshow(video_name, output_img)\n \n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'): # QUIT (exits)\n break \n elif key == ord('p'):\n cv2.waitKey(0) # PAUSE (Enter any key to continue)\n\ncap.release()\ncv2.destroyAllWindows()\nprint(\"Exited\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Solution(object):
def removeStones(self, stones):
"""
:type stones: List[List[int]]
:rtype: int
"""
stones_share_list = []
for i in range(len(stones)):
stones_share_list.append(0)
for i in range(len(stones)):
check_stone = stones[i]
connect_count = 0
for j in range(len(stones)):
if i is j:
continue
if check_stone[0] is stones[j][0] or check_stone[1] is stones[j
][1]:
connect_count += 1
stones_share_list[i] = connect_count
connect_sum = 0
for share in stones_share_list:
connect_sum += share
if connect_sum is 0:
return 0
island = 0
print(stones_share_list)
for connect in stones_share_list:
if connect is 0:
island += 1
print(island)
return len(stones) - (island + 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
def removeStones(self, stones):
"""
:type stones: List[List[int]]
:rtype: int
"""
stones_share_list = []
for i in range(len(stones)):
stones_share_list.append(0)
for i in range(len(stones)):
check_stone = stones[i]
connect_count = 0
for j in range(len(stones)):
if i is j:
continue
if check_stone[0] is stones[j][0] or check_stone[1] is stones[j
][1]:
connect_count += 1
stones_share_list[i] = connect_count
connect_sum = 0
for share in stones_share_list:
connect_sum += share
if connect_sum is 0:
return 0
island = 0
print(stones_share_list)
for connect in stones_share_list:
if connect is 0:
island += 1
print(island)
return len(stones) - (island + 1)
<|reserved_special_token_0|>
print(s.removeStones(temp_value))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
def removeStones(self, stones):
"""
:type stones: List[List[int]]
:rtype: int
"""
stones_share_list = []
for i in range(len(stones)):
stones_share_list.append(0)
for i in range(len(stones)):
check_stone = stones[i]
connect_count = 0
for j in range(len(stones)):
if i is j:
continue
if check_stone[0] is stones[j][0] or check_stone[1] is stones[j
][1]:
connect_count += 1
stones_share_list[i] = connect_count
connect_sum = 0
for share in stones_share_list:
connect_sum += share
if connect_sum is 0:
return 0
island = 0
print(stones_share_list)
for connect in stones_share_list:
if connect is 0:
island += 1
print(island)
return len(stones) - (island + 1)
s = Solution()
temp_value = [[3, 2], [3, 1], [4, 4], [1, 1], [0, 2], [4, 0]]
print(s.removeStones(temp_value))
<|reserved_special_token_1|>
import collections
class Solution(object):
def removeStones(self, stones):
"""
:type stones: List[List[int]]
:rtype: int
"""
stones_share_list = []
for i in range(len(stones)):
stones_share_list.append(0)
for i in range(len(stones)):
check_stone = stones[i]
connect_count = 0
for j in range(len(stones)):
if i is j:
continue
if check_stone[0] is stones[j][0] or check_stone[1] is stones[j
][1]:
connect_count += 1
stones_share_list[i] = connect_count
connect_sum = 0
for share in stones_share_list:
connect_sum += share
if connect_sum is 0:
return 0
island = 0
print(stones_share_list)
for connect in stones_share_list:
if connect is 0:
island += 1
print(island)
return len(stones) - (island + 1)
s = Solution()
temp_value = [[3, 2], [3, 1], [4, 4], [1, 1], [0, 2], [4, 0]]
print(s.removeStones(temp_value))
<|reserved_special_token_1|>
# 문제 풀이 진행중..(나중에 재도전)
import collections
class Solution(object):
def removeStones(self, stones):
"""
:type stones: List[List[int]]
:rtype: int
"""
# 전체 연결점 개수 확인한다.
# 개수가 적은 것 부터 처리한다
# # 연결된 게 0개인 애들은 제외
#
# data init
stones_share_list = []
for i in range(len(stones)):
stones_share_list.append(0)
# set data(connecting count of stones)
for i in range(len(stones)):
check_stone = stones[i]
connect_count = 0
for j in range(len(stones)):
if i is j:
continue
if check_stone[0] is stones[j][0] or check_stone[1] is stones[j][1]:
connect_count += 1
stones_share_list[i] = connect_count
connect_sum = 0
for share in stones_share_list:
connect_sum += share
if connect_sum is 0:
return 0
island = 0
print(stones_share_list)
for connect in stones_share_list:
if connect is 0:
island += 1
print(island)
return len(stones) - (island + 1)
s = Solution()
# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2],[2,3]]
# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2]]
# temp_value = [[0,0],[0,2],[1,1],[2,0],[2,2]]
temp_value = [[3,2],[3,1],[4,4],[1,1],[0,2],[4,0]]
print(s.removeStones(temp_value))
|
flexible
|
{
"blob_id": "896329a8b14d79f849e4a8c31c697f3981395790",
"index": 3327,
"step-1": "<mask token>\n\n\nclass Solution(object):\n\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j\n ][1]:\n connect_count += 1\n stones_share_list[i] = connect_count\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n if connect_sum is 0:\n return 0\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j\n ][1]:\n connect_count += 1\n stones_share_list[i] = connect_count\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n if connect_sum is 0:\n return 0\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\n<mask token>\nprint(s.removeStones(temp_value))\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j\n ][1]:\n connect_count += 1\n stones_share_list[i] = connect_count\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n if connect_sum is 0:\n return 0\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\ns = Solution()\ntemp_value = [[3, 2], [3, 1], [4, 4], [1, 1], [0, 2], [4, 0]]\nprint(s.removeStones(temp_value))\n",
"step-4": "import collections\n\n\nclass Solution(object):\n\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j\n ][1]:\n connect_count += 1\n stones_share_list[i] = connect_count\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n if connect_sum is 0:\n return 0\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\ns = Solution()\ntemp_value = [[3, 2], [3, 1], [4, 4], [1, 1], [0, 2], [4, 0]]\nprint(s.removeStones(temp_value))\n",
"step-5": "# 문제 풀이 진행중..(나중에 재도전)\nimport collections\nclass Solution(object):\n def removeStones(self, stones):\n \"\"\"\n :type stones: List[List[int]]\n :rtype: int\n \"\"\"\n # 전체 연결점 개수 확인한다.\n # 개수가 적은 것 부터 처리한다\n # # 연결된 게 0개인 애들은 제외\n #\n\n # data init\n stones_share_list = []\n for i in range(len(stones)):\n stones_share_list.append(0)\n\n # set data(connecting count of stones)\n for i in range(len(stones)):\n check_stone = stones[i]\n connect_count = 0\n for j in range(len(stones)):\n if i is j:\n continue\n if check_stone[0] is stones[j][0] or check_stone[1] is stones[j][1]:\n connect_count += 1\n\n stones_share_list[i] = connect_count\n\n connect_sum = 0\n for share in stones_share_list:\n connect_sum += share\n\n if connect_sum is 0:\n return 0\n\n island = 0\n print(stones_share_list)\n for connect in stones_share_list:\n if connect is 0:\n island += 1\n print(island)\n return len(stones) - (island + 1)\n\n\ns = Solution()\n\n# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2],[2,3]]\n# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2]]\n# temp_value = [[0,0],[0,2],[1,1],[2,0],[2,2]]\ntemp_value = [[3,2],[3,1],[4,4],[1,1],[0,2],[4,0]]\nprint(s.removeStones(temp_value))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
t = eval(input())
while t:
t -= 1
y = []
z = []
x = str(input())
for i in range(len(x)):
if (not int(i)%2):
y.append(x[i])
else:
z.append(x[i])
print("".join(y) + " " + "".join(z))
|
normal
|
{
"blob_id": "ac32fb5fcd71790f9dbf0794992a9dc92a202c9b",
"index": 7972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if not int(i) % 2:\n y.append(x[i])\n else:\n z.append(x[i])\n print(''.join(y) + ' ' + ''.join(z))\n",
"step-3": "t = eval(input())\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if not int(i) % 2:\n y.append(x[i])\n else:\n z.append(x[i])\n print(''.join(y) + ' ' + ''.join(z))\n",
"step-4": "t = eval(input())\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if (not int(i)%2):\n y.append(x[i])\n else:\n z.append(x[i])\n print(\"\".join(y) + \" \" + \"\".join(z))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from abc import abstractmethod
class Environment:
@abstractmethod
def __init__(self, agent):
pass
@abstractmethod
def execute_step(self, n=1):
pass
@abstractmethod
def execute_all(self):
pass
@abstractmethod
def set_delay(self, delay):
pass
|
normal
|
{
"blob_id": "8698aedc5c8671f46c73898a7188440254b79bbf",
"index": 307,
"step-1": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n <mask token>\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-3": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-4": "from abc import abstractmethod\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
<|reserved_special_token_0|>
class CB030Ticker(Device):
def __init__(self, args, **options):
super().__init__(args=args, name='CB030Ticker', required_options=[
'address'], **options)
self.size = 4096
self._tick_cycles = int(self.emu.cycle_rate / 100)
self.reset()
def reset(self):
self._stop()
self._tick_fired = False
def access(self, operation, offset, size, value):
if offset < 2048:
self._stop()
else:
self._start()
def _stop(self):
self.callback_cancel('tick')
self._ticker_on = False
<|reserved_special_token_0|>
def _tick(self):
if self._ticker_on:
self._tick_fired = True
self.assert_ipl()
def get_vector(self):
if self._tick_fired:
self._tick_fired = False
return M68K_IRQ_AUTOVECTOR
return M68K_IRQ_SPURIOUS
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CB030Remap(Device):
def __init__(self, args, **options):
super().__init__(args=args, name='CB030Remap', required_options=[
'address'], **options)
self.size = 4096
self._did_remap = False
self._dram_size = args.dram_size
def access(self, operation, offset, size, value):
if not self._did_remap:
self.emu.remove_memory(base=0)
self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)
return 0
class CB030Ticker(Device):
def __init__(self, args, **options):
super().__init__(args=args, name='CB030Ticker', required_options=[
'address'], **options)
self.size = 4096
self._tick_cycles = int(self.emu.cycle_rate / 100)
self.reset()
def reset(self):
self._stop()
self._tick_fired = False
def access(self, operation, offset, size, value):
if offset < 2048:
self._stop()
else:
self._start()
def _stop(self):
self.callback_cancel('tick')
self._ticker_on = False
def _start(self):
if not self._ticker_on:
self.callback_every(self._tick_cycles, 'tick', self._tick)
self._ticker_on = True
def _tick(self):
if self._ticker_on:
self._tick_fired = True
self.assert_ipl()
def get_vector(self):
if self._tick_fired:
self._tick_fired = False
return M68K_IRQ_AUTOVECTOR
return M68K_IRQ_SPURIOUS
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_arguments(parser):
parser.add_argument('--rom', type=str, help='ROM image')
parser.add_argument('--dram-size', type=int, default=16, help=
'DRAM size; boards may have 16, 64 or 128M')
parser.add_argument('--cf-width', type=int, default=8, help=
'CompactFlash interface width, 8 or 16')
CompactFlash.add_arguments(parser)
MC68681.add_arguments(parser)
class CB030Remap(Device):
def __init__(self, args, **options):
super().__init__(args=args, name='CB030Remap', required_options=[
'address'], **options)
self.size = 4096
self._did_remap = False
self._dram_size = args.dram_size
def access(self, operation, offset, size, value):
if not self._did_remap:
self.emu.remove_memory(base=0)
self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)
return 0
class CB030Ticker(Device):
def __init__(self, args, **options):
super().__init__(args=args, name='CB030Ticker', required_options=[
'address'], **options)
self.size = 4096
self._tick_cycles = int(self.emu.cycle_rate / 100)
self.reset()
def reset(self):
self._stop()
self._tick_fired = False
def access(self, operation, offset, size, value):
if offset < 2048:
self._stop()
else:
self._start()
def _stop(self):
self.callback_cancel('tick')
self._ticker_on = False
def _start(self):
if not self._ticker_on:
self.callback_every(self._tick_cycles, 'tick', self._tick)
self._ticker_on = True
def _tick(self):
if self._ticker_on:
self._tick_fired = True
self.assert_ipl()
def get_vector(self):
if self._tick_fired:
self._tick_fired = False
return M68K_IRQ_AUTOVECTOR
return M68K_IRQ_SPURIOUS
def configure(args):
"""create and configure an emulator"""
emu = Emulator(args, cpu='68030', frequency=24 * 1000 * 1000)
emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)
emu.add_memory(base=4261412864, size=512 * 1024, writable=False,
from_file=args.rom)
emu.add_device(args, MC68681, address=4294963200, interrupt=m68k.IRQ_2,
register_arrangement='16-bit-doubled')
emu.add_device(args, CompactFlash, address=4294959104,
register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')
emu.add_device(args, CB030Remap, address=4294934528)
emu.add_device(args, CB030Ticker, address=4294938624, interrupt=m68k.IRQ_6)
return emu
<|reserved_special_token_1|>
from emulator import Emulator
from device import Device
from devices.compactflash import CompactFlash
from devices.mc68681 import MC68681
from musashi import m68k
def add_arguments(parser):
parser.add_argument('--rom', type=str, help='ROM image')
parser.add_argument('--dram-size', type=int, default=16, help=
'DRAM size; boards may have 16, 64 or 128M')
parser.add_argument('--cf-width', type=int, default=8, help=
'CompactFlash interface width, 8 or 16')
CompactFlash.add_arguments(parser)
MC68681.add_arguments(parser)
class CB030Remap(Device):
def __init__(self, args, **options):
super().__init__(args=args, name='CB030Remap', required_options=[
'address'], **options)
self.size = 4096
self._did_remap = False
self._dram_size = args.dram_size
def access(self, operation, offset, size, value):
if not self._did_remap:
self.emu.remove_memory(base=0)
self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)
return 0
class CB030Ticker(Device):
def __init__(self, args, **options):
super().__init__(args=args, name='CB030Ticker', required_options=[
'address'], **options)
self.size = 4096
self._tick_cycles = int(self.emu.cycle_rate / 100)
self.reset()
def reset(self):
self._stop()
self._tick_fired = False
def access(self, operation, offset, size, value):
if offset < 2048:
self._stop()
else:
self._start()
def _stop(self):
self.callback_cancel('tick')
self._ticker_on = False
def _start(self):
if not self._ticker_on:
self.callback_every(self._tick_cycles, 'tick', self._tick)
self._ticker_on = True
def _tick(self):
if self._ticker_on:
self._tick_fired = True
self.assert_ipl()
def get_vector(self):
if self._tick_fired:
self._tick_fired = False
return M68K_IRQ_AUTOVECTOR
return M68K_IRQ_SPURIOUS
def configure(args):
"""create and configure an emulator"""
emu = Emulator(args, cpu='68030', frequency=24 * 1000 * 1000)
emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)
emu.add_memory(base=4261412864, size=512 * 1024, writable=False,
from_file=args.rom)
emu.add_device(args, MC68681, address=4294963200, interrupt=m68k.IRQ_2,
register_arrangement='16-bit-doubled')
emu.add_device(args, CompactFlash, address=4294959104,
register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')
emu.add_device(args, CB030Remap, address=4294934528)
emu.add_device(args, CB030Ticker, address=4294938624, interrupt=m68k.IRQ_6)
return emu
<|reserved_special_token_1|>
from emulator import Emulator
from device import Device
from devices.compactflash import CompactFlash
from devices.mc68681 import MC68681
from musashi import m68k
def add_arguments(parser):
parser.add_argument('--rom',
type=str,
help='ROM image')
parser.add_argument('--dram-size',
type=int,
default=16,
help='DRAM size; boards may have 16, 64 or 128M')
parser.add_argument('--cf-width',
type=int,
default=8,
help='CompactFlash interface width, 8 or 16')
CompactFlash.add_arguments(parser)
MC68681.add_arguments(parser)
class CB030Remap(Device):
def __init__(self, args, **options):
super().__init__(args=args,
name='CB030Remap',
required_options=['address'],
**options)
# no registers, just a 4k aperture
self.size = 0x1000
self._did_remap = False
self._dram_size = args.dram_size
def access(self, operation, offset, size, value):
if not self._did_remap:
# remove the low alias of the EEPROM
self.emu.remove_memory(base=0)
# and add the previously-masked DRAM
self.emu.add_memory(base=0x0000000, size=self._dram_size * 1024 * 1024)
return 0
class CB030Ticker(Device):
def __init__(self, args, **options):
super().__init__(args=args,
name='CB030Ticker',
required_options=['address'],
**options)
# no registers, just a 4k aperture
self.size = 0x1000
# core clock @ 24MHz, 100Hz tick rate
self._tick_cycles = int(self.emu.cycle_rate / 100)
self.reset()
def reset(self):
self._stop()
self._tick_fired = False
def access(self, operation, offset, size, value):
if offset < 0x800:
self._stop()
else:
self._start()
def _stop(self):
self.callback_cancel('tick')
self._ticker_on = False
def _start(self):
if not self._ticker_on:
self.callback_every(self._tick_cycles, 'tick', self._tick)
self._ticker_on = True
def _tick(self):
if self._ticker_on:
self._tick_fired = True
self.assert_ipl()
def get_vector(self):
if self._tick_fired:
self._tick_fired = False
return M68K_IRQ_AUTOVECTOR
return M68K_IRQ_SPURIOUS
def configure(args):
"""create and configure an emulator"""
emu = Emulator(args,
cpu='68030',
frequency=24 * 1000 * 1000)
# initially only the EEPROM exists; aliased at 0 all the way up to 0xfe000000
# we only map the low and high aliases, as the intermediates aren't interesting
emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)
emu.add_memory(base=0xfe000000, size=512 * 1024, writable=False, from_file=args.rom)
emu.add_device(args,
MC68681,
address=0xfffff000,
interrupt=m68k.IRQ_2,
register_arrangement='16-bit-doubled')
emu.add_device(args,
CompactFlash,
address=0xffffe000,
register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')
emu.add_device(args,
CB030Remap,
address=0xffff8000)
emu.add_device(args,
CB030Ticker,
address=0xffff9000,
interrupt=m68k.IRQ_6)
return emu
|
flexible
|
{
"blob_id": "9eef202a42bfc10b2f52d1b9153d664c5046c13f",
"index": 1965,
"step-1": "<mask token>\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n <mask token>\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom', type=str, help='ROM image')\n parser.add_argument('--dram-size', type=int, default=16, help=\n 'DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width', type=int, default=8, help=\n 'CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n emu = Emulator(args, cpu='68030', frequency=24 * 1000 * 1000)\n emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=4261412864, size=512 * 1024, writable=False,\n 
from_file=args.rom)\n emu.add_device(args, MC68681, address=4294963200, interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args, CompactFlash, address=4294959104,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args, CB030Remap, address=4294934528)\n emu.add_device(args, CB030Ticker, address=4294938624, interrupt=m68k.IRQ_6)\n return emu\n",
"step-4": "from emulator import Emulator\nfrom device import Device\nfrom devices.compactflash import CompactFlash\nfrom devices.mc68681 import MC68681\nfrom musashi import m68k\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom', type=str, help='ROM image')\n parser.add_argument('--dram-size', type=int, default=16, help=\n 'DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width', type=int, default=8, help=\n 'CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n emu = Emulator(args, cpu='68030', frequency=24 * 1000 * 1000)\n 
emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=4261412864, size=512 * 1024, writable=False,\n from_file=args.rom)\n emu.add_device(args, MC68681, address=4294963200, interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args, CompactFlash, address=4294959104,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args, CB030Remap, address=4294934528)\n emu.add_device(args, CB030Ticker, address=4294938624, interrupt=m68k.IRQ_6)\n return emu\n",
"step-5": "from emulator import Emulator\nfrom device import Device\nfrom devices.compactflash import CompactFlash\nfrom devices.mc68681 import MC68681\nfrom musashi import m68k\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom',\n type=str,\n help='ROM image')\n parser.add_argument('--dram-size',\n type=int,\n default=16,\n help='DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width',\n type=int,\n default=8,\n help='CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n def __init__(self, args, **options):\n super().__init__(args=args,\n name='CB030Remap',\n required_options=['address'],\n **options)\n\n # no registers, just a 4k aperture\n self.size = 0x1000\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n # remove the low alias of the EEPROM\n self.emu.remove_memory(base=0)\n\n # and add the previously-masked DRAM\n self.emu.add_memory(base=0x0000000, size=self._dram_size * 1024 * 1024)\n\n return 0\n\n\nclass CB030Ticker(Device):\n def __init__(self, args, **options):\n super().__init__(args=args,\n name='CB030Ticker',\n required_options=['address'],\n **options)\n\n # no registers, just a 4k aperture\n self.size = 0x1000\n # core clock @ 24MHz, 100Hz tick rate\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 0x800:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n 
self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n\n emu = Emulator(args,\n cpu='68030',\n frequency=24 * 1000 * 1000)\n # initially only the EEPROM exists; aliased at 0 all the way up to 0xfe000000\n # we only map the low and high aliases, as the intermediates aren't interesting\n emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=0xfe000000, size=512 * 1024, writable=False, from_file=args.rom)\n\n emu.add_device(args,\n MC68681,\n address=0xfffff000,\n interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args,\n CompactFlash,\n address=0xffffe000,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args,\n CB030Remap,\n address=0xffff8000)\n emu.add_device(args,\n CB030Ticker,\n address=0xffff9000,\n interrupt=m68k.IRQ_6)\n return emu\n",
"step-ids": [
7,
11,
13,
14,
15
]
}
|
[
7,
11,
13,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_good(arr):
for i in range(1, len(arr) // 2 + 1):
if arr[-i:] == arr[-(i * 2):-i]:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solve(bt):
if len(bt) == n:
print(*bt, sep='')
exit()
for i in [1, 2, 3]:
if is_good(bt + [i]):
solve(bt + [i])
def is_good(arr):
for i in range(1, len(arr) // 2 + 1):
if arr[-i:] == arr[-(i * 2):-i]:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solve(bt):
if len(bt) == n:
print(*bt, sep='')
exit()
for i in [1, 2, 3]:
if is_good(bt + [i]):
solve(bt + [i])
def is_good(arr):
for i in range(1, len(arr) // 2 + 1):
if arr[-i:] == arr[-(i * 2):-i]:
return False
return True
if __name__ == '__main__':
n = int(input())
solve([1])
<|reserved_special_token_1|>
def solve(bt):
if len(bt) == n:
print(*bt, sep="")
exit()
for i in [1, 2, 3]:
if is_good(bt + [i]):
solve(bt + [i])
def is_good(arr):
for i in range(1, len(arr)//2+1):
if arr[-i:] == arr[-(i*2):-i]:
return False
return True
if __name__ == "__main__":
n = int(input())
solve([1])
|
flexible
|
{
"blob_id": "65d5cee6899b0b75474e3898459bf2cfa8b3635b",
"index": 1042,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\n<mask token>\n",
"step-4": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\nif __name__ == '__main__':\n n = int(input())\n solve([1])\n",
"step-5": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep=\"\")\n exit()\n\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr)//2+1):\n if arr[-i:] == arr[-(i*2):-i]:\n return False\n return True\n\nif __name__ == \"__main__\":\n n = int(input())\n\n solve([1])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ClientConnector(object):
<|reserved_special_token_0|>
def __init__(self, host=None, port=None):
self._host = host
if port:
self._port = port
else:
from quartjes.connector.server import default_port
self._port = default_port
self._factory = QuartjesClientFactory()
self._database = None
self._stock_exchange = None
self._connection = None
@property
def host(self):
"""
Hostname to connect to.
Can only be changed when there is no active connection.
"""
return self._host
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@port.setter
def port(self, value):
assert not self.is_connected(
), 'Port should not be changed will connected.'
self._port = value
@property
def factory(self):
"""
The protocol factory used by the client to connect to the server.
You normally should not need to access this. It is for advanced options.
"""
return self._factory
@property
def database(self):
"""
Reference to the currently running
:class:`Database <quartjes.controllers.database.Database>`.
This can be a proxy to the database on the server or a local database.
"""
return self._database
@property
def stock_exchange(self):
"""
Reference to the currently running
:class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`.
This can be a proxy to the stock exchange on the server or a local stock exchange.
"""
return self._stock_exchange
def start(self):
"""
Start the connector and create a connection to the server. Starts a
reactor loop in a separate thread.
"""
if not self._host:
print('No host selected, starting local instance.')
self._database = quartjes.controllers.database.default_database()
self._stock_exchange = (quartjes.controllers.stock_exchange2.
StockExchange2())
else:
reactor.callLater(0, self._connect)
if not reactor.running:
self._reactor_thread = ClientConnector._ReactorThread()
self._reactor_thread.start()
self._factory.wait_for_connection()
self._database = self.get_service_interface('database')
self._stock_exchange = self.get_service_interface('stock_exchange')
<|reserved_special_token_0|>
def get_service_interface(self, service_name):
"""
Construct a service interface for the service with the given name. Use
the service interface to send requests to the corresponding service
on the Quartjes server.
Parameters
----------
service_name : string
Name of the service on the server to which you want a remote
interface.
Returns
-------
service_interface : :class:`quartjes.connector.services.ServiceInterface`
An interface to the service.
Please note that the existence of the service on the server is not
verified until an actual method call has been done.
"""
return ServiceInterface(self._factory, service_name)
def is_connected(self):
"""
Determine whether the connection to the server is active.
A local service is also considered connected.
Returns
-------
connected : boolean
True if connected, False if not.
"""
if not self._host:
if self._database:
return True
else:
return False
else:
return self._factory.is_connected()
def _connect(self):
"""
Internal method called from the reactor to start a new connection.
"""
self._connection = reactor.connectTCP(self.host, self.port, self.
factory)
def _disconnect(self):
"""
Internal method called from the reactor to shut down a connection.
"""
self._factory.stopTrying()
self._connection.disconnect()
class _ReactorThread(Thread):
"""
Thread for running the reactor loop. This thread runs as a daemon, so
if the main thread and any non daemon threads end, the reactor also
stops running allowing the application to exit.
"""
def __init__(self):
Thread.__init__(self, name='ReactorThread')
self.daemon = True
def run(self):
reactor.run(installSignalHandlers=0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClientConnector(object):
"""
Client side endpoint of the Quartjes connector.
Parameters
----------
host : string
Host to connect to. If no host is specified, a local server is started.
port : int
Port to connect to.
Attributes
----------
host
port
factory
database
stock_exchange
"""
def __init__(self, host=None, port=None):
self._host = host
if port:
self._port = port
else:
from quartjes.connector.server import default_port
self._port = default_port
self._factory = QuartjesClientFactory()
self._database = None
self._stock_exchange = None
self._connection = None
@property
def host(self):
"""
Hostname to connect to.
Can only be changed when there is no active connection.
"""
return self._host
@host.setter
def host(self, value):
assert not self.is_connected(
), 'Host should not be changed will connected.'
self._host = value
@property
def port(self):
"""
Port to connect to.
Can only be changed when there is no active connection.
"""
return self._port
@port.setter
def port(self, value):
assert not self.is_connected(
), 'Port should not be changed will connected.'
self._port = value
@property
def factory(self):
"""
The protocol factory used by the client to connect to the server.
You normally should not need to access this. It is for advanced options.
"""
return self._factory
@property
def database(self):
"""
Reference to the currently running
:class:`Database <quartjes.controllers.database.Database>`.
This can be a proxy to the database on the server or a local database.
"""
return self._database
@property
def stock_exchange(self):
"""
Reference to the currently running
:class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`.
This can be a proxy to the stock exchange on the server or a local stock exchange.
"""
return self._stock_exchange
def start(self):
"""
Start the connector and create a connection to the server. Starts a
reactor loop in a separate thread.
"""
if not self._host:
print('No host selected, starting local instance.')
self._database = quartjes.controllers.database.default_database()
self._stock_exchange = (quartjes.controllers.stock_exchange2.
StockExchange2())
else:
reactor.callLater(0, self._connect)
if not reactor.running:
self._reactor_thread = ClientConnector._ReactorThread()
self._reactor_thread.start()
self._factory.wait_for_connection()
self._database = self.get_service_interface('database')
self._stock_exchange = self.get_service_interface('stock_exchange')
def stop(self):
"""
Stop the connector, closing the connection.
The Reactor loop remains active as the reactor cannot be restarted.
"""
if self._host:
threads.blockingCallFromThread(reactor, self._disconnect)
else:
self._database = None
self._stock_exchange.stop()
self._stock_exchange = None
def get_service_interface(self, service_name):
"""
Construct a service interface for the service with the given name. Use
the service interface to send requests to the corresponding service
on the Quartjes server.
Parameters
----------
service_name : string
Name of the service on the server to which you want a remote
interface.
Returns
-------
service_interface : :class:`quartjes.connector.services.ServiceInterface`
An interface to the service.
Please note that the existence of the service on the server is not
verified until an actual method call has been done.
"""
return ServiceInterface(self._factory, service_name)
def is_connected(self):
"""
Determine whether the connection to the server is active.
A local service is also considered connected.
Returns
-------
connected : boolean
True if connected, False if not.
"""
if not self._host:
if self._database:
return True
else:
return False
else:
return self._factory.is_connected()
def _connect(self):
"""
Internal method called from the reactor to start a new connection.
"""
self._connection = reactor.connectTCP(self.host, self.port, self.
factory)
def _disconnect(self):
"""
Internal method called from the reactor to shut down a connection.
"""
self._factory.stopTrying()
self._connection.disconnect()
class _ReactorThread(Thread):
"""
Thread for running the reactor loop. This thread runs as a daemon, so
if the main thread and any non daemon threads end, the reactor also
stops running allowing the application to exit.
"""
def __init__(self):
Thread.__init__(self, name='ReactorThread')
self.daemon = True
def run(self):
reactor.run(installSignalHandlers=0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Rob van der Most'
__docformat__ = 'restructuredtext en'
<|reserved_special_token_0|>
class ClientConnector(object):
"""
Client side endpoint of the Quartjes connector.
Parameters
----------
host : string
Host to connect to. If no host is specified, a local server is started.
port : int
Port to connect to.
Attributes
----------
host
port
factory
database
stock_exchange
"""
def __init__(self, host=None, port=None):
self._host = host
if port:
self._port = port
else:
from quartjes.connector.server import default_port
self._port = default_port
self._factory = QuartjesClientFactory()
self._database = None
self._stock_exchange = None
self._connection = None
@property
def host(self):
"""
Hostname to connect to.
Can only be changed when there is no active connection.
"""
return self._host
@host.setter
def host(self, value):
assert not self.is_connected(
), 'Host should not be changed will connected.'
self._host = value
@property
def port(self):
"""
Port to connect to.
Can only be changed when there is no active connection.
"""
return self._port
@port.setter
def port(self, value):
assert not self.is_connected(
), 'Port should not be changed will connected.'
self._port = value
@property
def factory(self):
"""
The protocol factory used by the client to connect to the server.
You normally should not need to access this. It is for advanced options.
"""
return self._factory
@property
def database(self):
"""
Reference to the currently running
:class:`Database <quartjes.controllers.database.Database>`.
This can be a proxy to the database on the server or a local database.
"""
return self._database
@property
def stock_exchange(self):
"""
Reference to the currently running
:class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`.
This can be a proxy to the stock exchange on the server or a local stock exchange.
"""
return self._stock_exchange
def start(self):
"""
Start the connector and create a connection to the server. Starts a
reactor loop in a separate thread.
"""
if not self._host:
print('No host selected, starting local instance.')
self._database = quartjes.controllers.database.default_database()
self._stock_exchange = (quartjes.controllers.stock_exchange2.
StockExchange2())
else:
reactor.callLater(0, self._connect)
if not reactor.running:
self._reactor_thread = ClientConnector._ReactorThread()
self._reactor_thread.start()
self._factory.wait_for_connection()
self._database = self.get_service_interface('database')
self._stock_exchange = self.get_service_interface('stock_exchange')
def stop(self):
"""
Stop the connector, closing the connection.
The Reactor loop remains active as the reactor cannot be restarted.
"""
if self._host:
threads.blockingCallFromThread(reactor, self._disconnect)
else:
self._database = None
self._stock_exchange.stop()
self._stock_exchange = None
def get_service_interface(self, service_name):
"""
Construct a service interface for the service with the given name. Use
the service interface to send requests to the corresponding service
on the Quartjes server.
Parameters
----------
service_name : string
Name of the service on the server to which you want a remote
interface.
Returns
-------
service_interface : :class:`quartjes.connector.services.ServiceInterface`
An interface to the service.
Please note that the existence of the service on the server is not
verified until an actual method call has been done.
"""
return ServiceInterface(self._factory, service_name)
def is_connected(self):
"""
Determine whether the connection to the server is active.
A local service is also considered connected.
Returns
-------
connected : boolean
True if connected, False if not.
"""
if not self._host:
if self._database:
return True
else:
return False
else:
return self._factory.is_connected()
def _connect(self):
"""
Internal method called from the reactor to start a new connection.
"""
self._connection = reactor.connectTCP(self.host, self.port, self.
factory)
def _disconnect(self):
"""
Internal method called from the reactor to shut down a connection.
"""
self._factory.stopTrying()
self._connection.disconnect()
class _ReactorThread(Thread):
"""
Thread for running the reactor loop. This thread runs as a daemon, so
if the main thread and any non daemon threads end, the reactor also
stops running allowing the application to exit.
"""
def __init__(self):
Thread.__init__(self, name='ReactorThread')
self.daemon = True
def run(self):
reactor.run(installSignalHandlers=0)
def tk_event_listener(F):
"""
Make a method able to receive events from the connector while running in
the TK mainloop.
"""
def listener(self, *pargs, **kwargs):
self._event_queue.put((F, self, pargs, kwargs))
return listener
def tk_prepare_instance_for_events(instance):
"""
Prepare a class to receive events from outside the tk mainloop.
Call this from the TK mainloop before any events are going to be received.
Decorate methods to call using tk_event_listener
"""
def listener():
try:
while 1:
method, self, pargs, kwargs = instance._event_queue.get_nowait(
)
method(self, *pargs, **kwargs)
except Queue.Empty:
pass
instance.after(100, listener)
import Queue
instance._event_queue = Queue.Queue()
instance.after(100, listener)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Rob van der Most'
__docformat__ = 'restructuredtext en'
from quartjes.connector.protocol import QuartjesClientFactory
from twisted.internet import reactor, threads
from threading import Thread
from quartjes.connector.services import ServiceInterface
import quartjes.controllers.database
import quartjes.controllers.stock_exchange2
class ClientConnector(object):
"""
Client side endpoint of the Quartjes connector.
Parameters
----------
host : string
Host to connect to. If no host is specified, a local server is started.
port : int
Port to connect to.
Attributes
----------
host
port
factory
database
stock_exchange
"""
def __init__(self, host=None, port=None):
self._host = host
if port:
self._port = port
else:
from quartjes.connector.server import default_port
self._port = default_port
self._factory = QuartjesClientFactory()
self._database = None
self._stock_exchange = None
self._connection = None
@property
def host(self):
"""
Hostname to connect to.
Can only be changed when there is no active connection.
"""
return self._host
@host.setter
def host(self, value):
assert not self.is_connected(
), 'Host should not be changed will connected.'
self._host = value
@property
def port(self):
"""
Port to connect to.
Can only be changed when there is no active connection.
"""
return self._port
@port.setter
def port(self, value):
assert not self.is_connected(
), 'Port should not be changed will connected.'
self._port = value
@property
def factory(self):
"""
The protocol factory used by the client to connect to the server.
You normally should not need to access this. It is for advanced options.
"""
return self._factory
@property
def database(self):
"""
Reference to the currently running
:class:`Database <quartjes.controllers.database.Database>`.
This can be a proxy to the database on the server or a local database.
"""
return self._database
@property
def stock_exchange(self):
"""
Reference to the currently running
:class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`.
This can be a proxy to the stock exchange on the server or a local stock exchange.
"""
return self._stock_exchange
def start(self):
"""
Start the connector and create a connection to the server. Starts a
reactor loop in a separate thread.
"""
if not self._host:
print('No host selected, starting local instance.')
self._database = quartjes.controllers.database.default_database()
self._stock_exchange = (quartjes.controllers.stock_exchange2.
StockExchange2())
else:
reactor.callLater(0, self._connect)
if not reactor.running:
self._reactor_thread = ClientConnector._ReactorThread()
self._reactor_thread.start()
self._factory.wait_for_connection()
self._database = self.get_service_interface('database')
self._stock_exchange = self.get_service_interface('stock_exchange')
def stop(self):
"""
Stop the connector, closing the connection.
The Reactor loop remains active as the reactor cannot be restarted.
"""
if self._host:
threads.blockingCallFromThread(reactor, self._disconnect)
else:
self._database = None
self._stock_exchange.stop()
self._stock_exchange = None
def get_service_interface(self, service_name):
"""
Construct a service interface for the service with the given name. Use
the service interface to send requests to the corresponding service
on the Quartjes server.
Parameters
----------
service_name : string
Name of the service on the server to which you want a remote
interface.
Returns
-------
service_interface : :class:`quartjes.connector.services.ServiceInterface`
An interface to the service.
Please note that the existence of the service on the server is not
verified until an actual method call has been done.
"""
return ServiceInterface(self._factory, service_name)
def is_connected(self):
"""
Determine whether the connection to the server is active.
A local service is also considered connected.
Returns
-------
connected : boolean
True if connected, False if not.
"""
if not self._host:
if self._database:
return True
else:
return False
else:
return self._factory.is_connected()
def _connect(self):
"""
Internal method called from the reactor to start a new connection.
"""
self._connection = reactor.connectTCP(self.host, self.port, self.
factory)
def _disconnect(self):
"""
Internal method called from the reactor to shut down a connection.
"""
self._factory.stopTrying()
self._connection.disconnect()
class _ReactorThread(Thread):
"""
Thread for running the reactor loop. This thread runs as a daemon, so
if the main thread and any non daemon threads end, the reactor also
stops running allowing the application to exit.
"""
def __init__(self):
Thread.__init__(self, name='ReactorThread')
self.daemon = True
def run(self):
reactor.run(installSignalHandlers=0)
def tk_event_listener(F):
"""
Make a method able to receive events from the connector while running in
the TK mainloop.
"""
def listener(self, *pargs, **kwargs):
self._event_queue.put((F, self, pargs, kwargs))
return listener
def tk_prepare_instance_for_events(instance):
"""
Prepare a class to receive events from outside the tk mainloop.
Call this from the TK mainloop before any events are going to be received.
Decorate methods to call using tk_event_listener
"""
def listener():
try:
while 1:
method, self, pargs, kwargs = instance._event_queue.get_nowait(
)
method(self, *pargs, **kwargs)
except Queue.Empty:
pass
instance.after(100, listener)
import Queue
instance._event_queue = Queue.Queue()
instance.after(100, listener)
<|reserved_special_token_1|>
"""
Client component of the Quartjes connector. Use the ClientConnector to create
a connection to the Quartjes server.
Usage
-----
Create an instance of this object with the host and port to connect to.
Call the start() method to establish the connection.
Now the database and the stock_exchange variable can be used to communicate
with the server.
If you do not wish to connect to a server, but run a local server instead,
create the object without any arguments.
Example
-------
>>> conn = ClientConnector("192.168.1.1")
>>> conn.start()
>>> conn.database.get_drinks()
Available server methods
------------------------
Currently two server objects are made available upon connection. Please see the
documentation for the server object for available methods and events:
* database: :class:`quartjes.controllers.database.Database`
* stock_exchange: :class:`quartjes.controllers.stock_exchange.StockExchange`
Advanced
--------
Use the method get_service_interface to retrieve additional interfaces to a server side
service.
As long as the connector is running, it will keep trying to reconnect any
lost connections using an exponential back-off.
ClientConnector class
---------------------
"""
__author__ = "Rob van der Most"
__docformat__ = "restructuredtext en"
from quartjes.connector.protocol import QuartjesClientFactory
from twisted.internet import reactor, threads
from threading import Thread
from quartjes.connector.services import ServiceInterface
import quartjes.controllers.database
import quartjes.controllers.stock_exchange2
class ClientConnector(object):
    """
    Client side endpoint of the Quartjes connector.

    Parameters
    ----------
    host : string
        Host to connect to. If no host is specified, a local server is started.
    port : int
        Port to connect to.

    Attributes
    ----------
    host
    port
    factory
    database
    stock_exchange
    """

    def __init__(self, host=None, port=None):
        self._host = host
        if port:
            self._port = port
        else:
            # Imported here (not at module level) to avoid a circular import
            # between the client and server modules.
            from quartjes.connector.server import default_port
            self._port = default_port
        self._factory = QuartjesClientFactory()
        self._database = None
        self._stock_exchange = None
        self._connection = None

    @property
    def host(self):
        """
        Hostname to connect to.
        Can only be changed when there is no active connection.
        """
        return self._host

    @host.setter
    def host(self, value):
        assert not self.is_connected(), "Host should not be changed while connected."
        self._host = value

    @property
    def port(self):
        """
        Port to connect to.
        Can only be changed when there is no active connection.
        """
        return self._port

    @port.setter
    def port(self, value):
        assert not self.is_connected(), "Port should not be changed while connected."
        self._port = value

    @property
    def factory(self):
        """
        The protocol factory used by the client to connect to the server.
        You normally should not need to access this. It is for advanced options.
        """
        return self._factory

    @property
    def database(self):
        """
        Reference to the currently running
        :class:`Database <quartjes.controllers.database.Database>`.
        This can be a proxy to the database on the server or a local database.
        """
        return self._database

    @property
    def stock_exchange(self):
        """
        Reference to the currently running
        :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`.
        This can be a proxy to the stock exchange on the server or a local stock exchange.
        """
        return self._stock_exchange

    def start(self):
        """
        Start the connector and create a connection to the server. Starts a
        reactor loop in a separate thread.

        If no host was given, in-process service instances are created instead
        of remote proxies.
        """
        if not self._host:
            print("No host selected, starting local instance.")
            self._database = quartjes.controllers.database.default_database()
            self._stock_exchange = quartjes.controllers.stock_exchange2.StockExchange2()
        else:
            reactor.callLater(0, self._connect) #@UndefinedVariable
            if not reactor.running: #@UndefinedVariable
                # The Twisted reactor cannot be restarted, so it is started
                # once in a daemon thread and left running from then on.
                self._reactor_thread = ClientConnector._ReactorThread()
                self._reactor_thread.start()
            self._factory.wait_for_connection()

            # Remote proxies are only created for an actual remote connection;
            # creating them unconditionally would clobber the local instances
            # set up in the branch above.
            self._database = self.get_service_interface("database")
            self._stock_exchange = self.get_service_interface("stock_exchange")

    def stop(self):
        """
        Stop the connector, closing the connection.
        The Reactor loop remains active as the reactor cannot be restarted.
        """
        if self._host:
            threads.blockingCallFromThread(reactor, self._disconnect)
        else:
            self._database = None
            self._stock_exchange.stop()
            self._stock_exchange = None

    def get_service_interface(self, service_name):
        """
        Construct a service interface for the service with the given name. Use
        the service interface to send requests to the corresponding service
        on the Quartjes server.

        Parameters
        ----------
        service_name : string
            Name of the service on the server to which you want a remote
            interface.

        Returns
        -------
        service_interface : :class:`quartjes.connector.services.ServiceInterface`
            An interface to the service.
            Please note that the existence of the service on the server is not
            verified until an actual method call has been done.
        """
        return ServiceInterface(self._factory, service_name)

    def is_connected(self):
        """
        Determine whether the connection to the server is active.
        A local service is also considered connected.

        Returns
        -------
        connected : boolean
            True if connected, False if not.
        """
        if not self._host:
            # Local mode: "connected" simply means start() has been called.
            if self._database:
                return True
            else:
                return False
        else:
            return self._factory.is_connected()

    def _connect(self):
        """
        Internal method called from the reactor to start a new connection.
        """
        self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable

    def _disconnect(self):
        """
        Internal method called from the reactor to shut down a connection.
        """
        # Stop the reconnect back-off first so the disconnect is not undone.
        self._factory.stopTrying()
        self._connection.disconnect()

    class _ReactorThread(Thread):
        """
        Thread for running the reactor loop. This thread runs as a daemon, so
        if the main thread and any non daemon threads end, the reactor also
        stops running allowing the application to exit.
        """
        def __init__(self):
            Thread.__init__(self, name="ReactorThread")
            self.daemon = True

        def run(self):
            reactor.run(installSignalHandlers=0) #@UndefinedVariable
def tk_event_listener(F):
    """
    Decorator that makes a method safe to invoke from outside the Tk mainloop.

    Instead of executing *F* directly, the returned wrapper drops the call
    (function, instance, positional and keyword arguments) onto the
    instance's ``_event_queue``, from which the Tk mainloop executes it later.
    """
    def _enqueue(self, *args, **kwargs):
        # Defer the actual call to the Tk thread via the instance queue.
        self._event_queue.put((F, self, args, kwargs))

    return _enqueue
def tk_prepare_instance_for_events(instance):
    """
    Prepare a Tk widget instance to receive events from outside the Tk mainloop.

    Call this from the Tk mainloop before any events are going to be received.
    Methods that should receive such events must be decorated with
    :func:`tk_event_listener`; the decorator queues each call and a poller
    installed here (rescheduled every 100 ms via ``instance.after``) executes
    the queued calls inside the Tk mainloop.

    Parameters
    ----------
    instance
        The Tk widget (anything with an ``after`` method) to prepare. It
        gains a ``_event_queue`` attribute used by the decorated methods.
    """
    # The Queue module was renamed to queue in Python 3; support both.
    try:
        import queue as Queue
    except ImportError:
        import Queue

    def listener():
        # Drain every pending event, then reschedule ourselves.
        try:
            while 1:
                (method, self, pargs, kwargs) = instance._event_queue.get_nowait()
                method(self, *pargs, **kwargs)
        except Queue.Empty:
            pass
        instance.after(100, listener)

    instance._event_queue = Queue.Queue()
    instance.after(100, listener)
|
flexible
|
{
"blob_id": "a8f200e0ae1252df4ad6560e5756347cd0e4c8ba",
"index": 5034,
"step-1": "<mask token>\n\n\nclass ClientConnector(object):\n <mask token>\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n <mask token>\n <mask token>\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. 
Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n <mask token>\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n 
self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. 
\n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n__author__ = 'Rob van der Most'\n__docformat__ = 'restructuredtext en'\n<mask token>\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. 
\n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. 
This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n return listener\n\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n\n def listener():\n try:\n while 1:\n method, self, pargs, kwargs = instance._event_queue.get_nowait(\n )\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n",
"step-4": "<mask token>\n__author__ = 'Rob van der Most'\n__docformat__ = 'restructuredtext en'\nfrom quartjes.connector.protocol import QuartjesClientFactory\nfrom twisted.internet import reactor, threads\nfrom threading import Thread\nfrom quartjes.connector.services import ServiceInterface\nimport quartjes.controllers.database\nimport quartjes.controllers.stock_exchange2\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. 
\n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. 
This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n return listener\n\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n\n def listener():\n try:\n while 1:\n method, self, pargs, kwargs = instance._event_queue.get_nowait(\n )\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n",
"step-5": "\"\"\"\nClient component of the Quartjes connector. Use the ClientConnector to create\na connection to the Quartjes server.\n\nUsage\n-----\nCreate an instance of this object with the host and port to connect to.\nCall the start() method to establish the connection.\nNow the database and the stock_exchange variable can be used to communicate\nwith the server.\n\nIf you do not wish to connect to a server, but run a local server instead,\ncreate the object without any arguments.\n\nExample\n-------\n>>> conn = ClientConnector(\"192.168.1.1\")\n>>> conn.start()\n>>> conn.database.get_drinks()\n\nAvailable server methods\n------------------------\n\nCurrently two server objects are made available upon connection. Please see the\ndocumentation for the server object for available methods and events:\n\n* database: :class:`quartjes.controllers.database.Database`\n* stock_exchange: :class:`quartjes.controllers.stock_exchange.StockExchange`\n\nAdvanced\n--------\n\nUse the method get_service_interface to retrieve additional interfaces to a server side\nservice.\n\nAs long as the connector is running, it will keep trying to reconnect any\nlost connections using an exponential back-off.\n\nClientConnector class\n---------------------\n\n\"\"\"\n__author__ = \"Rob van der Most\"\n__docformat__ = \"restructuredtext en\"\n\nfrom quartjes.connector.protocol import QuartjesClientFactory\nfrom twisted.internet import reactor, threads\nfrom threading import Thread\nfrom quartjes.connector.services import ServiceInterface\nimport quartjes.controllers.database\nimport quartjes.controllers.stock_exchange2\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. 
If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n \n @host.setter\n def host(self, value):\n assert not self.is_connected(), \"Host should not be changed will connected.\"\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n \n @port.setter\n def port(self, value):\n assert not self.is_connected(), \"Port should not be changed will connected.\"\n self._port = value\n \n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n \n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n \n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n \n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. 
Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print(\"No host selected, starting local instance.\")\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = quartjes.controllers.stock_exchange2.StockExchange2()\n else:\n reactor.callLater(0, self._connect) #@UndefinedVariable\n if not reactor.running: #@UndefinedVariable\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n\n self._database = self.get_service_interface(\"database\")\n self._stock_exchange = self.get_service_interface(\"stock_exchange\")\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n #threads.blockingCallFromThread(reactor, self._factory.stopTrying)\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. 
This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n def __init__(self):\n Thread.__init__(self, name=\"ReactorThread\")\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0) #@UndefinedVariable\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n \n return listener\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n def listener():\n try:\n while 1:\n (method, self, pargs, kwargs) = instance._event_queue.get_nowait()\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n \n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n \n",
"step-ids": [
12,
16,
19,
20,
21
]
}
|
[
12,
16,
19,
20,
21
] |
# fonction pour voir quel est le plus grand entre l'energie limite et l'enerve potentiel
def ep(m, h, el, g=9.8):
    """Print whether the potential energy m*g*h or the limit energy *el* is larger.

    Ties (E == el) report E as the larger, matching the original behaviour.
    """
    if m * h * g < el:
        print("le plus grand est : el")
    else:
        print("le plus grand est : E")
ep(3,4,5)  # demo call: E = 3*4*9.8 = 117.6 > 5, so it prints "le plus grand est : E"
# fonction de Fibonacci : 0 1 1 2 3 5 8 13
def fibonaci(n):
    """Return the list of the first *n* Fibonacci numbers: 0, 1, 1, 2, 3, 5, 8, 13, ...

    Bug fix: the original returned the function object itself (``return
    fibonaci``), so callers could never obtain a Fibonacci value; the loop
    body also never accumulated the sequence. It now returns the sequence
    described by the comment above.
    """
    seq = []
    a, b = 0, 1
    for _ in range(n):
        seq.append(a)
        a, b = b, a + b
    return seq
|
normal
|
{
"blob_id": "869284fa531a93c1b9812ed90a560d0bb2f87e97",
"index": 255,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fibonaci(n):\n for i in range(0, n):\n j = 1\n i = i + j\n j = i\n return fibonaci\n",
"step-3": "def ep(m, h, el, g=9.8):\n E = m * h * g\n if E < el:\n print('le plus grand est : el')\n else:\n print('le plus grand est : E')\n\n\n<mask token>\n\n\ndef fibonaci(n):\n for i in range(0, n):\n j = 1\n i = i + j\n j = i\n return fibonaci\n",
"step-4": "def ep(m, h, el, g=9.8):\n E = m * h * g\n if E < el:\n print('le plus grand est : el')\n else:\n print('le plus grand est : E')\n\n\nep(3, 4, 5)\n\n\ndef fibonaci(n):\n for i in range(0, n):\n j = 1\n i = i + j\n j = i\n return fibonaci\n",
"step-5": "# fonction pour voir quel est le plus grand entre l'energie limite et l'enerve potentiel\ndef ep (m,h,el,g=9.8):\n E=m*h*g\n if E<el:\n print (\"le plus grand est : el\")\n else:\n print (\"le plus grand est : E\")\n\nep(3,4,5)\n\n#fontion fibonaci 0 1 1 2 3 5 8 13\n\ndef fibonaci(n):\n for i in range(0,n,):\n j= 1\n i = i + j\n j=i\n return fibonaci\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def four_Ow_four(error):
"""
method to render the 404 error page
"""
return render_template('fourOwfour.html'), 404
<|reserved_special_token_1|>
def four_Ow_four(error):
    """
    Flask error handler: render the custom 404 page and return it together
    with the 404 (Not Found) HTTP status code.
    """
    return render_template('fourOwfour.html'),404
|
flexible
|
{
"blob_id": "851cfd4e71ffd2d5fed33616abca4444474669a3",
"index": 4508,
"step-1": "<mask token>\n",
"step-2": "def four_Ow_four(error):\n \"\"\"\n method to render the 404 error page\n \"\"\"\n return render_template('fourOwfour.html'), 404\n",
"step-3": "def four_Ow_four(error):\n '''\n method to render the 404 error page\n '''\n return render_template('fourOwfour.html'),404",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('threads', '0007_auto_20180430_1617')]
operations = [migrations.AlterField(model_name='thread', name=
'last_activity', field=models.DateTimeField(default=django.utils.
timezone.now))]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [('threads', '0007_auto_20180430_1617')]
operations = [migrations.AlterField(model_name='thread', name=
'last_activity', field=models.DateTimeField(default=django.utils.
timezone.now))]
<|reserved_special_token_1|>
# Generated by Django 2.0.3 on 2018-04-30 16:25
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Change Thread.last_activity to default to the current (timezone-aware) time."""

    dependencies = [
        ('threads', '0007_auto_20180430_1617'),
    ]

    operations = [
        migrations.AlterField(
            model_name='thread',
            name='last_activity',
            # django.utils.timezone.now is passed as a callable, so the default
            # is evaluated at save time, not at migration time.
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
flexible
|
{
"blob_id": "6cd250b3bffd87657ec7cc28eaffe817c6d9f73f",
"index": 9794,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('threads', '0007_auto_20180430_1617')]\n operations = [migrations.AlterField(model_name='thread', name=\n 'last_activity', field=models.DateTimeField(default=django.utils.\n timezone.now))]\n",
"step-4": "from django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [('threads', '0007_auto_20180430_1617')]\n operations = [migrations.AlterField(model_name='thread', name=\n 'last_activity', field=models.DateTimeField(default=django.utils.\n timezone.now))]\n",
"step-5": "# Generated by Django 2.0.3 on 2018-04-30 16:25\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('threads', '0007_auto_20180430_1617'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='thread',\n name='last_activity',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def ispalindrome(s):
    """Return True when the sequence *s* reads the same forwards and backwards."""
    # A sequence is a palindrome exactly when it equals its own reversal;
    # empty and single-element sequences compare equal to themselves trivially.
    return s == s[::-1]
|
flexible
|
{
"blob_id": "c20a414f7f96a96f6e458fc27e5d2c7ac7ab05cf",
"index": 8574,
"step-1": "<mask token>\n",
"step-2": "def ispalindrome(s):\n if len(s) <= 1:\n return True\n elif s[0] != s[-1]:\n return False\n else:\n return ispalindrome(s[1:-1])\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from fastapi import FastAPI
from app.router.routes import initRoutes
from app.cors.cors import initCors
# Create the FastAPI application, then attach CORS middleware and register the API routes.
# NOTE(review): debug=True should be disabled for production deployments -- confirm.
app = FastAPI(debug=True,title="Recipe API")
initCors(app)
initRoutes(app)
|
normal
|
{
"blob_id": "1857d76b8c68c58d2d721de529811a6aeb09fcbb",
"index": 5407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ninitCors(app)\ninitRoutes(app)\n",
"step-3": "<mask token>\napp = FastAPI(debug=True, title='Recipe API')\ninitCors(app)\ninitRoutes(app)\n",
"step-4": "from fastapi import FastAPI\nfrom app.router.routes import initRoutes\nfrom app.cors.cors import initCors\napp = FastAPI(debug=True, title='Recipe API')\ninitCors(app)\ninitRoutes(app)\n",
"step-5": "from fastapi import FastAPI\nfrom app.router.routes import initRoutes\nfrom app.cors.cors import initCors\n\napp = FastAPI(debug=True,title=\"Recipe API\")\ninitCors(app)\ninitRoutes(app)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LoginForm(FlaskForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginForm(FlaskForm):
<|reserved_special_token_0|>
username = StringField('用户名', validators=[DataRequired()])
password = PasswordField('密码', validators=[DataRequired()])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginForm(FlaskForm):
"""登录表单类"""
username = StringField('用户名', validators=[DataRequired()])
password = PasswordField('密码', validators=[DataRequired()])
<|reserved_special_token_1|>
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
from flask_wtf import FlaskForm
class LoginForm(FlaskForm):
"""登录表单类"""
username = StringField('用户名', validators=[DataRequired()])
password = PasswordField('密码', validators=[DataRequired()])
<|reserved_special_token_1|>
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
from flask_wtf import FlaskForm
# ...
class LoginForm(FlaskForm):
    """Login form: a required username field and a required password field."""
    username = StringField('用户名', validators=[DataRequired()])
    password = PasswordField('密码', validators=[DataRequired()])
|
flexible
|
{
"blob_id": "6ad2014191215dac97ad6fc6a026512c3d1866dc",
"index": 8244,
"step-1": "<mask token>\n\n\nclass LoginForm(FlaskForm):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LoginForm(FlaskForm):\n <mask token>\n username = StringField('用户名', validators=[DataRequired()])\n password = PasswordField('密码', validators=[DataRequired()])\n",
"step-3": "<mask token>\n\n\nclass LoginForm(FlaskForm):\n \"\"\"登录表单类\"\"\"\n username = StringField('用户名', validators=[DataRequired()])\n password = PasswordField('密码', validators=[DataRequired()])\n",
"step-4": "from wtforms import StringField, PasswordField\nfrom wtforms.validators import DataRequired\nfrom flask_wtf import FlaskForm\n\n\nclass LoginForm(FlaskForm):\n \"\"\"登录表单类\"\"\"\n username = StringField('用户名', validators=[DataRequired()])\n password = PasswordField('密码', validators=[DataRequired()])\n",
"step-5": "from wtforms import StringField, PasswordField\nfrom wtforms.validators import DataRequired\nfrom flask_wtf import FlaskForm\n\n\n# ...\nclass LoginForm(FlaskForm):\n \"\"\"登录表单类\"\"\"\n username = StringField('用户名', validators=[DataRequired()])\n password = PasswordField('密码', validators=[DataRequired()])",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def GetDateTimeString():
dt = str(datetime.datetime.now()).split('.')[0]
clean = dt.replace(' ', '_').replace(':', '_')
return clean
def GetBackground(bgNumber):
bgImage = '/home/pi/pibooth/backgrounds/space.jpg'
return cv2.imread(bgImage)
def GetImage(bg):
ret, frame = cam.read()
sensitivity = 1
lowerRange = np.array([0, 0, 255 - sensitivity])
upperRange = np.array([255, sensitivity, 255])
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
image_mask = cv2.inRange(hsv, lowerRange, upperRange)
bg_mask = cv2.bitwise_and(bg, bg, mask=image_mask)
fg_mask = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(image_mask))
img = cv2.add(bg_mask, fg_mask)
return img
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def GetDateTimeString():
dt = str(datetime.datetime.now()).split('.')[0]
clean = dt.replace(' ', '_').replace(':', '_')
return clean
def GetBackground(bgNumber):
bgImage = '/home/pi/pibooth/backgrounds/space.jpg'
return cv2.imread(bgImage)
def GetImage(bg):
ret, frame = cam.read()
sensitivity = 1
lowerRange = np.array([0, 0, 255 - sensitivity])
upperRange = np.array([255, sensitivity, 255])
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
image_mask = cv2.inRange(hsv, lowerRange, upperRange)
bg_mask = cv2.bitwise_and(bg, bg, mask=image_mask)
fg_mask = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(image_mask))
img = cv2.add(bg_mask, fg_mask)
return img
cv2.namedWindow('Photobooth', cv2.WND_PROP_FULLSCREEN)
<|reserved_special_token_0|>
cam.set(3, width)
cam.set(4, height)
<|reserved_special_token_0|>
while True:
img = GetImage(bg)
key = cv2.waitKey(1)
if clicked == True:
elapsed = datetime.datetime.now() - clickedTime
secs = int(elapsed.total_seconds())
if secs > countdownSeconds:
clicked = False
cv2.imwrite('/home/pi/pibooth/newImages/img_' +
GetDateTimeString() + '.jpg', img)
cv2.imshow('Photobooth', img)
time.sleep(displayPhotoSeconds)
bgNumber += 1
bg = GetBackground(bgNumber)
else:
if secs - 5 == 1:
text = 'Say cheese!'
else:
text = str(5 - secs) + '...'
textSize, base = cv2.getTextSize(text, fontFace, fontScale,
thickness)
textWidth = int((width - textSize[0]) / 2)
textHeight = int((height + textSize[1]) / 2)
cv2.putText(img, text, (textWidth, textHeight), fontFace,
fontScale, (255, 255, 255), thickness)
elif key == 32:
clickedTime = datetime.datetime.now()
clicked = True
elif key == 27:
break
elif bgNumber == 4:
break
cv2.imshow('Photobooth', img)
cv2.destroyAllWindows()
cam.release()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def GetDateTimeString():
dt = str(datetime.datetime.now()).split('.')[0]
clean = dt.replace(' ', '_').replace(':', '_')
return clean
def GetBackground(bgNumber):
bgImage = '/home/pi/pibooth/backgrounds/space.jpg'
return cv2.imread(bgImage)
def GetImage(bg):
ret, frame = cam.read()
sensitivity = 1
lowerRange = np.array([0, 0, 255 - sensitivity])
upperRange = np.array([255, sensitivity, 255])
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
image_mask = cv2.inRange(hsv, lowerRange, upperRange)
bg_mask = cv2.bitwise_and(bg, bg, mask=image_mask)
fg_mask = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(image_mask))
img = cv2.add(bg_mask, fg_mask)
return img
cv2.namedWindow('Photobooth', cv2.WND_PROP_FULLSCREEN)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 4
countdownSeconds = 5
displayPhotoSeconds = 5
width = 640
height = 480
cam = cv2.VideoCapture(0)
cam.set(3, width)
cam.set(4, height)
bgNumber = 0
new_img_nums = random.sample(range(1, 9), 4)
bg = GetBackground(bgNumber)
clicked = False
clickedTime = {}
while True:
img = GetImage(bg)
key = cv2.waitKey(1)
if clicked == True:
elapsed = datetime.datetime.now() - clickedTime
secs = int(elapsed.total_seconds())
if secs > countdownSeconds:
clicked = False
cv2.imwrite('/home/pi/pibooth/newImages/img_' +
GetDateTimeString() + '.jpg', img)
cv2.imshow('Photobooth', img)
time.sleep(displayPhotoSeconds)
bgNumber += 1
bg = GetBackground(bgNumber)
else:
if secs - 5 == 1:
text = 'Say cheese!'
else:
text = str(5 - secs) + '...'
textSize, base = cv2.getTextSize(text, fontFace, fontScale,
thickness)
textWidth = int((width - textSize[0]) / 2)
textHeight = int((height + textSize[1]) / 2)
cv2.putText(img, text, (textWidth, textHeight), fontFace,
fontScale, (255, 255, 255), thickness)
elif key == 32:
clickedTime = datetime.datetime.now()
clicked = True
elif key == 27:
break
elif bgNumber == 4:
break
cv2.imshow('Photobooth', img)
cv2.destroyAllWindows()
cam.release()
<|reserved_special_token_1|>
import numpy as np
import cv2
import datetime
import random
import time
import logging
def GetDateTimeString():
dt = str(datetime.datetime.now()).split('.')[0]
clean = dt.replace(' ', '_').replace(':', '_')
return clean
def GetBackground(bgNumber):
bgImage = '/home/pi/pibooth/backgrounds/space.jpg'
return cv2.imread(bgImage)
def GetImage(bg):
ret, frame = cam.read()
sensitivity = 1
lowerRange = np.array([0, 0, 255 - sensitivity])
upperRange = np.array([255, sensitivity, 255])
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
image_mask = cv2.inRange(hsv, lowerRange, upperRange)
bg_mask = cv2.bitwise_and(bg, bg, mask=image_mask)
fg_mask = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(image_mask))
img = cv2.add(bg_mask, fg_mask)
return img
cv2.namedWindow('Photobooth', cv2.WND_PROP_FULLSCREEN)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 4
countdownSeconds = 5
displayPhotoSeconds = 5
width = 640
height = 480
cam = cv2.VideoCapture(0)
cam.set(3, width)
cam.set(4, height)
bgNumber = 0
new_img_nums = random.sample(range(1, 9), 4)
bg = GetBackground(bgNumber)
clicked = False
clickedTime = {}
while True:
img = GetImage(bg)
key = cv2.waitKey(1)
if clicked == True:
elapsed = datetime.datetime.now() - clickedTime
secs = int(elapsed.total_seconds())
if secs > countdownSeconds:
clicked = False
cv2.imwrite('/home/pi/pibooth/newImages/img_' +
GetDateTimeString() + '.jpg', img)
cv2.imshow('Photobooth', img)
time.sleep(displayPhotoSeconds)
bgNumber += 1
bg = GetBackground(bgNumber)
else:
if secs - 5 == 1:
text = 'Say cheese!'
else:
text = str(5 - secs) + '...'
textSize, base = cv2.getTextSize(text, fontFace, fontScale,
thickness)
textWidth = int((width - textSize[0]) / 2)
textHeight = int((height + textSize[1]) / 2)
cv2.putText(img, text, (textWidth, textHeight), fontFace,
fontScale, (255, 255, 255), thickness)
elif key == 32:
clickedTime = datetime.datetime.now()
clicked = True
elif key == 27:
break
elif bgNumber == 4:
break
cv2.imshow('Photobooth', img)
cv2.destroyAllWindows()
cam.release()
<|reserved_special_token_1|>
import numpy as np
import cv2
import datetime
import random
# from random import randint
import time
import logging
def GetDateTimeString():
    """Return the current local time as a filename-safe 'YYYY-MM-DD_HH_MM_SS' string."""
    # strftime produces the same text as str(now) with the fractional part
    # dropped and ' ' / ':' replaced by underscores.
    return datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
def GetBackground(bgNumber):
    """Load and return the background image for photo number *bgNumber* (BGR array).

    NOTE(review): bgNumber is currently ignored -- every photo gets the same
    'space.jpg' background. The commented-out line below shows the apparent
    original intent of cycling through the randomly chosen numbered
    backgrounds in new_img_nums; confirm which behaviour is wanted.
    """
    # bgImage = './backgrounds/' + str(new_img_nums[bgNumber]) + '.jpg'
    bgImage = '/home/pi/pibooth/backgrounds/space.jpg'
    return cv2.imread(bgImage)
def GetImage(bg):
    """Grab one frame from the module-level `cam` and chroma-key it over *bg*.

    Pixels whose HSV value falls in the near-white range are replaced by the
    corresponding pixels of *bg*; all other pixels keep the camera image.
    Returns the composited BGR image.
    """
    ret, frame = cam.read()  # ret (success flag) is ignored

    sensitivity = 1 # HSV tolerance; raise it to absorb more backdrop noise
    lowerRange = np.array([0, 0, 255 - sensitivity]) # this is currently set to white
    upperRange = np.array([255, sensitivity, 255]) # this is currently set to white

    # NOTE(review): earlier comments said "green screen", but the range above
    # keys on *white* -- presumably the backdrop is white; confirm.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    image_mask = cv2.inRange(hsv, lowerRange, upperRange)
    bg_mask = cv2.bitwise_and(bg, bg, mask = image_mask)
    fg_mask = cv2.bitwise_and(frame, frame, mask = cv2.bitwise_not(image_mask))
    img = cv2.add(bg_mask, fg_mask)

    return img
# --- Photobooth main script -------------------------------------------------
# Full-screen preview window for the live composite.
cv2.namedWindow("Photobooth", cv2.WND_PROP_FULLSCREEN)
# cv2.setWindowProperty("Photobooth", cv2.WND_PROP_FULLSCREEN, 1)

# Countdown-overlay text options and timing (seconds).
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 4
countdownSeconds = 5
displayPhotoSeconds = 5

# Webcam capture resolution.
width = 640
height = 480

cam = cv2.VideoCapture(0)
cam.set(3,width)   # property id 3 == CAP_PROP_FRAME_WIDTH
cam.set(4,height)  # property id 4 == CAP_PROP_FRAME_HEIGHT

bgNumber = 0  # how many photos have been taken so far
new_img_nums = random.sample(range(1,9), 4)  # four random background numbers (only used by the commented-out GetBackground line)

bg = GetBackground(bgNumber)
clicked = False   # True while a countdown is in progress
clickedTime = {}  # replaced with a datetime when the countdown starts

while(True):
    img = GetImage(bg) # composite the current webcam frame over the background

    key = cv2.waitKey(1) # poll for a keypress (also pumps the GUI event loop)
    if clicked == True : # a countdown is running
        elapsed = datetime.datetime.now() - clickedTime
        secs = int(elapsed.total_seconds())
        if secs > countdownSeconds : # countdown finished: save the current frame
            clicked = False
            cv2.imwrite('/home/pi/pibooth/newImages/img_' + GetDateTimeString() + '.jpg',img)
            # cv2.imwrite('./newImages/img_' + GetDateTimeString() + '.jpg',img)
            cv2.imshow('Photobooth',img)
            time.sleep(displayPhotoSeconds) # freeze the captured photo on screen
            bgNumber += 1
            bg = GetBackground(bgNumber) # background for the next shot
        else : # countdown still running: overlay the remaining seconds
            if secs - 5 == 1:
                # NOTE(review): secs - 5 == 1 means secs == 6, but this branch
                # only runs while secs <= countdownSeconds (5), so 'Say cheese!'
                # can never be displayed -- looks like an off-by-one; confirm.
                text = 'Say cheese!'
            else:
                text = str(5 - secs) + "..."
            textSize, base = cv2.getTextSize(text, fontFace, fontScale, thickness)
            # centre the countdown text in the frame
            textWidth = int((width - textSize[0]) / 2)
            textHeight = int((height + textSize[1]) / 2)
            cv2.putText(img, text, (textWidth, textHeight), fontFace, fontScale, (255, 255, 255), thickness)
    elif key == 32 : # spacebar: start the countdown
        clickedTime = datetime.datetime.now()
        clicked = True
    elif key == 27 : # escape: quit
        break
    elif bgNumber == 4:
        # all four photos taken; remaining work is still TODO:
        # assemble photos into strip
        # print strip
        # reset app
        break

    cv2.imshow('Photobooth',img) # show the live composite

cv2.destroyAllWindows()
cam.release()
|
flexible
|
{
"blob_id": "a14c23398bbf42832a285d29c1b80aefc5fdaf6c",
"index": 9031,
"step-1": "<mask token>\n\n\ndef GetDateTimeString():\n dt = str(datetime.datetime.now()).split('.')[0]\n clean = dt.replace(' ', '_').replace(':', '_')\n return clean\n\n\ndef GetBackground(bgNumber):\n bgImage = '/home/pi/pibooth/backgrounds/space.jpg'\n return cv2.imread(bgImage)\n\n\ndef GetImage(bg):\n ret, frame = cam.read()\n sensitivity = 1\n lowerRange = np.array([0, 0, 255 - sensitivity])\n upperRange = np.array([255, sensitivity, 255])\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n image_mask = cv2.inRange(hsv, lowerRange, upperRange)\n bg_mask = cv2.bitwise_and(bg, bg, mask=image_mask)\n fg_mask = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(image_mask))\n img = cv2.add(bg_mask, fg_mask)\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef GetDateTimeString():\n dt = str(datetime.datetime.now()).split('.')[0]\n clean = dt.replace(' ', '_').replace(':', '_')\n return clean\n\n\ndef GetBackground(bgNumber):\n bgImage = '/home/pi/pibooth/backgrounds/space.jpg'\n return cv2.imread(bgImage)\n\n\ndef GetImage(bg):\n ret, frame = cam.read()\n sensitivity = 1\n lowerRange = np.array([0, 0, 255 - sensitivity])\n upperRange = np.array([255, sensitivity, 255])\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n image_mask = cv2.inRange(hsv, lowerRange, upperRange)\n bg_mask = cv2.bitwise_and(bg, bg, mask=image_mask)\n fg_mask = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(image_mask))\n img = cv2.add(bg_mask, fg_mask)\n return img\n\n\ncv2.namedWindow('Photobooth', cv2.WND_PROP_FULLSCREEN)\n<mask token>\ncam.set(3, width)\ncam.set(4, height)\n<mask token>\nwhile True:\n img = GetImage(bg)\n key = cv2.waitKey(1)\n if clicked == True:\n elapsed = datetime.datetime.now() - clickedTime\n secs = int(elapsed.total_seconds())\n if secs > countdownSeconds:\n clicked = False\n cv2.imwrite('/home/pi/pibooth/newImages/img_' +\n GetDateTimeString() + '.jpg', img)\n cv2.imshow('Photobooth', img)\n time.sleep(displayPhotoSeconds)\n bgNumber += 1\n bg = GetBackground(bgNumber)\n else:\n if secs - 5 == 1:\n text = 'Say cheese!'\n else:\n text = str(5 - secs) + '...'\n textSize, base = cv2.getTextSize(text, fontFace, fontScale,\n thickness)\n textWidth = int((width - textSize[0]) / 2)\n textHeight = int((height + textSize[1]) / 2)\n cv2.putText(img, text, (textWidth, textHeight), fontFace,\n fontScale, (255, 255, 255), thickness)\n elif key == 32:\n clickedTime = datetime.datetime.now()\n clicked = True\n elif key == 27:\n break\n elif bgNumber == 4:\n break\n cv2.imshow('Photobooth', img)\ncv2.destroyAllWindows()\ncam.release()\n",
"step-3": "<mask token>\n\n\ndef GetDateTimeString():\n dt = str(datetime.datetime.now()).split('.')[0]\n clean = dt.replace(' ', '_').replace(':', '_')\n return clean\n\n\ndef GetBackground(bgNumber):\n bgImage = '/home/pi/pibooth/backgrounds/space.jpg'\n return cv2.imread(bgImage)\n\n\ndef GetImage(bg):\n ret, frame = cam.read()\n sensitivity = 1\n lowerRange = np.array([0, 0, 255 - sensitivity])\n upperRange = np.array([255, sensitivity, 255])\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n image_mask = cv2.inRange(hsv, lowerRange, upperRange)\n bg_mask = cv2.bitwise_and(bg, bg, mask=image_mask)\n fg_mask = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(image_mask))\n img = cv2.add(bg_mask, fg_mask)\n return img\n\n\ncv2.namedWindow('Photobooth', cv2.WND_PROP_FULLSCREEN)\nfontFace = cv2.FONT_HERSHEY_SIMPLEX\nfontScale = 1\nthickness = 4\ncountdownSeconds = 5\ndisplayPhotoSeconds = 5\nwidth = 640\nheight = 480\ncam = cv2.VideoCapture(0)\ncam.set(3, width)\ncam.set(4, height)\nbgNumber = 0\nnew_img_nums = random.sample(range(1, 9), 4)\nbg = GetBackground(bgNumber)\nclicked = False\nclickedTime = {}\nwhile True:\n img = GetImage(bg)\n key = cv2.waitKey(1)\n if clicked == True:\n elapsed = datetime.datetime.now() - clickedTime\n secs = int(elapsed.total_seconds())\n if secs > countdownSeconds:\n clicked = False\n cv2.imwrite('/home/pi/pibooth/newImages/img_' +\n GetDateTimeString() + '.jpg', img)\n cv2.imshow('Photobooth', img)\n time.sleep(displayPhotoSeconds)\n bgNumber += 1\n bg = GetBackground(bgNumber)\n else:\n if secs - 5 == 1:\n text = 'Say cheese!'\n else:\n text = str(5 - secs) + '...'\n textSize, base = cv2.getTextSize(text, fontFace, fontScale,\n thickness)\n textWidth = int((width - textSize[0]) / 2)\n textHeight = int((height + textSize[1]) / 2)\n cv2.putText(img, text, (textWidth, textHeight), fontFace,\n fontScale, (255, 255, 255), thickness)\n elif key == 32:\n clickedTime = datetime.datetime.now()\n clicked = True\n elif key == 27:\n break\n 
elif bgNumber == 4:\n break\n cv2.imshow('Photobooth', img)\ncv2.destroyAllWindows()\ncam.release()\n",
"step-4": "import numpy as np\nimport cv2\nimport datetime\nimport random\nimport time\nimport logging\n\n\ndef GetDateTimeString():\n dt = str(datetime.datetime.now()).split('.')[0]\n clean = dt.replace(' ', '_').replace(':', '_')\n return clean\n\n\ndef GetBackground(bgNumber):\n bgImage = '/home/pi/pibooth/backgrounds/space.jpg'\n return cv2.imread(bgImage)\n\n\ndef GetImage(bg):\n ret, frame = cam.read()\n sensitivity = 1\n lowerRange = np.array([0, 0, 255 - sensitivity])\n upperRange = np.array([255, sensitivity, 255])\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n image_mask = cv2.inRange(hsv, lowerRange, upperRange)\n bg_mask = cv2.bitwise_and(bg, bg, mask=image_mask)\n fg_mask = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(image_mask))\n img = cv2.add(bg_mask, fg_mask)\n return img\n\n\ncv2.namedWindow('Photobooth', cv2.WND_PROP_FULLSCREEN)\nfontFace = cv2.FONT_HERSHEY_SIMPLEX\nfontScale = 1\nthickness = 4\ncountdownSeconds = 5\ndisplayPhotoSeconds = 5\nwidth = 640\nheight = 480\ncam = cv2.VideoCapture(0)\ncam.set(3, width)\ncam.set(4, height)\nbgNumber = 0\nnew_img_nums = random.sample(range(1, 9), 4)\nbg = GetBackground(bgNumber)\nclicked = False\nclickedTime = {}\nwhile True:\n img = GetImage(bg)\n key = cv2.waitKey(1)\n if clicked == True:\n elapsed = datetime.datetime.now() - clickedTime\n secs = int(elapsed.total_seconds())\n if secs > countdownSeconds:\n clicked = False\n cv2.imwrite('/home/pi/pibooth/newImages/img_' +\n GetDateTimeString() + '.jpg', img)\n cv2.imshow('Photobooth', img)\n time.sleep(displayPhotoSeconds)\n bgNumber += 1\n bg = GetBackground(bgNumber)\n else:\n if secs - 5 == 1:\n text = 'Say cheese!'\n else:\n text = str(5 - secs) + '...'\n textSize, base = cv2.getTextSize(text, fontFace, fontScale,\n thickness)\n textWidth = int((width - textSize[0]) / 2)\n textHeight = int((height + textSize[1]) / 2)\n cv2.putText(img, text, (textWidth, textHeight), fontFace,\n fontScale, (255, 255, 255), thickness)\n elif key == 32:\n 
clickedTime = datetime.datetime.now()\n clicked = True\n elif key == 27:\n break\n elif bgNumber == 4:\n break\n cv2.imshow('Photobooth', img)\ncv2.destroyAllWindows()\ncam.release()\n",
"step-5": "import numpy as np\nimport cv2\nimport datetime\nimport random\n# from random import randint\nimport time\nimport logging\n\ndef GetDateTimeString():\n dt = str(datetime.datetime.now()).split(\".\")[0]\n clean = dt.replace(\" \",\"_\").replace(\":\",\"_\")\n return clean\n\ndef GetBackground(bgNumber):\n # bgImage = './backgrounds/' + str(new_img_nums[bgNumber]) + '.jpg'\n bgImage = '/home/pi/pibooth/backgrounds/space.jpg'\n return cv2.imread(bgImage)\n\ndef GetImage(bg):\n ret, frame = cam.read()\n\n sensitivity = 1 # play with sensitivity to get rid of noise...\n lowerRange = np.array([0, 0, 255 - sensitivity]) # this is currently set to white\n upperRange = np.array([255, sensitivity, 255]) # this is currently set to white\n\n #Mask the green screen\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n image_mask = cv2.inRange(hsv, lowerRange, upperRange)\n bg_mask = cv2.bitwise_and(bg, bg, mask = image_mask)\n fg_mask = cv2.bitwise_and(frame, frame, mask = cv2.bitwise_not(image_mask))\n img = cv2.add(bg_mask, fg_mask)\n\n return img\n\n# Set up window for full screen\ncv2.namedWindow(\"Photobooth\", cv2.WND_PROP_FULLSCREEN)\n# cv2.setWindowProperty(\"Photobooth\", cv2.WND_PROP_FULLSCREEN, 1)\n\n# options for countdown timer\nfontFace = cv2.FONT_HERSHEY_SIMPLEX\nfontScale = 1\nthickness = 4\ncountdownSeconds = 5\ndisplayPhotoSeconds = 5\n\n# Set up WebCam\nwidth = 640\nheight = 480\n\ncam = cv2.VideoCapture(0)\ncam.set(3,width)\ncam.set(4,height)\n\nbgNumber = 0\nnew_img_nums = random.sample(range(1,9), 4)\n\nbg = GetBackground(bgNumber)\nclicked = False\nclickedTime = {}\n\nwhile(True):\n img = GetImage(bg) #get masked image from webcam\n\n key = cv2.waitKey(1) #check for keypress\n if clicked == True : # if countdown timer started\n elapsed = datetime.datetime.now() - clickedTime\n secs = int(elapsed.total_seconds())\n if secs > countdownSeconds : # if five seconds are up, save the current image\n clicked = False\n 
cv2.imwrite('/home/pi/pibooth/newImages/img_' + GetDateTimeString() + '.jpg',img)\n # cv2.imwrite('./newImages/img_' + GetDateTimeString() + '.jpg',img)\n cv2.imshow('Photobooth',img)\n time.sleep(displayPhotoSeconds) # show the photo for 5 seconds\n bgNumber += 1\n bg = GetBackground(bgNumber) # get a new background\n else : # show the countdown timer\n if secs - 5 == 1:\n text = 'Say cheese!'\n else:\n text = str(5 - secs) + \"...\"\n textSize, base = cv2.getTextSize(text, fontFace, fontScale, thickness)\n textWidth = int((width - textSize[0]) / 2)\n textHeight = int((height + textSize[1]) / 2)\n cv2.putText(img, text, (textWidth, textHeight), fontFace, fontScale, (255, 255, 255), thickness)\n elif key == 32 : # on spacebar pressed, start the countdown timer\n clickedTime = datetime.datetime.now()\n clicked = True\n elif key == 27 : # on escape, close the program\n break\n elif bgNumber == 4:\n # assemble photos into strip\n # print strip\n # reset app\n break\n\n cv2.imshow('Photobooth',img) #display masked image\n\ncv2.destroyAllWindows()\ncam.release()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from pymarketo.client import MarketoClientFactory
import os
import sys #@UnusedImport
import time #@UnusedImport
import datetime #@UnusedImport
from pprint import pprint #@UnresolvedImport
# Locations of the test directory, package root, ini file and expected-output files.
TESTDIR = os.path.split(__file__)[0]
PACKAGEDIR = os.path.join(TESTDIR,"..")
INIFILE = os.path.join(PACKAGEDIR,"marketo.ini")
DATAFILES=["specification","listMObjects"]

# The following must be set up on your marketo account to enable tests
LEADEMAIL = "seant@webreply.com" # Email of an internal contact
LEADLIST = "2wr-0" # List name containing LEADEMAIL contact
SPECIALCODE = "WebReplyJobCode" # If your leads have a custom field that can be
SPECIALVALUE= "WEBREPLY" # asserted for LEADEMAIL, set them here
TESTCAMPAIGN = "SOAP API Access test" # Name of test campaign that has SOAP API trigger enabled
DELETECAMPAIGN = "Delete lead" # Campaign configured to delete leads added to the campaign

# First and last names, and synthetic email addresses for new leads
# These will be added and then deleted
TESTDOMAIN="webreply.com"
TESTNAMES = [("One","Test",TESTDOMAIN),("Two","Test",TESTDOMAIN)]
TESTEMAILS = ["%s.%s@%s" % name for name in TESTNAMES]

# Shared SOAP client used by all tests and by the factory helpers below.
mc = MarketoClientFactory(INIFILE)
def compareData(datafile, data):
    """Return True when *data* matches the expected-output file TESTDIR/<datafile>.txt.

    Both sides are stripped of leading/trailing whitespace before comparison.
    """
    path = os.path.join(TESTDIR, datafile + ".txt")
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection (the original left it open).
    with open(path) as f:
        return f.read().strip() == data.strip()
def test_data():
    """Verify that the ini file and every expected-output data file exist on disk."""
    assert os.path.exists(INIFILE)
    for name in DATAFILES:
        path = os.path.join(TESTDIR, name + ".txt")
        assert os.path.exists(path)
# Factory methods to build structures for arguments
def aStringArray(strings):
    """Wrap *strings* in a SOAP ArrayOfString structure."""
    array = mc.factory.create("ArrayOfString")
    array.stringItem = strings
    return array
def aLeadKey(email=None, id=None):
    """Build a LeadKey keyed by *email* (EMAIL) or *id* (IDNUM); email wins if both given.

    With neither argument, the key is returned untyped, as in the original.
    """
    key = mc.factory.create("LeadKey")
    if email:
        key.keyType, key.keyValue = "EMAIL", email
    elif id:
        key.keyType, key.keyValue = "IDNUM", id
    return key
def aLeadKeyArray(leads):
    """Wrap a list of LeadKey structures in an ArrayOfLeadKey."""
    array = mc.factory.create("ArrayOfLeadKey")
    array.leadKey = leads
    return array
def aListKey(lk, keyType = "MKTOLISTNAME"):
    """Build a ListKey for *lk*; *keyType* defaults to a Marketo list name."""
    key = mc.factory.create("ListKey")
    key.keyType = keyType
    key.keyValue = lk
    return key
def anAttrib(**kwargs):
    """Build an Attrib structure, setting each keyword argument as a field on it."""
    attrib = mc.factory.create("Attrib")
    for name in kwargs:
        setattr(attrib, name, kwargs[name])
    return attrib
def anAttribArray(attribs):
    """Wrap a list of Attrib structures in an ArrayOfAttrib."""
    array = mc.factory.create("ArrayOfAttrib")
    array.attrib = attribs
    return array
def anAttribute(**kwargs):
    """Build an Attribute structure, setting each keyword argument as a field on it."""
    attribute = mc.factory.create("Attribute")
    for name in kwargs:
        setattr(attribute, name, kwargs[name])
    return attribute
def anAttributeArray(attributes):
    """Wrap a list of Attribute structures in an ArrayOfAttribute."""
    array = mc.factory.create("ArrayOfAttribute")
    array.attribute = attributes
    return array
def aLeadRecord(id=None, email=None, foreignsyspersonid=None, foreignsystype=None, attributes=None):
    """Build a LeadRecord keyed by id, email, or foreign-system id (in that priority).

    A foreign-system id requires *foreignsystype* as well (asserted).
    When *attributes* is given it populates leadAttributeList.
    """
    record = mc.factory.create("LeadRecord")
    if id:
        record.Id = id
    elif email:
        record.Email = email
    elif foreignsyspersonid:
        assert foreignsystype
        record.ForeignSysPersonId = foreignsyspersonid
        record.ForeignSysType = foreignsystype
    if attributes:
        record.leadAttributeList = attributes
    return record
def aLeadRecordArray(leadrecords):
    """Wrap a list of LeadRecord structures in an ArrayOfLeadRecord."""
    array = mc.factory.create("ArrayOfLeadRecord")
    array.leadRecord = leadrecords
    return array
# Several things come back with an attribute list that is more pleasant as a dictionary
def attrs2dict(attributelist):
    """Flatten a SOAP attribute array (or None) into a plain {attrName: attrValue} dict."""
    if attributelist is None:
        return {}
    # The SOAP array nests the real attribute list one level down.
    inner = attributelist[0]
    return {attr.attrName: attr.attrValue for attr in inner}
def dict2attrs(d):
    """Convert a plain dict into an ArrayOfAttribute of attrName/attrValue pairs."""
    pairs = [anAttribute(attrName=key, attrValue=value) for key, value in d.items()]
    return anAttributeArray(pairs)
def test_specification():
    """The WSDL-derived client description must match the stored snapshot."""
    # compareData returns the comparison result; assert it so a snapshot
    # mismatch actually fails the test instead of being silently discarded.
    assert compareData("specification", str(mc))
# As of 1.7, these are the methods
# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )
# UnTested: deleteMObjects(ArrayOfMObject mObjectList, )
# Tested: describeMObject(xs:string objectName, )
# Requires having a trigger set for the campaign, from Marketo support:
# Your SOAP request is fine. In order for the getCampaignsForSource call to work,
# you must have a "Campaign is Requested" trigger in the your campaign set to Web Service API.
# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )
# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )
# Tested: getLead(LeadKey leadKey, )
# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )
# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )
# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )
# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )
# Tested: listMObjects()
# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )
# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )
# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )
# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )
# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )
# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )
# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )
# Campaign sources
# <xs:enumeration value="MKTOWS"/>
# <xs:enumeration value="SALES"/>
def test_getCampaignsForSource():
    """List every Web-Service-API campaign and check the reported count.

    NOTE(review): requires a "Campaign is Requested" trigger set to
    Web Service API, per the Marketo support note above -- confirm setup.
    """
    print "Testing getCampaignsForSource"
    campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
    resultCount = campaigns.returnCount
    # The record list is wrapped in a one-element outer list by the SOAP layer.
    campaignrecords = campaigns.campaignRecordList[0]
    assert resultCount==len(campaignrecords), "Result count '%s' does not match campaign list '%s'" % (resultCount, len(campaigns))
    for campaign in campaignrecords:
        print campaign.id, campaign.name, campaign.description
    print
def test_getLead():
    """Fetch the known contact by email and check its custom-field value."""
    print "Testing getLead"
    leadkey = aLeadKey(email=LEADEMAIL)
    lead = mc.service.getLead(leadkey)
    # Exactly one lead must match the configured test email.
    assert lead.count == 1
    lead = lead.leadRecordList.leadRecord[0]
    attrs = attrs2dict(lead.leadAttributeList)
    print lead.Id, lead.Email
    pprint(attrs)
    # Only verify the custom field when the account is configured for it.
    if SPECIALCODE and SPECIALVALUE:
        assert attrs[SPECIALCODE] == SPECIALVALUE
    print
# As of 1.7, theses are the activity types
# <xs:enumeration value="VisitWebpage"/>
# <xs:enumeration value="FillOutForm"/>
# <xs:enumeration value="ClickLink"/>
# <xs:enumeration value="RegisterForEvent"/>
# <xs:enumeration value="AttendEvent"/>
# <xs:enumeration value="SendEmail"/>
# <xs:enumeration value="EmailDelivered"/>
# <xs:enumeration value="EmailBounced"/>
# <xs:enumeration value="UnsubscribeEmail"/>
# <xs:enumeration value="OpenEmail"/>
# <xs:enumeration value="ClickEmail"/>
# <xs:enumeration value="NewLead"/>
# <xs:enumeration value="ChangeDataValue"/>
# <xs:enumeration value="LeadAssigned"/>
# <xs:enumeration value="NewSFDCOpprtnty"/>
# <xs:enumeration value="Wait"/>
# <xs:enumeration value="RunSubflow"/>
# <xs:enumeration value="RemoveFromFlow"/>
# <xs:enumeration value="PushLeadToSales"/>
# <xs:enumeration value="CreateTask"/>
# <xs:enumeration value="ConvertLead"/>
# <xs:enumeration value="ChangeScore"/>
# <xs:enumeration value="ChangeOwner"/>
# <xs:enumeration value="AddToList"/>
# <xs:enumeration value="RemoveFromList"/>
# <xs:enumeration value="SFDCActivity"/>
# <xs:enumeration value="EmailBouncedSoft"/>
# <xs:enumeration value="PushLeadUpdatesToSales"/>
# <xs:enumeration value="DeleteLeadFromSales"/>
# <xs:enumeration value="SFDCActivityUpdated"/>
# <xs:enumeration value="SFDCMergeLeads"/>
# <xs:enumeration value="MergeLeads"/>
# <xs:enumeration value="ResolveConflicts"/>
# <xs:enumeration value="AssocWithOpprtntyInSales"/>
# <xs:enumeration value="DissocFromOpprtntyInSales"/>
# <xs:enumeration value="UpdateOpprtntyInSales"/>
# <xs:enumeration value="DeleteLead"/>
# <xs:enumeration value="SendAlert"/>
# <xs:enumeration value="SendSalesEmail"/>
# <xs:enumeration value="OpenSalesEmail"/>
# <xs:enumeration value="ClickSalesEmail"/>
# <xs:enumeration value="AddtoSFDCCampaign"/>
# <xs:enumeration value="RemoveFromSFDCCampaign"/>
# <xs:enumeration value="ChangeStatusInSFDCCampaign"/>
# <xs:enumeration value="ReceiveSalesEmail"/>
# <xs:enumeration value="InterestingMoment"/>
# <xs:enumeration value="RequestCampaign"/>
# <xs:enumeration value="SalesEmailBounced"/>
# <xs:enumeration value="ChangeLeadPartition"/>
# <xs:enumeration value="ChangeRevenueStage"/>
# <xs:enumeration value="ChangeRevenueStageManually"/>
# <xs:enumeration value="ComputeDataValue"/>
# <xs:enumeration value="ChangeStatusInProgression"/>
# <xs:enumeration value="ChangeFieldInProgram"/>
# <xs:enumeration value="EnrichWithJigsaw"/>
def test_getLeadActivity():
    """Fetch the activity history for the known contact and print each record."""
    print "Testing getLeadActivity"
    leadkey = aLeadKey(email=LEADEMAIL)
    # Empty string = no activity-type filter.
    activities = mc.service.getLeadActivity(leadkey,"")
    assert activities.returnCount > 0
    # The record list is wrapped in a one-element outer list by the SOAP layer.
    activityrecords = activities.activityRecordList[0]
    assert len(activityrecords) == activities.returnCount
    for activity in activityrecords:
        print "Activity", activity.activityDateTime,activity.activityType
        attrs = attrs2dict(activity.activityAttributes)
        pprint(attrs)
    print
def test_requestCampaign():
    """Add the known contact to the TESTCAMPAIGN campaign via the API."""
    print "Testing requestCampaign"
    # Look up the numeric campaign id by name.
    campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
    campaignrecords = campaigns.campaignRecordList[0]
    campaignid = None
    for campaign in campaignrecords:
        if campaign.name == TESTCAMPAIGN:
            print "Found", campaign.id, campaign.name, campaign.description
            campaignid = campaign.id
            break
    assert campaignid != None
    # Resolve the contact's numeric id from its email address.
    leadkey = aLeadKey(email=LEADEMAIL)
    lead = mc.service.getLead(leadkey)
    assert lead.count == 1
    lead = lead.leadRecordList.leadRecord[0]
    leadid = lead.Id
    # Add key appears to want ID
    leadkey = aLeadKey(id=leadid)
    lka = aLeadKeyArray([leadkey])
    result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
    assert result.success
    print
def test_deleteLeads():
    """Remove the synthetic test leads by feeding them to DELETECAMPAIGN."""
    # Depends on a campaign that deletes leads as they are added.
    # We also need to know the IDNUM for the contacts.
    lka = []
    for email in TESTEMAILS:
        leadkey = aLeadKey(email=email)
        lead = mc.service.getLead(leadkey)
        assert lead.count == 1
        lead = lead.leadRecordList.leadRecord[0]
        lka.append(aLeadKey(id=lead.Id))
        print "Found lead", lead.Id, lead.Email
    lka = aLeadKeyArray(lka)
    # Look up the delete campaign's numeric id by name.
    campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
    campaignrecords = campaigns.campaignRecordList[0]
    campaignid = None
    for campaign in campaignrecords:
        if campaign.name == DELETECAMPAIGN:
            print "Found campaign", campaign.id, campaign.name, campaign.description
            campaignid = campaign.id
            break
    assert campaignid != None
    result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
    print result
def test_getLeadChanges():
    """Fetch a batch of 10 lead-change records since 2010 and print them."""
    print "Testing getLeadChanges"
    since = datetime.datetime(year=2010,month=1, day=1)
    # "" = start of stream; batch size of 10 must come back full.
    changes = mc.service.getLeadChanges("",since,10)
    assert changes.returnCount == 10
    changerecords = changes.leadChangeRecordList[0]
    assert len(changerecords) == changes.returnCount
    for change in changerecords:
        print "leadChange", change.activityDateTime,change.activityType
        pprint(attrs2dict(change.activityAttributes))
    print
def test_getMultipleLeads():
    """Fetch a batch of 10 leads updated since 2010 and print their attributes."""
    print "Testing getMultipleLeads"
    lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
    # None = start of stream; a batch size of 10 must come back full.
    leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)
    assert leads.returnCount == 10
    leadrecords = leads.leadRecordList[0]
    assert len(leadrecords) == 10
    for lead in leadrecords:
        attrs = attrs2dict(lead.leadAttributeList)
        print "Lead", lead.Id, lead.Email
        pprint(attrs)
    print
def test_getMultipleLeadsUnsubscribedFlag():
    """Same as test_getMultipleLeads but restricted to the Suppressed attribute."""
    print "Testing getMultipleLeadsUnsubscribedFlag"
    lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
    # Ask only for the "Suppressed" attribute on each returned lead.
    attributelist = aStringArray(["Suppressed"])
    leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)
    assert leads.returnCount == 10
    leadrecords = leads.leadRecordList[0]
    assert len(leadrecords) == 10
    for lead in leadrecords:
        attrs = attrs2dict(lead.leadAttributeList)
        print "Lead", lead.Id, lead.Email
        pprint(attrs)
    print
# Valid list operations as of 1.7
# <xs:enumeration value="ADDTOLIST"/>
# <xs:enumeration value="ISMEMBEROFLIST"/>
# <xs:enumeration value="REMOVEFROMLIST"/>
# Valid list types
# <xs:enumeration value="MKTOLISTNAME"/>
# <xs:enumeration value="MKTOSALESUSERID"/>
# <xs:enumeration value="SFDCLEADOWNERID"/>
def test_listOperation():
    """Check ISMEMBEROFLIST for one known member and one known non-member."""
    print "Testing listOperation"
    # Require numeric id fields
    leadkey = aLeadKey(id=1256) # Is member
    leadkey2 = aLeadKey(id=1) # Is not member
    result = mc.service.listOperation("ISMEMBEROFLIST",aListKey(LEADLIST),
                                      aLeadKeyArray([leadkey,leadkey2]),True)
    print "listOperation", result
def test_syncLead():
    """Create-or-update a single lead and print the resulting sync status."""
    print "Testing syncLead"
    # This test does a create the first time only.
    # The name and email are used in the "standard" marketo API examples
    attrs = dict(FirstName="Sam",LastName="Haggy")
    leadrecord = aLeadRecord(email="shaggy@marketo.com",attributes=dict2attrs(attrs))
    result = mc.service.syncLead(leadrecord, True, None)
    print result.leadId, result.syncStatus.status
def test_syncMultipleLeads():
    """Create-or-update the synthetic TESTEMAILS leads in one batched call."""
    print "Testing syncMultipleLeads"
    leadrecords = []
    # TESTEMAILS was derived from TESTNAMES, so the two zip up pairwise.
    for email, (firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):
        leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))
        leadrecords.append(leadrecord)
    lra = aLeadRecordArray(leadrecords)
    print lra
    result = mc.service.syncMultipleLeads(lra)
    print result
    print
def test_listMObjects():
print "Testing listMObjects"
mobjects = mc.service.listMObjects()
compareData("listMObjects", str(mobjects))
print
def test_describeMObject():
print "Testing describeMObject"
mobjects = ["ActivityRecord","LeadRecord","Opportunity","OpportunityPersonRole",]
descriptions = []
for mobject in mobjects:
descriptions.append(str(mc.service.describeMObject(mobject)))
descriptions = "\n".join(descriptions)
compareData("describeMObjects", descriptions)
print
# Run the full integration suite in a fixed order when executed as a script.
# NOTE(review): test_getLeadActivity is invoked twice -- possibly a
# deliberate repeat, confirm before removing.
if __name__ == "__main__":
    test_data()
    test_specification()
    test_getLead()
    test_getCampaignsForSource()
    test_requestCampaign()
    test_getLeadActivity()
    test_getLeadChanges()
    test_listMObjects()
    test_describeMObject()
    test_getLeadActivity()
    test_getMultipleLeads()
    test_getMultipleLeadsUnsubscribedFlag()
    test_listOperation()
    test_syncLead()
    test_syncMultipleLeads()
    test_deleteLeads()
    print "All is well"
|
normal
|
{
"blob_id": "b05a5fcbba74bf4108bc953c6f868eb1f5ca298f",
"index": 638,
"step-1": "from pymarketo.client import MarketoClientFactory\nimport os\nimport sys #@UnusedImport\nimport time #@UnusedImport\nimport datetime #@UnusedImport\nfrom pprint import pprint #@UnresolvedImport\n\nTESTDIR = os.path.split(__file__)[0]\nPACKAGEDIR = os.path.join(TESTDIR,\"..\")\nINIFILE = os.path.join(PACKAGEDIR,\"marketo.ini\")\nDATAFILES=[\"specification\",\"listMObjects\"]\n\n\n# The following must be set up on your marketo account to enable tests\nLEADEMAIL = \"seant@webreply.com\" # Email of an internal contact\nLEADLIST = \"2wr-0\" # List name containing LEADEMAIL contact\nSPECIALCODE = \"WebReplyJobCode\" # If your leads have a custom field that can be\nSPECIALVALUE= \"WEBREPLY\" # asserted for LEADEMAIL, set them here\nTESTCAMPAIGN = \"SOAP API Access test\" # Name of test campaign that has SOAP API trigger enabled\nDELETECAMPAIGN = \"Delete lead\" # Campaign configure to delete leads added to the campaign\n\n# First and last names, and synthetic email addresses for new leads\n# These will be added and then deleted\nTESTDOMAIN=\"webreply.com\"\nTESTNAMES = [(\"One\",\"Test\",TESTDOMAIN),(\"Two\",\"Test\",TESTDOMAIN)]\nTESTEMAILS = [\"%s.%s@%s\" % name for name in TESTNAMES]\n\n\nmc = MarketoClientFactory(INIFILE)\n\ndef compareData(datafile, data):\n path = os.path.join(TESTDIR,datafile+\".txt\")\n return open(path).read().strip() == data.strip()\n\ndef test_data():\n \"Make sure that all the test data files are present\"\n assert os.path.exists(INIFILE)\n for datafile in DATAFILES:\n assert os.path.exists(os.path.join(TESTDIR,datafile+\".txt\"))\n \n# Factory methods to build structures for arguments\ndef aStringArray(strings):\n asa = mc.factory.create(\"ArrayOfString\")\n asa.stringItem = strings\n return asa\n\ndef aLeadKey(email=None,id=None):\n leadkey = mc.factory.create(\"LeadKey\")\n if email:\n leadkey.keyType = \"EMAIL\"\n leadkey.keyValue = email\n elif id:\n leadkey.keyType = \"IDNUM\"\n leadkey.keyValue = id\n return leadkey\n\ndef 
aLeadKeyArray(leads):\n lka = mc.factory.create(\"ArrayOfLeadKey\")\n lka.leadKey = leads\n return lka\n\ndef aListKey(lk, keyType = \"MKTOLISTNAME\"):\n listkey = mc.factory.create(\"ListKey\")\n listkey.keyType = keyType\n listkey.keyValue = lk\n return listkey\n\ndef anAttrib(**kwargs):\n attrib = mc.factory.create(\"Attrib\")\n for key, value in kwargs.items():\n setattr(attrib, key, value)\n return attrib\n\ndef anAttribArray(attribs):\n aa = mc.factory.create(\"ArrayOfAttrib\")\n aa.attrib=attribs\n return aa\n\ndef anAttribute(**kwargs):\n attrib = mc.factory.create(\"Attribute\")\n for key, value in kwargs.items():\n setattr(attrib, key, value)\n return attrib\n\ndef anAttributeArray(attributes):\n aa = mc.factory.create(\"ArrayOfAttribute\")\n aa.attribute=attributes\n return aa\n\ndef aLeadRecord(id=None, email=None, foreignsyspersonid=None,foreignsystype=None,attributes=None):\n lr = mc.factory.create(\"LeadRecord\")\n if id:\n lr.Id = id\n elif email:\n lr.Email = email\n elif foreignsyspersonid:\n assert foreignsystype\n lr.ForeignSysPersonId = foreignsyspersonid\n lr.ForeignSysType = foreignsystype\n if attributes:\n lr.leadAttributeList = attributes\n return lr\n\ndef aLeadRecordArray(leadrecords):\n lra = mc.factory.create(\"ArrayOfLeadRecord\")\n lra.leadRecord = leadrecords\n return lra\n\n# Several things come back with an attribute list that is more pleasant as a dictionary\ndef attrs2dict(attributelist):\n if attributelist is None:\n return {}\n attributelist = attributelist[0]\n d = dict([(attr.attrName,attr.attrValue) for attr in attributelist])\n return d\ndef dict2attrs(d):\n al = []\n for key, value in d.items():\n al.append(anAttribute(attrName=key,attrValue=value))\n return anAttributeArray(al)\n\ndef test_specification():\n compareData(\"specification\", str(mc))\n\n\n# As of 1.7, these are the methods \n# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )\n# UnTested: deleteMObjects(ArrayOfMObject 
mObjectList, )\n# Tested: describeMObject(xs:string objectName, )\n# Requires having a trigger set for the campaign, from Marketo support:\n# Your SOAP request is fine. In order for the getCampaignsForSource call to work, \n# you must have a \"Campaign is Requested\" trigger in the your campaign set to Web Service API.\n# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )\n# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )\n# Tested: getLead(LeadKey leadKey, )\n# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )\n# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )\n# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )\n# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )\n# Tested: listMObjects()\n# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )\n# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )\n# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )\n# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )\n# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )\n# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )\n# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )\n\n# Campaign sources\n# <xs:enumeration value=\"MKTOWS\"/>\n# <xs:enumeration value=\"SALES\"/>\n\ndef 
test_getCampaignsForSource():\n print \"Testing getCampaignsForSource\"\n campaigns = mc.service.getCampaignsForSource(\"MKTOWS\",None,False)\n resultCount = campaigns.returnCount\n campaignrecords = campaigns.campaignRecordList[0]\n assert resultCount==len(campaignrecords), \"Result count '%s' does not match campaign list '%s'\" % (resultCount, len(campaigns))\n for campaign in campaignrecords:\n print campaign.id, campaign.name, campaign.description\n print\n\n\n\ndef test_getLead():\n print \"Testing getLead\"\n leadkey = aLeadKey(email=LEADEMAIL)\n lead = mc.service.getLead(leadkey)\n assert lead.count == 1\n lead = lead.leadRecordList.leadRecord[0]\n attrs = attrs2dict(lead.leadAttributeList)\n print lead.Id, lead.Email\n pprint(attrs)\n if SPECIALCODE and SPECIALVALUE:\n assert attrs[SPECIALCODE] == SPECIALVALUE\n print\n\n \n# As of 1.7, theses are the activity types\n# <xs:enumeration value=\"VisitWebpage\"/>\n# <xs:enumeration value=\"FillOutForm\"/>\n# <xs:enumeration value=\"ClickLink\"/>\n# <xs:enumeration value=\"RegisterForEvent\"/>\n# <xs:enumeration value=\"AttendEvent\"/>\n# <xs:enumeration value=\"SendEmail\"/>\n# <xs:enumeration value=\"EmailDelivered\"/>\n# <xs:enumeration value=\"EmailBounced\"/>\n# <xs:enumeration value=\"UnsubscribeEmail\"/>\n# <xs:enumeration value=\"OpenEmail\"/>\n# <xs:enumeration value=\"ClickEmail\"/>\n# <xs:enumeration value=\"NewLead\"/>\n# <xs:enumeration value=\"ChangeDataValue\"/>\n# <xs:enumeration value=\"LeadAssigned\"/>\n# <xs:enumeration value=\"NewSFDCOpprtnty\"/>\n# <xs:enumeration value=\"Wait\"/>\n# <xs:enumeration value=\"RunSubflow\"/>\n# <xs:enumeration value=\"RemoveFromFlow\"/>\n# <xs:enumeration value=\"PushLeadToSales\"/>\n# <xs:enumeration value=\"CreateTask\"/>\n# <xs:enumeration value=\"ConvertLead\"/>\n# <xs:enumeration value=\"ChangeScore\"/>\n# <xs:enumeration value=\"ChangeOwner\"/>\n# <xs:enumeration value=\"AddToList\"/>\n# <xs:enumeration value=\"RemoveFromList\"/>\n# <xs:enumeration 
value=\"SFDCActivity\"/>\n# <xs:enumeration value=\"EmailBouncedSoft\"/>\n# <xs:enumeration value=\"PushLeadUpdatesToSales\"/>\n# <xs:enumeration value=\"DeleteLeadFromSales\"/>\n# <xs:enumeration value=\"SFDCActivityUpdated\"/>\n# <xs:enumeration value=\"SFDCMergeLeads\"/>\n# <xs:enumeration value=\"MergeLeads\"/>\n# <xs:enumeration value=\"ResolveConflicts\"/>\n# <xs:enumeration value=\"AssocWithOpprtntyInSales\"/>\n# <xs:enumeration value=\"DissocFromOpprtntyInSales\"/>\n# <xs:enumeration value=\"UpdateOpprtntyInSales\"/>\n# <xs:enumeration value=\"DeleteLead\"/>\n# <xs:enumeration value=\"SendAlert\"/>\n# <xs:enumeration value=\"SendSalesEmail\"/>\n# <xs:enumeration value=\"OpenSalesEmail\"/>\n# <xs:enumeration value=\"ClickSalesEmail\"/>\n# <xs:enumeration value=\"AddtoSFDCCampaign\"/>\n# <xs:enumeration value=\"RemoveFromSFDCCampaign\"/>\n# <xs:enumeration value=\"ChangeStatusInSFDCCampaign\"/>\n# <xs:enumeration value=\"ReceiveSalesEmail\"/>\n# <xs:enumeration value=\"InterestingMoment\"/>\n# <xs:enumeration value=\"RequestCampaign\"/>\n# <xs:enumeration value=\"SalesEmailBounced\"/>\n# <xs:enumeration value=\"ChangeLeadPartition\"/>\n# <xs:enumeration value=\"ChangeRevenueStage\"/>\n# <xs:enumeration value=\"ChangeRevenueStageManually\"/>\n# <xs:enumeration value=\"ComputeDataValue\"/>\n# <xs:enumeration value=\"ChangeStatusInProgression\"/>\n# <xs:enumeration value=\"ChangeFieldInProgram\"/>\n# <xs:enumeration value=\"EnrichWithJigsaw\"/>\ndef test_getLeadActivity():\n print \"Testing getLeadActivity\"\n leadkey = aLeadKey(email=LEADEMAIL)\n activities = mc.service.getLeadActivity(leadkey,\"\")\n assert activities.returnCount > 0\n activityrecords = activities.activityRecordList[0]\n assert len(activityrecords) == activities.returnCount\n for activity in activityrecords:\n print \"Activity\", activity.activityDateTime,activity.activityType\n attrs = attrs2dict(activity.activityAttributes)\n pprint(attrs)\n print\n \ndef test_requestCampaign():\n print 
\"Testing requestCampaign\"\n campaigns = mc.service.getCampaignsForSource(\"MKTOWS\",None,False)\n campaignrecords = campaigns.campaignRecordList[0]\n campaignid = None\n for campaign in campaignrecords:\n if campaign.name == TESTCAMPAIGN:\n print \"Found\", campaign.id, campaign.name, campaign.description\n campaignid = campaign.id\n break\n assert campaignid != None\n leadkey = aLeadKey(email=LEADEMAIL)\n lead = mc.service.getLead(leadkey)\n assert lead.count == 1\n lead = lead.leadRecordList.leadRecord[0]\n leadid = lead.Id\n # Add key appears to want ID\n leadkey = aLeadKey(id=leadid)\n lka = aLeadKeyArray([leadkey])\n result = mc.service.requestCampaign(\"MKTOWS\", campaignid, lka) \n assert result.success\n print\n \ndef test_deleteLeads():\n # Depends on a campaign that deletes leads as they ar added\n # We also need to know the IDNUM for the contacts\n lka = []\n for email in TESTEMAILS:\n leadkey = aLeadKey(email=email)\n lead = mc.service.getLead(leadkey)\n assert lead.count == 1\n lead = lead.leadRecordList.leadRecord[0]\n lka.append(aLeadKey(id=lead.Id))\n print \"Found lead\", lead.Id, lead.Email\n lka = aLeadKeyArray(lka)\n campaigns = mc.service.getCampaignsForSource(\"MKTOWS\",None,False)\n campaignrecords = campaigns.campaignRecordList[0]\n campaignid = None\n for campaign in campaignrecords:\n if campaign.name == DELETECAMPAIGN:\n print \"Found campaign\", campaign.id, campaign.name, campaign.description\n campaignid = campaign.id\n break\n assert campaignid != None\n result = mc.service.requestCampaign(\"MKTOWS\", campaignid, lka)\n print result\n \ndef test_getLeadChanges():\n print \"Testing getLeadChanges\"\n since = datetime.datetime(year=2010,month=1, day=1)\n changes = mc.service.getLeadChanges(\"\",since,10)\n assert changes.returnCount == 10\n changerecords = changes.leadChangeRecordList[0]\n assert len(changerecords) == changes.returnCount\n for change in changerecords:\n print \"leadChange\", 
change.activityDateTime,change.activityType\n pprint(attrs2dict(change.activityAttributes))\n print\n\ndef test_getMultipleLeads():\n print \"Testing getMultipleLeads\"\n lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)\n leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)\n assert leads.returnCount == 10\n leadrecords = leads.leadRecordList[0]\n assert len(leadrecords) == 10\n for lead in leadrecords:\n attrs = attrs2dict(lead.leadAttributeList)\n print \"Lead\", lead.Id, lead.Email\n pprint(attrs)\n print\n\ndef test_getMultipleLeadsUnsubscribedFlag():\n print \"Testing getMultipleLeadsUnsubscribedFlag\"\n lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)\n attributelist = aStringArray([\"Suppressed\"])\n leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)\n assert leads.returnCount == 10\n leadrecords = leads.leadRecordList[0]\n assert len(leadrecords) == 10\n for lead in leadrecords:\n attrs = attrs2dict(lead.leadAttributeList)\n print \"Lead\", lead.Id, lead.Email\n pprint(attrs)\n print\n\n# Valid list operations as of 1.7\n# <xs:enumeration value=\"ADDTOLIST\"/>\n# <xs:enumeration value=\"ISMEMBEROFLIST\"/>\n# <xs:enumeration value=\"REMOVEFROMLIST\"/>\n\n# Valid list types\n# <xs:enumeration value=\"MKTOLISTNAME\"/>\n# <xs:enumeration value=\"MKTOSALESUSERID\"/>\n# <xs:enumeration value=\"SFDCLEADOWNERID\"/>\n\ndef test_listOperation():\n print \"Testing listOperation\"\n # Require numeric id fields\n leadkey = aLeadKey(id=1256) # Is member\n leadkey2 = aLeadKey(id=1) # Is not member\n result = mc.service.listOperation(\"ISMEMBEROFLIST\",aListKey(LEADLIST),\n aLeadKeyArray([leadkey,leadkey2]),True)\n print \"listOperation\", result\n \ndef test_syncLead():\n print \"Testing syncLead\"\n # This test does a create the first time only.\n # The name and email are used in the \"standard\" marketo API examples\n attrs = dict(FirstName=\"Sam\",LastName=\"Haggy\")\n leadrecord = 
aLeadRecord(email=\"shaggy@marketo.com\",attributes=dict2attrs(attrs))\n result = mc.service.syncLead(leadrecord, True, None)\n print result.leadId, result.syncStatus.status\n \ndef test_syncMultipleLeads():\n print \"Testing syncMultipleLeads\" \n leadrecords = []\n for email, (firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):\n leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))\n leadrecords.append(leadrecord)\n lra = aLeadRecordArray(leadrecords)\n print lra\n result = mc.service.syncMultipleLeads(lra)\n print result\n print\n \ndef test_listMObjects():\n print \"Testing listMObjects\"\n mobjects = mc.service.listMObjects()\n compareData(\"listMObjects\", str(mobjects))\n print\n \ndef test_describeMObject():\n print \"Testing describeMObject\"\n mobjects = [\"ActivityRecord\",\"LeadRecord\",\"Opportunity\",\"OpportunityPersonRole\",]\n descriptions = []\n for mobject in mobjects:\n descriptions.append(str(mc.service.describeMObject(mobject)))\n descriptions = \"\\n\".join(descriptions)\n compareData(\"describeMObjects\", descriptions)\n print\n\n\nif __name__ == \"__main__\":\n test_data()\n test_specification()\n test_getLead()\n test_getCampaignsForSource() \n test_requestCampaign()\n test_getLeadActivity()\n test_getLeadChanges()\n test_listMObjects()\n test_describeMObject()\n test_getLeadActivity()\n test_getMultipleLeads()\n test_getMultipleLeadsUnsubscribedFlag()\n test_listOperation()\n test_syncLead()\n test_syncMultipleLeads()\n test_deleteLeads()\n print \"All is well\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Root(Controller):
def index(self):
return 'Hello World!'
def request_body(self):
return self.request.body.read()
def response_body(self):
return 'ä'
def request_headers(self):
return self.request.headers['A']
def response_headers(self):
self.response.headers['A'] = 'ä'
return 'ä'
def argument(self, arg):
return arg
def test_index(webapp):
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b'Hello World!'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Root(Controller):
def index(self):
return 'Hello World!'
def request_body(self):
return self.request.body.read()
def response_body(self):
return 'ä'
def request_headers(self):
return self.request.headers['A']
def response_headers(self):
self.response.headers['A'] = 'ä'
return 'ä'
def argument(self, arg):
return arg
def test_index(webapp):
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b'Hello World!'
<|reserved_special_token_0|>
def test_request_headers(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
body = b''
headers = {'A': 'ä'}
connection.request('GET', '/request_headers', body, headers)
response = connection.getresponse()
assert response.status == 200
assert response.reason == 'OK'
s = response.read()
assert s == 'ä'.encode()
connection.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Root(Controller):
def index(self):
return 'Hello World!'
def request_body(self):
return self.request.body.read()
def response_body(self):
return 'ä'
def request_headers(self):
return self.request.headers['A']
def response_headers(self):
self.response.headers['A'] = 'ä'
return 'ä'
def argument(self, arg):
return arg
def test_index(webapp):
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b'Hello World!'
@pytest.mark.parametrize('body', ['ä'.encode(), 'ä'.encode('iso8859-1')])
def test_request_body(webapp, body):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
connection.request('POST', '/request_body', body)
response = connection.getresponse()
assert response.status == 200
assert response.reason == 'OK'
s = response.read()
assert s == body
connection.close()
<|reserved_special_token_0|>
def test_request_headers(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
body = b''
headers = {'A': 'ä'}
connection.request('GET', '/request_headers', body, headers)
response = connection.getresponse()
assert response.status == 200
assert response.reason == 'OK'
s = response.read()
assert s == 'ä'.encode()
connection.close()
<|reserved_special_token_0|>
def test_argument(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
data = 'arg=%E2%86%92'
connection.request('POST', '/argument', data, {'Content-type':
'application/x-www-form-urlencoded'})
response = connection.getresponse()
assert response.status == 200
assert response.reason == 'OK'
s = response.read()
assert s.decode('utf-8') == '→'
connection.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Root(Controller):
def index(self):
return 'Hello World!'
def request_body(self):
return self.request.body.read()
def response_body(self):
return 'ä'
def request_headers(self):
return self.request.headers['A']
def response_headers(self):
self.response.headers['A'] = 'ä'
return 'ä'
def argument(self, arg):
return arg
def test_index(webapp):
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b'Hello World!'
@pytest.mark.parametrize('body', ['ä'.encode(), 'ä'.encode('iso8859-1')])
def test_request_body(webapp, body):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
connection.request('POST', '/request_body', body)
response = connection.getresponse()
assert response.status == 200
assert response.reason == 'OK'
s = response.read()
assert s == body
connection.close()
def test_response_body(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
connection.request('GET', '/response_body')
response = connection.getresponse()
assert response.status == 200
assert response.reason == 'OK'
s = response.read()
assert s == 'ä'.encode()
connection.close()
def test_request_headers(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
body = b''
headers = {'A': 'ä'}
connection.request('GET', '/request_headers', body, headers)
response = connection.getresponse()
assert response.status == 200
assert response.reason == 'OK'
s = response.read()
assert s == 'ä'.encode()
connection.close()
<|reserved_special_token_0|>
def test_argument(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
data = 'arg=%E2%86%92'
connection.request('POST', '/argument', data, {'Content-type':
'application/x-www-form-urlencoded'})
response = connection.getresponse()
assert response.status == 200
assert response.reason == 'OK'
s = response.read()
assert s.decode('utf-8') == '→'
connection.close()
<|reserved_special_token_1|>
#!/usr/bin/env python
from http.client import HTTPConnection
import pytest
from circuits.web import Controller
from circuits.web.client import Client, request
from .helpers import urlopen
class Root(Controller):
def index(self):
return "Hello World!"
def request_body(self):
return self.request.body.read()
def response_body(self):
return "ä"
def request_headers(self):
return self.request.headers["A"]
def response_headers(self):
self.response.headers["A"] = "ä"
return "ä"
def argument(self, arg):
return arg
def test_index(webapp):
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b"Hello World!"
@pytest.mark.parametrize('body', [
"ä".encode(),
"ä".encode('iso8859-1'),
])
def test_request_body(webapp, body):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
connection.request("POST", "/request_body", body)
response = connection.getresponse()
assert response.status == 200
assert response.reason == "OK"
s = response.read()
assert s == body
connection.close()
def test_response_body(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
connection.request("GET", "/response_body")
response = connection.getresponse()
assert response.status == 200
assert response.reason == "OK"
s = response.read()
assert s == "ä".encode()
connection.close()
def test_request_headers(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
body = b""
headers = {"A": "ä"}
connection.request("GET", "/request_headers", body, headers)
response = connection.getresponse()
assert response.status == 200
assert response.reason == "OK"
s = response.read()
assert s == "ä".encode()
connection.close()
def test_response_headers(webapp):
client = Client()
client.start()
client.fire(
request(
"GET",
"http://%s:%s/response_headers" % (
webapp.server.host, webapp.server.port,
),
),
)
while client.response is None:
pass
assert client.response.status == 200
assert client.response.reason == 'OK'
s = client.response.read()
a = client.response.headers.get('A')
assert a == "ä"
assert s == "ä".encode()
def test_argument(webapp):
connection = HTTPConnection(webapp.server.host, webapp.server.port)
connection.connect()
data = 'arg=%E2%86%92'
connection.request("POST", "/argument", data, {"Content-type": "application/x-www-form-urlencoded"})
response = connection.getresponse()
assert response.status == 200
assert response.reason == "OK"
s = response.read()
assert s.decode('utf-8') == '\u2192'
connection.close()
|
flexible
|
{
"blob_id": "eb891341488e125ae8c043788d7264fff4018614",
"index": 6585,
"step-1": "<mask token>\n\n\nclass Root(Controller):\n\n def index(self):\n return 'Hello World!'\n\n def request_body(self):\n return self.request.body.read()\n\n def response_body(self):\n return 'ä'\n\n def request_headers(self):\n return self.request.headers['A']\n\n def response_headers(self):\n self.response.headers['A'] = 'ä'\n return 'ä'\n\n def argument(self, arg):\n return arg\n\n\ndef test_index(webapp):\n f = urlopen(webapp.server.http.base)\n s = f.read()\n assert s == b'Hello World!'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Root(Controller):\n\n def index(self):\n return 'Hello World!'\n\n def request_body(self):\n return self.request.body.read()\n\n def response_body(self):\n return 'ä'\n\n def request_headers(self):\n return self.request.headers['A']\n\n def response_headers(self):\n self.response.headers['A'] = 'ä'\n return 'ä'\n\n def argument(self, arg):\n return arg\n\n\ndef test_index(webapp):\n f = urlopen(webapp.server.http.base)\n s = f.read()\n assert s == b'Hello World!'\n\n\n<mask token>\n\n\ndef test_request_headers(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n body = b''\n headers = {'A': 'ä'}\n connection.request('GET', '/request_headers', body, headers)\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == 'OK'\n s = response.read()\n assert s == 'ä'.encode()\n connection.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Root(Controller):\n\n def index(self):\n return 'Hello World!'\n\n def request_body(self):\n return self.request.body.read()\n\n def response_body(self):\n return 'ä'\n\n def request_headers(self):\n return self.request.headers['A']\n\n def response_headers(self):\n self.response.headers['A'] = 'ä'\n return 'ä'\n\n def argument(self, arg):\n return arg\n\n\ndef test_index(webapp):\n f = urlopen(webapp.server.http.base)\n s = f.read()\n assert s == b'Hello World!'\n\n\n@pytest.mark.parametrize('body', ['ä'.encode(), 'ä'.encode('iso8859-1')])\ndef test_request_body(webapp, body):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n connection.request('POST', '/request_body', body)\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == 'OK'\n s = response.read()\n assert s == body\n connection.close()\n\n\n<mask token>\n\n\ndef test_request_headers(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n body = b''\n headers = {'A': 'ä'}\n connection.request('GET', '/request_headers', body, headers)\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == 'OK'\n s = response.read()\n assert s == 'ä'.encode()\n connection.close()\n\n\n<mask token>\n\n\ndef test_argument(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n data = 'arg=%E2%86%92'\n connection.request('POST', '/argument', data, {'Content-type':\n 'application/x-www-form-urlencoded'})\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == 'OK'\n s = response.read()\n assert s.decode('utf-8') == '→'\n connection.close()\n",
"step-4": "<mask token>\n\n\nclass Root(Controller):\n\n def index(self):\n return 'Hello World!'\n\n def request_body(self):\n return self.request.body.read()\n\n def response_body(self):\n return 'ä'\n\n def request_headers(self):\n return self.request.headers['A']\n\n def response_headers(self):\n self.response.headers['A'] = 'ä'\n return 'ä'\n\n def argument(self, arg):\n return arg\n\n\ndef test_index(webapp):\n f = urlopen(webapp.server.http.base)\n s = f.read()\n assert s == b'Hello World!'\n\n\n@pytest.mark.parametrize('body', ['ä'.encode(), 'ä'.encode('iso8859-1')])\ndef test_request_body(webapp, body):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n connection.request('POST', '/request_body', body)\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == 'OK'\n s = response.read()\n assert s == body\n connection.close()\n\n\ndef test_response_body(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n connection.request('GET', '/response_body')\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == 'OK'\n s = response.read()\n assert s == 'ä'.encode()\n connection.close()\n\n\ndef test_request_headers(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n body = b''\n headers = {'A': 'ä'}\n connection.request('GET', '/request_headers', body, headers)\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == 'OK'\n s = response.read()\n assert s == 'ä'.encode()\n connection.close()\n\n\n<mask token>\n\n\ndef test_argument(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n data = 'arg=%E2%86%92'\n connection.request('POST', '/argument', data, {'Content-type':\n 'application/x-www-form-urlencoded'})\n response = connection.getresponse()\n 
assert response.status == 200\n assert response.reason == 'OK'\n s = response.read()\n assert s.decode('utf-8') == '→'\n connection.close()\n",
"step-5": "#!/usr/bin/env python\n\nfrom http.client import HTTPConnection\n\nimport pytest\n\nfrom circuits.web import Controller\nfrom circuits.web.client import Client, request\n\nfrom .helpers import urlopen\n\n\nclass Root(Controller):\n\n def index(self):\n return \"Hello World!\"\n\n def request_body(self):\n return self.request.body.read()\n\n def response_body(self):\n return \"ä\"\n\n def request_headers(self):\n return self.request.headers[\"A\"]\n\n def response_headers(self):\n self.response.headers[\"A\"] = \"ä\"\n return \"ä\"\n\n def argument(self, arg):\n return arg\n\n\ndef test_index(webapp):\n f = urlopen(webapp.server.http.base)\n s = f.read()\n assert s == b\"Hello World!\"\n\n\n@pytest.mark.parametrize('body', [\n \"ä\".encode(),\n \"ä\".encode('iso8859-1'),\n])\ndef test_request_body(webapp, body):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n\n connection.request(\"POST\", \"/request_body\", body)\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == \"OK\"\n s = response.read()\n assert s == body\n\n connection.close()\n\n\ndef test_response_body(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n\n connection.request(\"GET\", \"/response_body\")\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == \"OK\"\n s = response.read()\n assert s == \"ä\".encode()\n\n connection.close()\n\n\ndef test_request_headers(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n\n body = b\"\"\n headers = {\"A\": \"ä\"}\n connection.request(\"GET\", \"/request_headers\", body, headers)\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == \"OK\"\n s = response.read()\n assert s == \"ä\".encode()\n\n connection.close()\n\n\ndef test_response_headers(webapp):\n client = Client()\n 
client.start()\n client.fire(\n request(\n \"GET\",\n \"http://%s:%s/response_headers\" % (\n webapp.server.host, webapp.server.port,\n ),\n ),\n )\n\n while client.response is None:\n pass\n assert client.response.status == 200\n assert client.response.reason == 'OK'\n s = client.response.read()\n a = client.response.headers.get('A')\n assert a == \"ä\"\n assert s == \"ä\".encode()\n\n\ndef test_argument(webapp):\n connection = HTTPConnection(webapp.server.host, webapp.server.port)\n connection.connect()\n\n data = 'arg=%E2%86%92'\n connection.request(\"POST\", \"/argument\", data, {\"Content-type\": \"application/x-www-form-urlencoded\"})\n response = connection.getresponse()\n assert response.status == 200\n assert response.reason == \"OK\"\n s = response.read()\n assert s.decode('utf-8') == '\\u2192'\n\n connection.close()\n",
"step-ids": [
8,
9,
11,
12,
15
]
}
|
[
8,
9,
11,
12,
15
] |
<|reserved_special_token_0|>
class Test(unittest.TestCase):
<|reserved_special_token_0|>
def test_take_comparison(self):
x = np.arange(1000000.0)
idx = np.random.random_integers(0, 100000.0, 1000000.0)
indexing.take(x, idx)
np.take(x, idx)
with Timer('numba') as nbtimer:
indexing.take(x, idx)
with Timer('numpy') as nptimer:
np.take(x, idx)
ratio = nbtimer.interval / nptimer.interval
print('numba version of take took %0.2f as long as numpy' % ratio)
def test_square_take(self):
X = np.random.random_integers(0, 50, 25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
print(result)
expected = X.take(idx, axis=0).take(idx, axis=1)
print(expected)
np.testing.assert_array_equal(expected, result)
def test_square_take_to_out(self):
X = np.arange(25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
print(result)
expected = X.take(idx, axis=0).take(idx, axis=1)
print(expected)
np.testing.assert_array_equal(expected, result)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_resample_indices(self):
raw_index = np.arange(10)
resampled_index = np.arange(1, 10, 2)
result = indexing.get_resample_indices(raw_index, resampled_index)
expected = np.arange(0, 10, 2)
np.testing.assert_array_equal(expected, result)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(unittest.TestCase):
def test_take(self):
x = np.linspace(0, 100)
idx = np.random.random_integers(0, 50, 20)
result = indexing.take(x, idx)
expected = np.take(x, idx)
np.testing.assert_array_equal(expected, result)
def test_take_comparison(self):
x = np.arange(1000000.0)
idx = np.random.random_integers(0, 100000.0, 1000000.0)
indexing.take(x, idx)
np.take(x, idx)
with Timer('numba') as nbtimer:
indexing.take(x, idx)
with Timer('numpy') as nptimer:
np.take(x, idx)
ratio = nbtimer.interval / nptimer.interval
print('numba version of take took %0.2f as long as numpy' % ratio)
def test_square_take(self):
X = np.random.random_integers(0, 50, 25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
print(result)
expected = X.take(idx, axis=0).take(idx, axis=1)
print(expected)
np.testing.assert_array_equal(expected, result)
def test_square_take_to_out(self):
X = np.arange(25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
print(result)
expected = X.take(idx, axis=0).take(idx, axis=1)
print(expected)
np.testing.assert_array_equal(expected, result)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_resample_indices(self):
raw_index = np.arange(10)
resampled_index = np.arange(1, 10, 2)
result = indexing.get_resample_indices(raw_index, resampled_index)
expected = np.arange(0, 10, 2)
np.testing.assert_array_equal(expected, result)
def test_take_upper_off_diagonal(self):
X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])
idx = np.array([0, 1])
expected = np.array([2])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([1, 2])
expected = np.array([6])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([0, 2])
expected = np.array([3])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([0, 1, 2])
expected = np.array([2, 3, 6])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(unittest.TestCase):
def test_take(self):
x = np.linspace(0, 100)
idx = np.random.random_integers(0, 50, 20)
result = indexing.take(x, idx)
expected = np.take(x, idx)
np.testing.assert_array_equal(expected, result)
def test_take_comparison(self):
x = np.arange(1000000.0)
idx = np.random.random_integers(0, 100000.0, 1000000.0)
indexing.take(x, idx)
np.take(x, idx)
with Timer('numba') as nbtimer:
indexing.take(x, idx)
with Timer('numpy') as nptimer:
np.take(x, idx)
ratio = nbtimer.interval / nptimer.interval
print('numba version of take took %0.2f as long as numpy' % ratio)
def test_square_take(self):
X = np.random.random_integers(0, 50, 25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
print(result)
expected = X.take(idx, axis=0).take(idx, axis=1)
print(expected)
np.testing.assert_array_equal(expected, result)
def test_square_take_to_out(self):
X = np.arange(25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
print(result)
expected = X.take(idx, axis=0).take(idx, axis=1)
print(expected)
np.testing.assert_array_equal(expected, result)
def test_square_take_performance(self):
X = np.arange(25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
result2 = indexing.square_take(X, idx)
np.testing.assert_array_equal(result, result2)
num_tests = 1000
nbts = []
nbts2 = []
npts = []
ms = 10, 20, 40, 80, 160
for m in ms:
X = np.arange(m * m).reshape(m, m)
idx = np.random.random_integers(0, m - 1, m // 2)
result = np.empty((idx.shape[0], idx.shape[0]))
with Timer('numba') as nbt:
for _ in range(num_tests):
indexing.square_take_to_out(X, idx, result)
nbts.append(nbt.interval)
with Timer('numba2') as nbt:
for _ in range(num_tests):
r = indexing.square_take(X, idx)
nbts2.append(nbt.interval)
with Timer('numpy') as npt:
for _ in range(num_tests):
X.take(idx, axis=0).take(idx, axis=1)
npts.append(npt.interval)
plt.plot(ms, nbts, label='nb to out')
plt.plot(ms, nbts2, label='nb new result')
plt.plot(ms, npts, label='np')
plt.title('square_take_to_out performance test')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
<|reserved_special_token_0|>
def test_get_resample_indices(self):
raw_index = np.arange(10)
resampled_index = np.arange(1, 10, 2)
result = indexing.get_resample_indices(raw_index, resampled_index)
expected = np.arange(0, 10, 2)
np.testing.assert_array_equal(expected, result)
def test_take_upper_off_diagonal(self):
X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])
idx = np.array([0, 1])
expected = np.array([2])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([1, 2])
expected = np.array([6])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([0, 2])
expected = np.array([3])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([0, 1, 2])
expected = np.array([2, 3, 6])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import matplotlib.pyplot as plt
from numerical_functions import Timer
import numerical_functions.numba_funcs.indexing as indexing
import numpy as np
import unittest
class Test(unittest.TestCase):
def test_take(self):
x = np.linspace(0, 100)
idx = np.random.random_integers(0, 50, 20)
result = indexing.take(x, idx)
expected = np.take(x, idx)
np.testing.assert_array_equal(expected, result)
def test_take_comparison(self):
x = np.arange(1000000.0)
idx = np.random.random_integers(0, 100000.0, 1000000.0)
indexing.take(x, idx)
np.take(x, idx)
with Timer('numba') as nbtimer:
indexing.take(x, idx)
with Timer('numpy') as nptimer:
np.take(x, idx)
ratio = nbtimer.interval / nptimer.interval
print('numba version of take took %0.2f as long as numpy' % ratio)
def test_square_take(self):
X = np.random.random_integers(0, 50, 25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
print(result)
expected = X.take(idx, axis=0).take(idx, axis=1)
print(expected)
np.testing.assert_array_equal(expected, result)
def test_square_take_to_out(self):
X = np.arange(25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
print(result)
expected = X.take(idx, axis=0).take(idx, axis=1)
print(expected)
np.testing.assert_array_equal(expected, result)
def test_square_take_performance(self):
X = np.arange(25).reshape(5, 5)
idx = np.arange(0, 4, 2)
result = np.empty((idx.shape[0], idx.shape[0]))
indexing.square_take_to_out(X, idx, result)
result2 = indexing.square_take(X, idx)
np.testing.assert_array_equal(result, result2)
num_tests = 1000
nbts = []
nbts2 = []
npts = []
ms = 10, 20, 40, 80, 160
for m in ms:
X = np.arange(m * m).reshape(m, m)
idx = np.random.random_integers(0, m - 1, m // 2)
result = np.empty((idx.shape[0], idx.shape[0]))
with Timer('numba') as nbt:
for _ in range(num_tests):
indexing.square_take_to_out(X, idx, result)
nbts.append(nbt.interval)
with Timer('numba2') as nbt:
for _ in range(num_tests):
r = indexing.square_take(X, idx)
nbts2.append(nbt.interval)
with Timer('numpy') as npt:
for _ in range(num_tests):
X.take(idx, axis=0).take(idx, axis=1)
npts.append(npt.interval)
plt.plot(ms, nbts, label='nb to out')
plt.plot(ms, nbts2, label='nb new result')
plt.plot(ms, npts, label='np')
plt.title('square_take_to_out performance test')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
def test_square_and_rect_take_to_out(self):
X = np.arange(100).reshape((10, 10))
idx0 = np.arange(0, 4, 2)
idx1 = np.arange(4, 6)
result = np.empty((idx0.shape[0], idx0.shape[0] + idx1.shape[0]))
indexing.square_and_rect_take_to_out(X, idx0, idx1, result)
np.testing.assert_array_equal(result[:, :2], indexing.square_take(X,
idx0))
r2 = np.array([[4, 5], [24, 25]])
np.testing.assert_array_equal(r2, result[:, 2:])
def test_get_resample_indices(self):
raw_index = np.arange(10)
resampled_index = np.arange(1, 10, 2)
result = indexing.get_resample_indices(raw_index, resampled_index)
expected = np.arange(0, 10, 2)
np.testing.assert_array_equal(expected, result)
def test_take_upper_off_diagonal(self):
X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])
idx = np.array([0, 1])
expected = np.array([2])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([1, 2])
expected = np.array([6])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([0, 2])
expected = np.array([3])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
idx = np.array([0, 1, 2])
expected = np.array([2, 3, 6])
actual = indexing.take_upper_off_diagonal(X, idx)
np.testing.assert_array_equal(actual, expected)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
'''
Created on 27 Mar 2015
@author: Jon
'''
import matplotlib.pyplot as plt
from numerical_functions import Timer
import numerical_functions.numba_funcs.indexing as indexing
import numpy as np
import unittest
class Test(unittest.TestCase):
def test_take(self):
x = np.linspace( 0, 100 )
idx = np.random.random_integers( 0, 50, 20 )
result = indexing.take( x, idx )
expected = np.take( x, idx )
np.testing.assert_array_equal( expected, result )
def test_take_comparison(self):
x = np.arange( 1e6 )
idx = np.random.random_integers( 0, 1e5, 1e6 )
indexing.take( x, idx )
np.take( x, idx )
with Timer( 'numba' ) as nbtimer:
indexing.take( x, idx )
with Timer( 'numpy' ) as nptimer:
np.take( x, idx )
ratio = nbtimer.interval / nptimer.interval
print( 'numba version of take took %0.2f as long as numpy'%ratio)
def test_square_take(self):
X = np.random.random_integers( 0, 50, 25 ).reshape( 5, 5 )
idx = np.arange( 0, 4, 2 )
result = np.empty( ( idx.shape[0], idx.shape[0] ) )
indexing.square_take_to_out( X, idx, result )
print( result )
expected = X.take( idx, axis=0 ).take( idx, axis=1 )
print( expected )
np.testing.assert_array_equal( expected, result )
def test_square_take_to_out(self):
X = np.arange(25).reshape(5,5)
idx = np.arange( 0, 4, 2 )
result = np.empty( ( idx.shape[0], idx.shape[0] ) )
indexing.square_take_to_out( X, idx, result )
print( result )
expected = X.take( idx, axis=0 ).take( idx, axis=1 )
print( expected )
np.testing.assert_array_equal( expected, result )
def test_square_take_performance(self):
X = np.arange(25).reshape(5,5)
idx = np.arange( 0, 4, 2 )
result = np.empty( ( idx.shape[0], idx.shape[0] ) )
indexing.square_take_to_out( X, idx, result )
result2 = indexing.square_take( X, idx )
np.testing.assert_array_equal( result, result2 )
num_tests = 1000
nbts = []
nbts2 = []
npts = []
ms = ( 10, 20, 40, 80, 160 )#, 320, 640 )
for m in ms:
X = np.arange(m*m).reshape(m,m)
idx = np.random.random_integers( 0, m-1, m//2 )
result = np.empty( ( idx.shape[0], idx.shape[0] ) )
with Timer( 'numba' ) as nbt:
for _ in range( num_tests ):
indexing.square_take_to_out( X, idx, result )
nbts.append( nbt.interval )
with Timer( 'numba2' ) as nbt:
for _ in range( num_tests ):
r=indexing.square_take( X, idx )
nbts2.append( nbt.interval )
with Timer( 'numpy') as npt:
for _ in range(num_tests):
X.take( idx, axis=0 ).take( idx, axis=1 )
npts.append( npt.interval )
plt.plot( ms, nbts, label='nb to out' )
plt.plot( ms, nbts2, label='nb new result')
plt.plot( ms, npts, label='np' )
plt.title( 'square_take_to_out performance test')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
def test_square_and_rect_take_to_out(self):
X = np.arange( 100 ).reshape( (10, 10 ) )
idx0 = np.arange( 0, 4, 2 )
idx1 = np.arange( 4, 6 )
result = np.empty( ( idx0.shape[0], idx0.shape[0]+idx1.shape[0] ) )
indexing.square_and_rect_take_to_out( X, idx0, idx1, result )
np.testing.assert_array_equal( result[:,:2], indexing.square_take( X, idx0 ) )
r2 = np.array( [ [ 4, 5 ], [24, 25 ] ] )
np.testing.assert_array_equal( r2, result[:,2:])
def test_get_resample_indices(self):
raw_index = np.arange( 10 )
resampled_index = np.arange( 1, 10, 2 )
result = indexing.get_resample_indices(raw_index, resampled_index)
expected = np.arange( 0, 10, 2 )
np.testing.assert_array_equal( expected, result )
def test_take_upper_off_diagonal(self):
X = np.array( [[ 1, 2, 3],
[ np.nan, 5, 6],
[ np.nan, np.nan, 9]])
idx = np.array( [ 0, 1 ] )
expected = np.array( [ 2 ] )
actual = indexing.take_upper_off_diagonal( X, idx )
np.testing.assert_array_equal( actual, expected )
idx = np.array( [ 1, 2 ] )
expected = np.array( [ 6 ] )
actual = indexing.take_upper_off_diagonal( X, idx )
np.testing.assert_array_equal( actual, expected )
idx = np.array( [ 0, 2 ] )
expected = np.array( [ 3 ] )
actual = indexing.take_upper_off_diagonal( X, idx )
np.testing.assert_array_equal( actual, expected )
idx = np.array( [ 0, 1, 2 ] )
expected = np.array( [ 2, 3, 6 ] )
actual = indexing.take_upper_off_diagonal( X, idx )
np.testing.assert_array_equal( actual, expected )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
flexible
|
{
"blob_id": "ee80169afd4741854eff8619822a857bbf757575",
"index": 291,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n <mask token>\n\n def test_take_comparison(self):\n x = np.arange(1000000.0)\n idx = np.random.random_integers(0, 100000.0, 1000000.0)\n indexing.take(x, idx)\n np.take(x, idx)\n with Timer('numba') as nbtimer:\n indexing.take(x, idx)\n with Timer('numpy') as nptimer:\n np.take(x, idx)\n ratio = nbtimer.interval / nptimer.interval\n print('numba version of take took %0.2f as long as numpy' % ratio)\n\n def test_square_take(self):\n X = np.random.random_integers(0, 50, 25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n <mask token>\n <mask token>\n\n def test_get_resample_indices(self):\n raw_index = np.arange(10)\n resampled_index = np.arange(1, 10, 2)\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange(0, 10, 2)\n np.testing.assert_array_equal(expected, result)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def test_take(self):\n x = np.linspace(0, 100)\n idx = np.random.random_integers(0, 50, 20)\n result = indexing.take(x, idx)\n expected = np.take(x, idx)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_comparison(self):\n x = np.arange(1000000.0)\n idx = np.random.random_integers(0, 100000.0, 1000000.0)\n indexing.take(x, idx)\n np.take(x, idx)\n with Timer('numba') as nbtimer:\n indexing.take(x, idx)\n with Timer('numpy') as nptimer:\n np.take(x, idx)\n ratio = nbtimer.interval / nptimer.interval\n print('numba version of take took %0.2f as long as numpy' % ratio)\n\n def test_square_take(self):\n X = np.random.random_integers(0, 50, 25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n <mask token>\n <mask token>\n\n def test_get_resample_indices(self):\n raw_index = np.arange(10)\n resampled_index = np.arange(1, 10, 2)\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange(0, 10, 2)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_upper_off_diagonal(self):\n X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])\n idx = np.array([0, 1])\n expected = np.array([2])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([1, 2])\n expected = np.array([6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n 
np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 2])\n expected = np.array([3])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 1, 2])\n expected = np.array([2, 3, 6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def test_take(self):\n x = np.linspace(0, 100)\n idx = np.random.random_integers(0, 50, 20)\n result = indexing.take(x, idx)\n expected = np.take(x, idx)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_comparison(self):\n x = np.arange(1000000.0)\n idx = np.random.random_integers(0, 100000.0, 1000000.0)\n indexing.take(x, idx)\n np.take(x, idx)\n with Timer('numba') as nbtimer:\n indexing.take(x, idx)\n with Timer('numpy') as nptimer:\n np.take(x, idx)\n ratio = nbtimer.interval / nptimer.interval\n print('numba version of take took %0.2f as long as numpy' % ratio)\n\n def test_square_take(self):\n X = np.random.random_integers(0, 50, 25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_performance(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n result2 = indexing.square_take(X, idx)\n np.testing.assert_array_equal(result, result2)\n num_tests = 1000\n nbts = []\n nbts2 = []\n npts = []\n ms = 10, 20, 40, 80, 160\n for m in ms:\n X = np.arange(m * m).reshape(m, m)\n idx = np.random.random_integers(0, m - 1, m // 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n with Timer('numba') as nbt:\n for _ in range(num_tests):\n indexing.square_take_to_out(X, idx, result)\n nbts.append(nbt.interval)\n 
with Timer('numba2') as nbt:\n for _ in range(num_tests):\n r = indexing.square_take(X, idx)\n nbts2.append(nbt.interval)\n with Timer('numpy') as npt:\n for _ in range(num_tests):\n X.take(idx, axis=0).take(idx, axis=1)\n npts.append(npt.interval)\n plt.plot(ms, nbts, label='nb to out')\n plt.plot(ms, nbts2, label='nb new result')\n plt.plot(ms, npts, label='np')\n plt.title('square_take_to_out performance test')\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()\n <mask token>\n\n def test_get_resample_indices(self):\n raw_index = np.arange(10)\n resampled_index = np.arange(1, 10, 2)\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange(0, 10, 2)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_upper_off_diagonal(self):\n X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])\n idx = np.array([0, 1])\n expected = np.array([2])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([1, 2])\n expected = np.array([6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 2])\n expected = np.array([3])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 1, 2])\n expected = np.array([2, 3, 6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nfrom numerical_functions import Timer\nimport numerical_functions.numba_funcs.indexing as indexing\nimport numpy as np\nimport unittest\n\n\nclass Test(unittest.TestCase):\n\n def test_take(self):\n x = np.linspace(0, 100)\n idx = np.random.random_integers(0, 50, 20)\n result = indexing.take(x, idx)\n expected = np.take(x, idx)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_comparison(self):\n x = np.arange(1000000.0)\n idx = np.random.random_integers(0, 100000.0, 1000000.0)\n indexing.take(x, idx)\n np.take(x, idx)\n with Timer('numba') as nbtimer:\n indexing.take(x, idx)\n with Timer('numpy') as nptimer:\n np.take(x, idx)\n ratio = nbtimer.interval / nptimer.interval\n print('numba version of take took %0.2f as long as numpy' % ratio)\n\n def test_square_take(self):\n X = np.random.random_integers(0, 50, 25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n print(result)\n expected = X.take(idx, axis=0).take(idx, axis=1)\n print(expected)\n np.testing.assert_array_equal(expected, result)\n\n def test_square_take_performance(self):\n X = np.arange(25).reshape(5, 5)\n idx = np.arange(0, 4, 2)\n result = np.empty((idx.shape[0], idx.shape[0]))\n indexing.square_take_to_out(X, idx, result)\n result2 = indexing.square_take(X, idx)\n np.testing.assert_array_equal(result, result2)\n num_tests = 1000\n nbts = []\n nbts2 = []\n npts = []\n ms = 10, 20, 40, 80, 160\n for m in ms:\n X = np.arange(m * m).reshape(m, m)\n idx = np.random.random_integers(0, m - 1, m // 2)\n result = 
np.empty((idx.shape[0], idx.shape[0]))\n with Timer('numba') as nbt:\n for _ in range(num_tests):\n indexing.square_take_to_out(X, idx, result)\n nbts.append(nbt.interval)\n with Timer('numba2') as nbt:\n for _ in range(num_tests):\n r = indexing.square_take(X, idx)\n nbts2.append(nbt.interval)\n with Timer('numpy') as npt:\n for _ in range(num_tests):\n X.take(idx, axis=0).take(idx, axis=1)\n npts.append(npt.interval)\n plt.plot(ms, nbts, label='nb to out')\n plt.plot(ms, nbts2, label='nb new result')\n plt.plot(ms, npts, label='np')\n plt.title('square_take_to_out performance test')\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()\n\n def test_square_and_rect_take_to_out(self):\n X = np.arange(100).reshape((10, 10))\n idx0 = np.arange(0, 4, 2)\n idx1 = np.arange(4, 6)\n result = np.empty((idx0.shape[0], idx0.shape[0] + idx1.shape[0]))\n indexing.square_and_rect_take_to_out(X, idx0, idx1, result)\n np.testing.assert_array_equal(result[:, :2], indexing.square_take(X,\n idx0))\n r2 = np.array([[4, 5], [24, 25]])\n np.testing.assert_array_equal(r2, result[:, 2:])\n\n def test_get_resample_indices(self):\n raw_index = np.arange(10)\n resampled_index = np.arange(1, 10, 2)\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange(0, 10, 2)\n np.testing.assert_array_equal(expected, result)\n\n def test_take_upper_off_diagonal(self):\n X = np.array([[1, 2, 3], [np.nan, 5, 6], [np.nan, np.nan, 9]])\n idx = np.array([0, 1])\n expected = np.array([2])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([1, 2])\n expected = np.array([6])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 2])\n expected = np.array([3])\n actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n idx = np.array([0, 1, 2])\n expected = np.array([2, 3, 6])\n 
actual = indexing.take_upper_off_diagonal(X, idx)\n np.testing.assert_array_equal(actual, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "'''\nCreated on 27 Mar 2015\n\n@author: Jon\n'''\n\nimport matplotlib.pyplot as plt\nfrom numerical_functions import Timer\nimport numerical_functions.numba_funcs.indexing as indexing\nimport numpy as np\nimport unittest\n\n\nclass Test(unittest.TestCase):\n \n def test_take(self):\n x = np.linspace( 0, 100 )\n idx = np.random.random_integers( 0, 50, 20 )\n result = indexing.take( x, idx )\n expected = np.take( x, idx )\n np.testing.assert_array_equal( expected, result )\n \n def test_take_comparison(self):\n x = np.arange( 1e6 )\n idx = np.random.random_integers( 0, 1e5, 1e6 )\n \n indexing.take( x, idx )\n np.take( x, idx )\n \n with Timer( 'numba' ) as nbtimer:\n indexing.take( x, idx )\n \n with Timer( 'numpy' ) as nptimer:\n np.take( x, idx )\n \n ratio = nbtimer.interval / nptimer.interval\n print( 'numba version of take took %0.2f as long as numpy'%ratio) \n \n \n def test_square_take(self):\n\n X = np.random.random_integers( 0, 50, 25 ).reshape( 5, 5 )\n idx = np.arange( 0, 4, 2 )\n result = np.empty( ( idx.shape[0], idx.shape[0] ) )\n indexing.square_take_to_out( X, idx, result )\n print( result )\n \n expected = X.take( idx, axis=0 ).take( idx, axis=1 )\n print( expected )\n \n np.testing.assert_array_equal( expected, result )\n \n def test_square_take_to_out(self):\n X = np.arange(25).reshape(5,5)\n idx = np.arange( 0, 4, 2 )\n result = np.empty( ( idx.shape[0], idx.shape[0] ) )\n indexing.square_take_to_out( X, idx, result )\n print( result )\n \n expected = X.take( idx, axis=0 ).take( idx, axis=1 )\n print( expected )\n \n np.testing.assert_array_equal( expected, result )\n \n def test_square_take_performance(self):\n X = np.arange(25).reshape(5,5)\n idx = np.arange( 0, 4, 2 )\n result = np.empty( ( idx.shape[0], idx.shape[0] ) )\n indexing.square_take_to_out( X, idx, result )\n \n result2 = indexing.square_take( X, idx )\n \n np.testing.assert_array_equal( result, result2 )\n\n num_tests = 1000\n \n nbts = []\n nbts2 = []\n npts = [] \n \n 
ms = ( 10, 20, 40, 80, 160 )#, 320, 640 )\n for m in ms:\n X = np.arange(m*m).reshape(m,m)\n idx = np.random.random_integers( 0, m-1, m//2 )\n result = np.empty( ( idx.shape[0], idx.shape[0] ) )\n with Timer( 'numba' ) as nbt:\n for _ in range( num_tests ):\n indexing.square_take_to_out( X, idx, result )\n nbts.append( nbt.interval ) \n \n with Timer( 'numba2' ) as nbt:\n for _ in range( num_tests ):\n r=indexing.square_take( X, idx ) \n nbts2.append( nbt.interval ) \n \n with Timer( 'numpy') as npt:\n for _ in range(num_tests):\n X.take( idx, axis=0 ).take( idx, axis=1 )\n npts.append( npt.interval ) \n \n plt.plot( ms, nbts, label='nb to out' )\n plt.plot( ms, nbts2, label='nb new result')\n plt.plot( ms, npts, label='np' )\n plt.title( 'square_take_to_out performance test')\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()\n \n def test_square_and_rect_take_to_out(self):\n \n X = np.arange( 100 ).reshape( (10, 10 ) )\n idx0 = np.arange( 0, 4, 2 )\n idx1 = np.arange( 4, 6 )\n \n result = np.empty( ( idx0.shape[0], idx0.shape[0]+idx1.shape[0] ) )\n indexing.square_and_rect_take_to_out( X, idx0, idx1, result )\n \n np.testing.assert_array_equal( result[:,:2], indexing.square_take( X, idx0 ) )\n r2 = np.array( [ [ 4, 5 ], [24, 25 ] ] )\n np.testing.assert_array_equal( r2, result[:,2:]) \n\n def test_get_resample_indices(self):\n \n raw_index = np.arange( 10 )\n resampled_index = np.arange( 1, 10, 2 )\n\n result = indexing.get_resample_indices(raw_index, resampled_index)\n expected = np.arange( 0, 10, 2 )\n \n np.testing.assert_array_equal( expected, result )\n\n def test_take_upper_off_diagonal(self):\n\n X = np.array( [[ 1, 2, 3],\n [ np.nan, 5, 6],\n [ np.nan, np.nan, 9]])\n\n idx = np.array( [ 0, 1 ] )\n expected = np.array( [ 2 ] )\n actual = indexing.take_upper_off_diagonal( X, idx )\n np.testing.assert_array_equal( actual, expected )\n\n idx = np.array( [ 1, 2 ] )\n expected = np.array( [ 6 ] )\n actual = indexing.take_upper_off_diagonal( 
X, idx )\n np.testing.assert_array_equal( actual, expected )\n\n idx = np.array( [ 0, 2 ] )\n expected = np.array( [ 3 ] )\n actual = indexing.take_upper_off_diagonal( X, idx )\n np.testing.assert_array_equal( actual, expected )\n\n idx = np.array( [ 0, 1, 2 ] )\n expected = np.array( [ 2, 3, 6 ] )\n actual = indexing.take_upper_off_diagonal( X, idx )\n np.testing.assert_array_equal( actual, expected )\n\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()",
"step-ids": [
5,
7,
8,
11,
12
]
}
|
[
5,
7,
8,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Hello world')
print('Hello again')
print('Hello again')
<|reserved_special_token_1|>
print("""Hello world""")
print("Hello again")
print('Hello again')
|
flexible
|
{
"blob_id": "fe82a46a7965b27729ff5bd61c1059416c96cae7",
"index": 8015,
"step-1": "<mask token>\n",
"step-2": "print('Hello world')\nprint('Hello again')\nprint('Hello again')\n",
"step-3": "print(\"\"\"Hello world\"\"\")\nprint(\"Hello again\")\nprint('Hello again')",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ('__title__', '__summary__', '__version__', '__author__',
'__license__', '__copyright__')
__title__ = 'mupub'
__summary__ = 'Musical score publishing utility for the Mutopia Project'
<|reserved_special_token_0|>
__version__ = '1.0.8'
__author__ = 'Glen Larsen, Chris Sawer'
__author_email__ = 'glenl.glx@gmail.com'
__uri__ = 'http://mutopiaproject.org/'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 The Mutopia Project'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ('__title__', '__summary__', '__version__', '__author__',
'__license__', '__copyright__')
__title__ = 'mupub'
__summary__ = 'Musical score publishing utility for the Mutopia Project'
<|reserved_special_token_0|>
__version__ = '1.0.8'
__author__ = 'Glen Larsen, Chris Sawer'
__author_email__ = 'glenl.glx@gmail.com'
__uri__ = 'http://mutopiaproject.org/'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 The Mutopia Project'
from .assets import collect_assets
from .commands.build import build
from .commands.check import check
from .commands.init import init
from .commands.tag import tag
from .commands.clean import clean
from .config import CONFIG_DICT, CONFIG_DIR, getDBPath
from .config import test_config, saveConfig
from .core import MUTOPIA_BASE, FTP_BASE, URL_BASE
from .core import id_from_footer
from .exceptions import BadConfiguration, IncompleteBuild, TagProcessException
from .header import Loader, LYLoader, VersionLoader
from .header import RawLoader, Header, REQUIRED_FIELDS
from .header import find_header
from .lily import LyLocator, LyVersion
from .validate import Validator, DBValidator, in_repository
from .tagedit import tag_header, tag_file
from .rdfu import NS, MuRDF
from .utils import resolve_input, resolve_lysfile
<|reserved_special_token_1|>
""" mupub module.
"""
__all__ = (
'__title__', '__summary__', '__version__',
'__author__', '__license__', '__copyright__',
)
__title__ = 'mupub'
__summary__ = 'Musical score publishing utility for the Mutopia Project'
"""Versioning:
This utility follows a MAJOR . MINOR . EDIT format. Upon a major
release, the MAJOR number is incremented and the MINOR is zeroed.
During development of an upcoming release, the MINOR number may be
incremented.
"""
__version__ = '1.0.8'
__author__ = 'Glen Larsen, Chris Sawer'
__author_email__= 'glenl.glx@gmail.com'
__uri__ = 'http://mutopiaproject.org/'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 The Mutopia Project'
from .assets import collect_assets
from .commands.build import build
from .commands.check import check
from .commands.init import init
from .commands.tag import tag
from .commands.clean import clean
from .config import CONFIG_DICT, CONFIG_DIR, getDBPath
from .config import test_config, saveConfig
from .core import MUTOPIA_BASE, FTP_BASE, URL_BASE
from .core import id_from_footer
from .exceptions import BadConfiguration, IncompleteBuild, TagProcessException
from .header import Loader, LYLoader, VersionLoader
from .header import RawLoader, Header, REQUIRED_FIELDS
from .header import find_header
from .lily import LyLocator, LyVersion
from .validate import Validator, DBValidator, in_repository
from .tagedit import tag_header, tag_file
from .rdfu import NS, MuRDF
from .utils import resolve_input,resolve_lysfile
|
flexible
|
{
"blob_id": "eabf06481509962652812af67ad59da5cfe30fae",
"index": 1,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ('__title__', '__summary__', '__version__', '__author__',\n '__license__', '__copyright__')\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n<mask token>\n__version__ = '1.0.8'\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__ = 'glenl.glx@gmail.com'\n__uri__ = 'http://mutopiaproject.org/'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\n<mask token>\n",
"step-3": "<mask token>\n__all__ = ('__title__', '__summary__', '__version__', '__author__',\n '__license__', '__copyright__')\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n<mask token>\n__version__ = '1.0.8'\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__ = 'glenl.glx@gmail.com'\n__uri__ = 'http://mutopiaproject.org/'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\nfrom .assets import collect_assets\nfrom .commands.build import build\nfrom .commands.check import check\nfrom .commands.init import init\nfrom .commands.tag import tag\nfrom .commands.clean import clean\nfrom .config import CONFIG_DICT, CONFIG_DIR, getDBPath\nfrom .config import test_config, saveConfig\nfrom .core import MUTOPIA_BASE, FTP_BASE, URL_BASE\nfrom .core import id_from_footer\nfrom .exceptions import BadConfiguration, IncompleteBuild, TagProcessException\nfrom .header import Loader, LYLoader, VersionLoader\nfrom .header import RawLoader, Header, REQUIRED_FIELDS\nfrom .header import find_header\nfrom .lily import LyLocator, LyVersion\nfrom .validate import Validator, DBValidator, in_repository\nfrom .tagedit import tag_header, tag_file\nfrom .rdfu import NS, MuRDF\nfrom .utils import resolve_input, resolve_lysfile\n",
"step-4": "\"\"\" mupub module.\n\"\"\"\n\n__all__ = (\n '__title__', '__summary__', '__version__',\n '__author__', '__license__', '__copyright__',\n)\n\n\n__title__ = 'mupub'\n__summary__ = 'Musical score publishing utility for the Mutopia Project'\n\n\"\"\"Versioning:\nThis utility follows a MAJOR . MINOR . EDIT format. Upon a major\nrelease, the MAJOR number is incremented and the MINOR is zeroed.\nDuring development of an upcoming release, the MINOR number may be\nincremented.\n\n\"\"\"\n__version__ = '1.0.8'\n\n__author__ = 'Glen Larsen, Chris Sawer'\n__author_email__= 'glenl.glx@gmail.com'\n__uri__ = 'http://mutopiaproject.org/'\n\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018 The Mutopia Project'\n\nfrom .assets import collect_assets\nfrom .commands.build import build\nfrom .commands.check import check\nfrom .commands.init import init\nfrom .commands.tag import tag\nfrom .commands.clean import clean\nfrom .config import CONFIG_DICT, CONFIG_DIR, getDBPath\nfrom .config import test_config, saveConfig\nfrom .core import MUTOPIA_BASE, FTP_BASE, URL_BASE\nfrom .core import id_from_footer\nfrom .exceptions import BadConfiguration, IncompleteBuild, TagProcessException\nfrom .header import Loader, LYLoader, VersionLoader\nfrom .header import RawLoader, Header, REQUIRED_FIELDS\nfrom .header import find_header\nfrom .lily import LyLocator, LyVersion\nfrom .validate import Validator, DBValidator, in_repository\nfrom .tagedit import tag_header, tag_file\nfrom .rdfu import NS, MuRDF\nfrom .utils import resolve_input,resolve_lysfile\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_backfill_totals_works_for_correct_dates(mocker, notify_api):
send_mock = mocker.patch(
'app.commands.send_total_sent_notifications_to_performance_platform')
backfill_performance_platform_totals.callback.__wrapped__(datetime(2017,
8, 1), datetime(2017, 8, 3))
assert send_mock.call_count == 3
send_mock.assert_any_call(datetime(2017, 8, 1))
send_mock.assert_any_call(datetime(2017, 8, 2))
send_mock.assert_any_call(datetime(2017, 8, 3))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_backfill_processing_time_works_for_correct_dates(mocker, notify_api):
send_mock = mocker.patch(
'app.commands.send_processing_time_for_start_and_end')
backfill_processing_time.callback.__wrapped__(datetime(2017, 8, 1),
datetime(2017, 8, 3))
assert send_mock.call_count == 3
send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,
4, 4, 0))
send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,
4, 4, 0))
send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,
4, 4, 0))
def test_backfill_totals_works_for_correct_dates(mocker, notify_api):
send_mock = mocker.patch(
'app.commands.send_total_sent_notifications_to_performance_platform')
backfill_performance_platform_totals.callback.__wrapped__(datetime(2017,
8, 1), datetime(2017, 8, 3))
assert send_mock.call_count == 3
send_mock.assert_any_call(datetime(2017, 8, 1))
send_mock.assert_any_call(datetime(2017, 8, 2))
send_mock.assert_any_call(datetime(2017, 8, 3))
<|reserved_special_token_1|>
from datetime import datetime
from app.commands import backfill_performance_platform_totals, backfill_processing_time
def test_backfill_processing_time_works_for_correct_dates(mocker, notify_api):
send_mock = mocker.patch(
'app.commands.send_processing_time_for_start_and_end')
backfill_processing_time.callback.__wrapped__(datetime(2017, 8, 1),
datetime(2017, 8, 3))
assert send_mock.call_count == 3
send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,
4, 4, 0))
send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,
4, 4, 0))
send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,
4, 4, 0))
def test_backfill_totals_works_for_correct_dates(mocker, notify_api):
send_mock = mocker.patch(
'app.commands.send_total_sent_notifications_to_performance_platform')
backfill_performance_platform_totals.callback.__wrapped__(datetime(2017,
8, 1), datetime(2017, 8, 3))
assert send_mock.call_count == 3
send_mock.assert_any_call(datetime(2017, 8, 1))
send_mock.assert_any_call(datetime(2017, 8, 2))
send_mock.assert_any_call(datetime(2017, 8, 3))
<|reserved_special_token_1|>
from datetime import datetime
from app.commands import backfill_performance_platform_totals, backfill_processing_time
# This test assumes the local timezone is EST
def test_backfill_processing_time_works_for_correct_dates(mocker, notify_api):
    """Backfilling three days should send one processing-time window per day."""
    send_mock = mocker.patch("app.commands.send_processing_time_for_start_and_end")
    # backfill_processing_time is a click.Command object - if you try invoking the callback on its own, it
    # throws a `RuntimeError: There is no active click context.` - so get at the original function using __wrapped__
    backfill_processing_time.callback.__wrapped__(datetime(2017, 8, 1), datetime(2017, 8, 3))
    assert send_mock.call_count == 3
    # NOTE(review): the three assertions below all check the *same* arguments
    # (the 8/3 -> 8/4 window), so together they verify only one of the three
    # recorded calls.  Presumably they were meant to cover the 8/1, 8/2 and
    # 8/3 windows respectively -- confirm against the command's output.
    send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8, 4, 4, 0))
    send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8, 4, 4, 0))
    send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8, 4, 4, 0))
def test_backfill_totals_works_for_correct_dates(mocker, notify_api):
send_mock = mocker.patch("app.commands.send_total_sent_notifications_to_performance_platform")
# backfill_processing_time is a click.Command object - if you try invoking the callback on its own, it
# throws a `RuntimeError: There is no active click context.` - so get at the original function using __wrapped__
backfill_performance_platform_totals.callback.__wrapped__(datetime(2017, 8, 1), datetime(2017, 8, 3))
assert send_mock.call_count == 3
send_mock.assert_any_call(datetime(2017, 8, 1))
send_mock.assert_any_call(datetime(2017, 8, 2))
send_mock.assert_any_call(datetime(2017, 8, 3))
|
flexible
|
{
"blob_id": "fcb1285648f6728e3dad31ad4b602fa4e5c5b422",
"index": 9230,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_backfill_totals_works_for_correct_dates(mocker, notify_api):\n send_mock = mocker.patch(\n 'app.commands.send_total_sent_notifications_to_performance_platform')\n backfill_performance_platform_totals.callback.__wrapped__(datetime(2017,\n 8, 1), datetime(2017, 8, 3))\n assert send_mock.call_count == 3\n send_mock.assert_any_call(datetime(2017, 8, 1))\n send_mock.assert_any_call(datetime(2017, 8, 2))\n send_mock.assert_any_call(datetime(2017, 8, 3))\n",
"step-3": "<mask token>\n\n\ndef test_backfill_processing_time_works_for_correct_dates(mocker, notify_api):\n send_mock = mocker.patch(\n 'app.commands.send_processing_time_for_start_and_end')\n backfill_processing_time.callback.__wrapped__(datetime(2017, 8, 1),\n datetime(2017, 8, 3))\n assert send_mock.call_count == 3\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,\n 4, 4, 0))\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,\n 4, 4, 0))\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,\n 4, 4, 0))\n\n\ndef test_backfill_totals_works_for_correct_dates(mocker, notify_api):\n send_mock = mocker.patch(\n 'app.commands.send_total_sent_notifications_to_performance_platform')\n backfill_performance_platform_totals.callback.__wrapped__(datetime(2017,\n 8, 1), datetime(2017, 8, 3))\n assert send_mock.call_count == 3\n send_mock.assert_any_call(datetime(2017, 8, 1))\n send_mock.assert_any_call(datetime(2017, 8, 2))\n send_mock.assert_any_call(datetime(2017, 8, 3))\n",
"step-4": "from datetime import datetime\nfrom app.commands import backfill_performance_platform_totals, backfill_processing_time\n\n\ndef test_backfill_processing_time_works_for_correct_dates(mocker, notify_api):\n send_mock = mocker.patch(\n 'app.commands.send_processing_time_for_start_and_end')\n backfill_processing_time.callback.__wrapped__(datetime(2017, 8, 1),\n datetime(2017, 8, 3))\n assert send_mock.call_count == 3\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,\n 4, 4, 0))\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,\n 4, 4, 0))\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8,\n 4, 4, 0))\n\n\ndef test_backfill_totals_works_for_correct_dates(mocker, notify_api):\n send_mock = mocker.patch(\n 'app.commands.send_total_sent_notifications_to_performance_platform')\n backfill_performance_platform_totals.callback.__wrapped__(datetime(2017,\n 8, 1), datetime(2017, 8, 3))\n assert send_mock.call_count == 3\n send_mock.assert_any_call(datetime(2017, 8, 1))\n send_mock.assert_any_call(datetime(2017, 8, 2))\n send_mock.assert_any_call(datetime(2017, 8, 3))\n",
"step-5": "from datetime import datetime\n\nfrom app.commands import backfill_performance_platform_totals, backfill_processing_time\n\n\n# This test assumes the local timezone is EST\ndef test_backfill_processing_time_works_for_correct_dates(mocker, notify_api):\n send_mock = mocker.patch(\"app.commands.send_processing_time_for_start_and_end\")\n\n # backfill_processing_time is a click.Command object - if you try invoking the callback on its own, it\n # throws a `RuntimeError: There is no active click context.` - so get at the original function using __wrapped__\n backfill_processing_time.callback.__wrapped__(datetime(2017, 8, 1), datetime(2017, 8, 3))\n\n assert send_mock.call_count == 3\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8, 4, 4, 0))\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8, 4, 4, 0))\n send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8, 4, 4, 0))\n\n\ndef test_backfill_totals_works_for_correct_dates(mocker, notify_api):\n send_mock = mocker.patch(\"app.commands.send_total_sent_notifications_to_performance_platform\")\n\n # backfill_processing_time is a click.Command object - if you try invoking the callback on its own, it\n # throws a `RuntimeError: There is no active click context.` - so get at the original function using __wrapped__\n backfill_performance_platform_totals.callback.__wrapped__(datetime(2017, 8, 1), datetime(2017, 8, 3))\n\n assert send_mock.call_count == 3\n send_mock.assert_any_call(datetime(2017, 8, 1))\n send_mock.assert_any_call(datetime(2017, 8, 2))\n send_mock.assert_any_call(datetime(2017, 8, 3))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# coding: utf-8
# # import re
# # import urllib
# #
# #
# # def getHtml(url):
# # page = urllib.urlopen(url)
# # html = page.read()
# # return html
# #
# #
# # def getMp4(html):
# # r = r"href='(http.*\.mp4)'"
# # re_mp4 = re.compile(r)
# # mp4List = re.findall(re_mp4, html)
# # filename = 1
# # for mp4url in mp4List:
# # urllib.urlretrieve(mp4url, "%s.mp4" % filename)
# # print 'file "%s.mp4" done' % filename
# # filename += 1
# # url = "http://v.youku.com/v_show/id_XMjYxMjEyNDU0MA==.html"
# # html = getHtml(url)
# # getMp4(html)
#
#
#
#
# # import re
# #
# #
# # pattern = re.compile(r'hello world')
# # match = pattern.match('hello world!')
# #
# # if match:
# # print match.group()
#
#
# #
# # # 冒泡排序
# # array = [4, 5, 0, 2, 3, 7, 1, 6]
# #
# # for i in range(len(array) - 1, 1, -1):
# # for j in range(0, i):
# # if array[j] > array[j + 1]:
# # array[j], array[j + 1] = array[j + 1], array[j]
# # print array
#
# # theString = 'saaaay yes no yaaaass'
# # print theString.strip('say') #say后面有空格
#
#
#
# # -*- coding:utf-8 -*-
# import urllib
# import urllib2
# import re
# import thread
# import time
#
#
# # 糗事百科爬虫类
# class QSBK:
# # 初始化方法,定义一些变量
# def __init__(self):
# self.pageIndex = 1
# self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
# # 初始化headers
# self.headers = {'User-Agent': self.user_agent}
# # 存放段子的变量,每一个元素是每一页的段子们
# self.stories = []
# # 存放程序是否继续运行的变量
# self.enable = False
#
# # 传入某一页的索引获得页面代码
# def getPage(self, pageIndex):
# try:
# url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)
# # 构建请求的request
# request = urllib2.Request(url, headers=self.headers)
# # 利用urlopen获取页面代码
# response = urllib2.urlopen(request)
# # 将页面转化为UTF-8编码
# pageCode = response.read().decode('utf-8')
# return pageCode
#
# except urllib2.URLError, e:
# if hasattr(e, "reason"):
# print u"连接糗事百科失败,错误原因", e.reason
# return None
#
# # 传入某一页代码,返回本页不带图片的段子列表
# def getPageItems(self, pageIndex):
# pageCode = self.getPage(pageIndex)
# if not pageCode:
# print "页面加载失败...."
# return None
# # pattern = re.compile('<div class=author clearfix>.*?<img src=.*? alt=(.*?)>.*?<div.*?' +
# # '<span>(.*?)</span>.*?stats-vote><i class=number>(.*?)</i>.*?' +
# # '<i class=number>(.*?)</i>', re.S)
# pattern = re.compile('h2>(.*?)</h2.*?content">(.*?)</.*?number">(.*?)</', re.S)
# items = re.findall(pattern, pageCode)
# # 用来存储每页的段子们
# pageStories = []
# # 遍历正则表达式匹配的信息
# # for item in items:
# # # 是否含有图片
# # haveImg = re.search("img", item[3])
# # # 如果不含有图片,把它加入list中
# # if not haveImg:
# # replaceBR = re.compile('<br/>')
# # text = re.sub(replaceBR, "\n", item[1])
# # # item[0]是一个段子的发布者,item[1]是内容,item[2]是发布时间,item[4]是点赞数
# # pageStories.append([item[0].strip(), text.strip(), item[2].strip(), item[4].strip()])
# # return pageStories
# for item in items:
# pageStories.append([item[0].strip(), item[1].strip(), item[2].strip()])
# return pageStories
#
# # 加载并提取页面的内容,加入到列表中
# def loadPage(self):
# # 如果当前未看的页数少于2页,则加载新一页
# if self.enable == True:
# if len(self.stories) < 2:
# # 获取新一页
# pageStories = self.getPageItems(self.pageIndex)
# # 将该页的段子存放到全局list中
# if pageStories:
# self.stories.append(pageStories)
# # 获取完之后页码索引加一,表示下次读取下一页
# self.pageIndex += 1
#
# # 调用该方法,每次敲回车打印输出一个段子
# def getOneStory(self, pageStories, page):
# # 遍历一页的段子
# for story in pageStories:
# # 等待用户输入
# input = raw_input()
# # 每当输入回车一次,判断一下是否要加载新页面
# self.loadPage()
# # 如果输入Q则程序结束
# if input == "Q":
# self.enable = False
# return
# print u"第%d页\t发布人:%s\t 赞:%s\n%s" % (page, story[0], story[2], story[1])
#
# # 开始方法
# def start(self):
# print u"正在读取糗事百科,按回车查看新段子,Q退出"
# # 使变量为True,程序可以正常运行
# self.enable = True
# # 先加载一页内容
# self.loadPage()
# # 局部变量,控制当前读到了第几页
# nowPage = 0
# while self.enable:
# if len(self.stories) > 0:
# # 从全局list中获取一页的段子
# pageStories = self.stories[0]
# # 当前读到的页数加一
# nowPage += 1
# # 将全局list中第一个元素删除,因为已经取出
# del self.stories[0]
# # 输出该页的段子
# self.getOneStory(pageStories, nowPage)
#
#
# spider = QSBK()
# spider.start()
#
# print [x * x for x in range(1, 11) if x % 2 == 0]
def _odd_iter():
    """Yield the odd integers 3, 5, 7, ... forever."""
    candidate = 3
    while True:
        yield candidate
        candidate += 2
def _not_divisible(n):
    """Return a predicate that is True for values with a positive remainder mod n."""
    def predicate(value):
        return value % n > 0
    return predicate
def primes():
    """Generate the primes 2, 3, 5, ... lazily via a chain of filters."""
    yield 2
    stream = _odd_iter()  # odd candidates 3, 5, 7, ...
    while True:
        prime = next(stream)  # the head of the stream is always prime
        yield prime
        # drop every remaining multiple of the prime just emitted
        stream = filter(_not_divisible(prime), stream)
def main():
    """Print every prime below 1000, one per line."""
    for prime in primes():
        if prime >= 1000:
            break
        print(prime)
def is_palindrome(n):
    """Return True if n's decimal digits read the same forwards and backwards."""
    reversed_digits = str(n)[::-1]
    return int(reversed_digits) == n
def count():
    """Return the squares [1, 4, 9], computed eagerly for i in 1..3.

    (Demonstrates the fix for the late-binding closure pitfall: the square
    is evaluated immediately for each i rather than captured in a closure.)
    """
    return [i * i for i in range(1, 4)]
from PIL import Image
def changeImage(src='C:/Users/Administrator/Desktop/1111.jpg',
                dst='C:/Users/Administrator/Desktop/11111.jpg',
                max_size=(1000, 500)):
    """Shrink the image at *src* to fit within *max_size* and save it to *dst*.

    Generalized from the original hard-coded version: called with no
    arguments it behaves exactly as before (resizing the fixed desktop
    file), but callers may now supply their own paths and bounding size.

    :param src: path of the image to read.
    :param dst: path the resized JPEG is written to.
    :param max_size: (width, height) bounding box for the thumbnail.
    """
    image = Image.open(src)
    # Report the original file's metadata before resizing.
    print(image.format, image.size, image.mode)
    # thumbnail() resizes in place and preserves the aspect ratio.
    image.thumbnail(max_size)
    image.save(dst, 'JPEG')
from multiprocessing import Process, Pool
import os, time, random
def run_proc(name):
    """Demo multiprocessing target: report the child's name and pid to stdout."""
    message = "Run child process %s %s" % (name, os.getpid())
    print(message)
def long_time_task(name):
    """Demo pool task: sleep for up to 3 seconds and report the elapsed time.

    Fixes the original elapsed-time computation, which printed
    ``start - end`` (always negative) instead of ``end - start``.

    :param name: label printed with the task's output.
    """
    print('Run task %s %s...' % (name, os.getpid()))
    start = time.time()
    time.sleep(random.random() * 3)
    end = time.time()
    # Elapsed wall-clock seconds, two decimal places.
    print('Task %s run %0.2f' % (name, end - start))
def chinese_to_pinyin(x):
    """Return the Hanyu Pinyin transliteration of the Chinese string *x*.

    NOTE(review): with the dictionary lookup below commented out, this
    currently always returns the empty string -- the loop computes each
    character's 4-hex-digit unicode code (e.g. u'陈' -> '9648') and then
    discards it. Restore the ``unicode_pinyin.txt`` lookup to get pinyin.
    """
    y = ''
    # dic = {}
    # with open("unicode_pinyin.txt") as f:
    #     for i in f.readlines():
    #         dic[i.split()[0]] = i.split()[1]
    for i in x:
        # Uppercase 4-hex-digit unicode codepoint: b'\\u9648' -> '9648'.
        i = str(i.encode('unicode_escape'))[-5:-1].upper()
        # try:
        #     y += dic[i] + ' '
        # except:
        #     y += 'XXXX '  # substitute XXXX for characters missing from the table
    return y
if __name__ == '__main__':
    # Only the pinyin demo below is active; the commented lines are earlier
    # experiments (primes, palindromes, closures, PIL resizing,
    # multiprocessing) kept for reference.
    # main()
    # print(_not_divisible(3))
    # output = filter(is_palindrome, range(1, 1000))
    # print(list(output))
    # print(range(100))[::-1]
    # f1, f2, f3 = count()
    # print(f1)
    # print(f2)
    # print(f3)
    # changeImage()
    # print("Parent process %s ", os.getpid())
    # p = Process(target=run_proc, args=("test",))
    # print('Child process will start.')
    # p.start()
    # p.join()
    # print('Child process end.')
    # print("Parent process %s ", os.getpid())
    # p = Pool(5)
    # for i in range(5):
    #     p.apply_async(long_time_task, args=(i,))
    # print('Waiting for all subprocesses done...')
    # p.close()
    # p.join()
    # print('All subprocesses done.')
    print(chinese_to_pinyin(u"陈"))
|
normal
|
{
"blob_id": "ad94118b43e130aec5df3976fd0460164de17511",
"index": 8361,
"step-1": "<mask token>\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\n\ndef main():\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\n<mask token>\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\n<mask token>\n\n\ndef run_proc(name):\n print('Run child process %s %s' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, start - end))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\n\ndef main():\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\ndef count():\n\n def f(j):\n return j * j\n fs = []\n for i in range(1, 4):\n fs.append(f(i))\n return fs\n\n\n<mask token>\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\n<mask token>\n\n\ndef run_proc(name):\n print('Run child process %s %s' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, start - end))\n\n\ndef chinese_to_pinyin(x):\n \"\"\"参数为字符串,返回为该字符串对应的汉语拼音\"\"\"\n y = ''\n for i in x:\n i = str(i.encode('unicode_escape'))[-5:-1].upper()\n return y\n\n\n<mask token>\n",
"step-3": "def _odd_iter():\n n = 1\n while True:\n n += 2\n yield n\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\n\ndef main():\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\ndef count():\n\n def f(j):\n return j * j\n fs = []\n for i in range(1, 4):\n fs.append(f(i))\n return fs\n\n\n<mask token>\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\n<mask token>\n\n\ndef run_proc(name):\n print('Run child process %s %s' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, start - end))\n\n\ndef chinese_to_pinyin(x):\n \"\"\"参数为字符串,返回为该字符串对应的汉语拼音\"\"\"\n y = ''\n for i in x:\n i = str(i.encode('unicode_escape'))[-5:-1].upper()\n return y\n\n\n<mask token>\n",
"step-4": "def _odd_iter():\n n = 1\n while True:\n n += 2\n yield n\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\n\ndef main():\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\ndef count():\n\n def f(j):\n return j * j\n fs = []\n for i in range(1, 4):\n fs.append(f(i))\n return fs\n\n\n<mask token>\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\n<mask token>\n\n\ndef run_proc(name):\n print('Run child process %s %s' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, start - end))\n\n\ndef chinese_to_pinyin(x):\n \"\"\"参数为字符串,返回为该字符串对应的汉语拼音\"\"\"\n y = ''\n for i in x:\n i = str(i.encode('unicode_escape'))[-5:-1].upper()\n return y\n\n\nif __name__ == '__main__':\n print(chinese_to_pinyin(u'陈'))\n",
"step-5": "#!/usr/bin/python\n# coding: utf-8\n\n\n# # import re\n# # import urllib\n# #\n# #\n# # def getHtml(url):\n# # page = urllib.urlopen(url)\n# # html = page.read()\n# # return html\n# #\n# #\n# # def getMp4(html):\n# # r = r\"href='(http.*\\.mp4)'\"\n# # re_mp4 = re.compile(r)\n# # mp4List = re.findall(re_mp4, html)\n# # filename = 1\n# # for mp4url in mp4List:\n# # urllib.urlretrieve(mp4url, \"%s.mp4\" % filename)\n# # print 'file \"%s.mp4\" done' % filename\n# # filename += 1\n# # url = \"http://v.youku.com/v_show/id_XMjYxMjEyNDU0MA==.html\"\n# # html = getHtml(url)\n# # getMp4(html)\n#\n#\n#\n#\n# # import re\n# #\n# #\n# # pattern = re.compile(r'hello world')\n# # match = pattern.match('hello world!')\n# #\n# # if match:\n# # print match.group()\n#\n#\n# #\n# # # 冒泡排序\n# # array = [4, 5, 0, 2, 3, 7, 1, 6]\n# #\n# # for i in range(len(array) - 1, 1, -1):\n# # for j in range(0, i):\n# # if array[j] > array[j + 1]:\n# # array[j], array[j + 1] = array[j + 1], array[j]\n# # print array\n#\n# # theString = 'saaaay yes no yaaaass'\n# # print theString.strip('say') #say后面有空格\n#\n#\n#\n# # -*- coding:utf-8 -*-\n# import urllib\n# import urllib2\n# import re\n# import thread\n# import time\n#\n#\n# # 糗事百科爬虫类\n# class QSBK:\n# # 初始化方法,定义一些变量\n# def __init__(self):\n# self.pageIndex = 1\n# self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n# # 初始化headers\n# self.headers = {'User-Agent': self.user_agent}\n# # 存放段子的变量,每一个元素是每一页的段子们\n# self.stories = []\n# # 存放程序是否继续运行的变量\n# self.enable = False\n#\n# # 传入某一页的索引获得页面代码\n# def getPage(self, pageIndex):\n# try:\n# url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)\n# # 构建请求的request\n# request = urllib2.Request(url, headers=self.headers)\n# # 利用urlopen获取页面代码\n# response = urllib2.urlopen(request)\n# # 将页面转化为UTF-8编码\n# pageCode = response.read().decode('utf-8')\n# return pageCode\n#\n# except urllib2.URLError, e:\n# if hasattr(e, \"reason\"):\n# print u\"连接糗事百科失败,错误原因\", e.reason\n# return 
None\n#\n# # 传入某一页代码,返回本页不带图片的段子列表\n# def getPageItems(self, pageIndex):\n# pageCode = self.getPage(pageIndex)\n# if not pageCode:\n# print \"页面加载失败....\"\n# return None\n# # pattern = re.compile('<div class=author clearfix>.*?<img src=.*? alt=(.*?)>.*?<div.*?' +\n# # '<span>(.*?)</span>.*?stats-vote><i class=number>(.*?)</i>.*?' +\n# # '<i class=number>(.*?)</i>', re.S)\n# pattern = re.compile('h2>(.*?)</h2.*?content\">(.*?)</.*?number\">(.*?)</', re.S)\n# items = re.findall(pattern, pageCode)\n# # 用来存储每页的段子们\n# pageStories = []\n# # 遍历正则表达式匹配的信息\n# # for item in items:\n# # # 是否含有图片\n# # haveImg = re.search(\"img\", item[3])\n# # # 如果不含有图片,把它加入list中\n# # if not haveImg:\n# # replaceBR = re.compile('<br/>')\n# # text = re.sub(replaceBR, \"\\n\", item[1])\n# # # item[0]是一个段子的发布者,item[1]是内容,item[2]是发布时间,item[4]是点赞数\n# # pageStories.append([item[0].strip(), text.strip(), item[2].strip(), item[4].strip()])\n# # return pageStories\n# for item in items:\n# pageStories.append([item[0].strip(), item[1].strip(), item[2].strip()])\n# return pageStories\n#\n# # 加载并提取页面的内容,加入到列表中\n# def loadPage(self):\n# # 如果当前未看的页数少于2页,则加载新一页\n# if self.enable == True:\n# if len(self.stories) < 2:\n# # 获取新一页\n# pageStories = self.getPageItems(self.pageIndex)\n# # 将该页的段子存放到全局list中\n# if pageStories:\n# self.stories.append(pageStories)\n# # 获取完之后页码索引加一,表示下次读取下一页\n# self.pageIndex += 1\n#\n# # 调用该方法,每次敲回车打印输出一个段子\n# def getOneStory(self, pageStories, page):\n# # 遍历一页的段子\n# for story in pageStories:\n# # 等待用户输入\n# input = raw_input()\n# # 每当输入回车一次,判断一下是否要加载新页面\n# self.loadPage()\n# # 如果输入Q则程序结束\n# if input == \"Q\":\n# self.enable = False\n# return\n# print u\"第%d页\\t发布人:%s\\t 赞:%s\\n%s\" % (page, story[0], story[2], story[1])\n#\n# # 开始方法\n# def start(self):\n# print u\"正在读取糗事百科,按回车查看新段子,Q退出\"\n# # 使变量为True,程序可以正常运行\n# self.enable = True\n# # 先加载一页内容\n# self.loadPage()\n# # 局部变量,控制当前读到了第几页\n# nowPage = 0\n# while self.enable:\n# if len(self.stories) > 0:\n# # 从全局list中获取一页的段子\n# pageStories = 
self.stories[0]\n# # 当前读到的页数加一\n# nowPage += 1\n# # 将全局list中第一个元素删除,因为已经取出\n# del self.stories[0]\n# # 输出该页的段子\n# self.getOneStory(pageStories, nowPage)\n#\n#\n# spider = QSBK()\n# spider.start()\n#\n# print [x * x for x in range(1, 11) if x % 2 == 0]\n\n\ndef _odd_iter():\n n = 1\n while True:\n n += 2\n yield n\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter() # 初始序列\n while True:\n n = next(it) # 返回序列的第一个数\n yield n\n it = filter(_not_divisible(n), it) # 构造新序列\n\n\ndef main():\n # 打印1000以内的素数:\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\ndef count():\n def f(j):\n # def g():\n return j*j\n # return g\n fs = []\n for i in range(1, 4):\n fs.append(f(i)) # f(i)立刻被执行,因此i的当前值被传入f()\n return fs\n\n\nfrom PIL import Image\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\nfrom multiprocessing import Process, Pool\nimport os, time, random\n\n\ndef run_proc(name):\n print(\"Run child process %s %s\" % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' 
% (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, (start - end)))\n\n\ndef chinese_to_pinyin(x):\n \"\"\"参数为字符串,返回为该字符串对应的汉语拼音\"\"\"\n y = ''\n # dic = {}\n # with open(\"unicode_pinyin.txt\") as f:\n # for i in f.readlines():\n # dic[i.split()[0]] = i.split()[1]\n for i in x:\n i = str(i.encode('unicode_escape'))[-5:-1].upper()\n # try:\n # y += dic[i] + ' '\n # except:\n # y += 'XXXX ' # 非法字符我们用XXXX代替\n return y\n\n\nif __name__ == '__main__':\n # main()\n # print(_not_divisible(3))\n # output = filter(is_palindrome, range(1, 1000))\n # print(list(output))\n # print(range(100))[::-1]\n # f1, f2, f3 = count()\n # print(f1)\n # print(f2)\n # print(f3)\n # changeImage()\n # print(\"Parent process %s \", os.getpid())\n # p = Process(target=run_proc, args=(\"test\",))\n # print('Child process will start.')\n # p.start()\n # p.join()\n # print('Child process end.')\n # print(\"Parent process %s \", os.getpid())\n # p = Pool(5)\n # for i in range(5):\n # p.apply_async(long_time_task, args=(i,))\n # print('Waiting for all subprocesses done...')\n # p.close()\n # p.join()\n # print('All subprocesses done.')\n print(chinese_to_pinyin(u\"陈\"))",
"step-ids": [
6,
9,
10,
11,
13
]
}
|
[
6,
9,
10,
11,
13
] |
class Solution:
    """Hotel booking feasibility check (peak interval-overlap counting)."""

    # @param arrive : list of integers (arrival time of each booking)
    # @param depart : list of integers (departure time of each booking)
    # @param K : integer (number of rooms available)
    # @return a boolean: True iff all bookings fit in K rooms
    def hotel(self, arrive, depart, K):
        """Return True iff no more than K guests ever stay simultaneously.

        Sorts arrivals and departures independently and sweeps both streams
        in order, tracking the number of concurrent guests. A guest leaving
        at time t frees a room for a guest arriving at the same time t.

        Fixes the original implementation, which contained unreachable code
        after its return statements, a Python 2 print / empty-for syntax
        error, an out-of-range index in its overlap scan, and compared the
        total number of overlaps against K instead of the peak concurrency.
        """
        arrivals = sorted(arrive)      # sorted copies: don't mutate the caller's lists
        departures = sorted(depart)
        guests = 0
        i = j = 0
        n = len(arrivals)
        while i < n and j < n:
            if arrivals[i] < departures[j]:
                guests += 1            # someone checks in before the next check-out
                if guests > K:
                    return False       # more concurrent guests than rooms
                i += 1
            else:
                guests -= 1            # a departure frees a room
                j += 1
        return True
# Demo: four bookings but only one room -> expected output: False.
obj = Solution()
l1 = [1, 2, 3, 4]
l2 = [10, 2, 6, 14]
k = 1
# Fixed: the original used a Python 2 print statement, a SyntaxError on Python 3.
print(obj.hotel(l1, l2, k))
|
normal
|
{
"blob_id": "de6a6c2dc7bea255e5674663616c962c1d1625e0",
"index": 4138,
"step-1": "class Solution:\n # @param arrive : list of integers\n # @param depart : list of integers\n # @param K : integer\n # @return a boolean\n def hotel(self, arrive, depart, K):\n self.count = 0\n self.temp = 0\n for i in range(len(arrive)):\n for j in range(i, len(depart)):\n if arrive[j] < arrive[i]:\n self.temp = arrive[j]\n arrive[j] = arrive[i]\n arrive[i] = self.temp\n if depart[j] < depart[i]:\n self.temp = depart[j]\n depart[j] = depart[i]\n depart[i] = self.temp\n for i in range(len(arrive)):\n self.x = i \n while (arrive[self.x + 1] < depart[self.x]):\n self.count = self.count + 1\n self.x = self.x + 1\n print (\"Count: \",self.count)\n print (\"K: \", K)\n print (\"Arrive: \", arrive)\n print (\"Depart: \", depart)\n if self.count < K:\n return True\n else:\n return False\n beg = 0\n end = len(arrive)\n mid = (beg + mid) / 2\n \n for i in range(len(arrive)):\n \n\nobj = Solution()\nl1 = [1,2,3,4]\nl2 = [10, 2, 6, 14]\nk = 1\nprint obj.hotel(l1,l2,k)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class UploadCommand(Command):
<|reserved_special_token_0|>
description = 'Build and publish the package.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
@staticmethod
def status(s):
"""Prints things in bold."""
print('\x1b[1m{0}\x1b[0m'.format(s))
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Installing required build packages...')
os.system('{0} -m pip install wheel twine'.format(sys.executable))
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.
executable))
self.status('Uploading the package to pypi via Twine…')
os.system('{0} -m twine upload dist/* '.format(sys.executable))
sys.exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
@staticmethod
def status(s):
"""Prints things in bold."""
print('\x1b[1m{0}\x1b[0m'.format(s))
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Installing required build packages...')
os.system('{0} -m pip install wheel twine'.format(sys.executable))
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.
executable))
self.status('Uploading the package to pypi via Twine…')
os.system('{0} -m twine upload dist/* '.format(sys.executable))
sys.exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__author__ = 'tcaruso'
<|reserved_special_token_0|>
try:
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
except Exception:
from pip import __version__ as __pip_version__
msg = (
"""Sorry, could not install due to a pip import error. Please open an issue on the repo
with this message and the error so it can be addressed.
pip version: {}
python version: {}
"""
.format(__pip_version__, '.'.join(sys.version_info)))
raise EnvironmentError(msg)
here = os.path.abspath(os.path.dirname(__file__))
PACKAGE_NAME = 'socket_wait'
DESCRIPTION = 'Listen on a port until a connection is received.'
URL = 'https://github.com/tomplex/socket_wait'
EMAIL = 'carusot42@gmail.com'
AUTHOR = 'Tom Caruso'
REQUIRES_PYTHON = 2, 7, 0
PYPI_NAME = '{}'.format(PACKAGE_NAME)
if sys.version_info < REQUIRES_PYTHON:
raise Exception('Package {} requires python >= {}.'.format(PYPI_NAME,
'.'.join(map(str, REQUIRES_PYTHON))))
REQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))
about = {}
<|reserved_special_token_0|>
about['__version__'] = __version__
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
@staticmethod
def status(s):
"""Prints things in bold."""
print('\x1b[1m{0}\x1b[0m'.format(s))
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Installing required build packages...')
os.system('{0} -m pip install wheel twine'.format(sys.executable))
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.
executable))
self.status('Uploading the package to pypi via Twine…')
os.system('{0} -m twine upload dist/* '.format(sys.executable))
sys.exit()
setup(name=PYPI_NAME, version=about['__version__'], description=DESCRIPTION,
author=AUTHOR, author_email=EMAIL, url=URL, py_modules=['socket_wait'],
include_package_data=True, entry_points={'console_scripts': [
'socket_wait=socket_wait:cli']}, classifiers=[
'Programming Language :: Python', 'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython'],
cmdclass={'upload': UploadCommand})
<|reserved_special_token_1|>
__author__ = 'tcaruso'
import glob
import fnmatch
import os
import sys
import warnings
from shutil import rmtree
from setuptools import find_packages, setup, Command
from collections import namedtuple
# Locate pip's requirements parser across pip versions: pip >= 10 moved it
# under pip._internal; older releases expose it as pip.req.
try:
    from pip._internal.req import parse_requirements
except ImportError:
    from pip.req import parse_requirements
except Exception:
    from pip import __version__ as __pip_version__
    # sys.version_info holds ints, so map(str, ...) is required here --
    # '.'.join(sys.version_info) raised TypeError and hid this message.
    # (The version check further down already uses the map(str, ...) form.)
    msg = (
        """Sorry, could not install due to a pip import error. Please open an issue on the repo
with this message and the error so it can be addressed.
pip version: {}
python version: {}
"""
        .format(__pip_version__, '.'.join(map(str, sys.version_info))))
    raise EnvironmentError(msg)
# Package metadata consumed by setup() below.
here = os.path.abspath(os.path.dirname(__file__))
PACKAGE_NAME = 'socket_wait'
DESCRIPTION = 'Listen on a port until a connection is received.'
URL = 'https://github.com/tomplex/socket_wait'
EMAIL = 'carusot42@gmail.com'
AUTHOR = 'Tom Caruso'
# Minimum interpreter version as a tuple comparable against sys.version_info.
REQUIRES_PYTHON = 2, 7, 0
PYPI_NAME = '{}'.format(PACKAGE_NAME)
if sys.version_info < REQUIRES_PYTHON:
    # Abort both building and installing on unsupported interpreters.
    raise Exception('Package {} requires python >= {}.'.format(PYPI_NAME,
        '.'.join(map(str, REQUIRES_PYTHON))))
# Rebound as the ">=X.Y.Z" specifier string form of the same constant.
REQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))
about = {}
# Single-source the package version from the module itself.
from socket_wait import __version__
about['__version__'] = __version__
class UploadCommand(Command):
    """Custom ``setup.py upload``: clean, build, and publish the package."""
    description = 'Build and publish the package.'
    user_options = []  # no command-line options accepted

    def initialize_options(self):
        # setuptools Command hook; nothing to initialise.
        pass

    def finalize_options(self):
        # setuptools Command hook; nothing to validate.
        pass

    @staticmethod
    def status(s):
        """Prints things in bold."""
        # \x1b[1m ... \x1b[0m are the ANSI bold on/off escape codes.
        print('\x1b[1m{0}\x1b[0m'.format(s))

    def run(self):
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            # dist/ did not exist yet; nothing to remove.
            pass
        # Shell out to the current interpreter for each build/publish step.
        self.status('Installing required build packages...')
        os.system('{0} -m pip install wheel twine'.format(sys.executable))
        self.status('Building Source and Wheel (universal) distribution…')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.
            executable))
        self.status('Uploading the package to pypi via Twine…')
        os.system('{0} -m twine upload dist/* '.format(sys.executable))
        sys.exit()
# Register the metadata above, the single module, the console entry point,
# and the custom ``upload`` command with setuptools.
setup(name=PYPI_NAME, version=about['__version__'], description=DESCRIPTION,
    author=AUTHOR, author_email=EMAIL, url=URL, py_modules=['socket_wait'],
    include_package_data=True, entry_points={'console_scripts': [
    'socket_wait=socket_wait:cli']}, classifiers=[
    'Programming Language :: Python', 'Programming Language :: Python :: 3',
    'Programming Language :: Python :: Implementation :: CPython'],
    cmdclass={'upload': UploadCommand})
<|reserved_special_token_1|>
__author__ = 'tcaruso'
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import fnmatch
import os
import sys
import warnings
from shutil import rmtree
from setuptools import find_packages, setup, Command
from collections import namedtuple
# pip >= 10 moved parse_requirements into pip._internal; fall back to the
# old public location (pip.req) for older pips, and fail loudly otherwise.
try:
    from pip._internal.req import parse_requirements
except ImportError:
    from pip.req import parse_requirements
except Exception:
    from pip import __version__ as __pip_version__
    # Bug fix: sys.version_info is a tuple of ints (and strings), so each
    # element must be mapped to str before joining -- the original
    # '.'.join(sys.version_info) raised a TypeError, hiding this message.
    msg = """Sorry, could not install due to a pip import error. Please open an issue on the repo 
    with this message and the error so it can be addressed.

    pip version: {}
    python version: {}

    """.format(__pip_version__, '.'.join(map(str, sys.version_info)))
    raise EnvironmentError(msg)
# Absolute directory of this setup.py; used to locate the dist/ folder.
here = os.path.abspath(os.path.dirname(__file__))

# ------------------------------------------------
# Package meta-data.
# PACKAGE_NAME is the name of the package directory and the import path. If you use my_package then when installed, you
# will import the package like `import my_package`.
PACKAGE_NAME = 'socket_wait'
DESCRIPTION = 'Listen on a port until a connection is received.'
URL = 'https://github.com/tomplex/socket_wait'
EMAIL = 'carusot42@gmail.com'
AUTHOR = 'Tom Caruso'
# The minimum Python version required, as a tuple comparable to sys.version_info.
REQUIRES_PYTHON = (2, 7, 0)
# PYPI_NAME is the distribution name on PyPI. Here it simply mirrors
# PACKAGE_NAME; keeping it separate allows prefixing to avoid name collisions.
PYPI_NAME = '{}'.format(PACKAGE_NAME)
# ------------------------------------------------
# Check the Python version we're installing against. Bail if it's not correct.
# This will blow up both when we build the package and when someone installs it.
if sys.version_info < REQUIRES_PYTHON:
    # Raise if we're trying to install on an unsupported Python version.
    raise Exception("Package {} requires python >= {}.".format(PYPI_NAME, '.'.join(map(str, REQUIRES_PYTHON))))
# Re-use the tuple to build the version specifier string handed to setuptools.
REQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))
# ------------------------------------------------
# Version gathering: read __version__ straight from the module so the
# version number lives in exactly one place.
about = {}
from socket_wait import __version__
about['__version__'] = __version__
class UploadCommand(Command):
    """Support ``setup.py upload``: clean, build, and publish to PyPI.

    Bug fix vs. the original: the exit status of each shell step is now
    checked, so a failed build can no longer be silently uploaded.
    """

    description = 'Build and publish the package.'
    user_options = []  # no command-line options

    def initialize_options(self):
        # Required by the distutils/setuptools Command interface; nothing to set up.
        pass

    def finalize_options(self):
        # Required by the distutils/setuptools Command interface; nothing to validate.
        pass

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    @staticmethod
    def _run_or_die(command):
        """Run *command* through the shell; abort the publish on failure."""
        # os.system returns the shell's exit status; non-zero means failure.
        if os.system(command) != 0:
            raise SystemExit('Command failed: {0}'.format(command))

    def run(self):
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            # No previous build directory exists; nothing to clean.
            pass

        self.status('Installing required build packages...')
        self._run_or_die('{0} -m pip install wheel twine'.format(sys.executable))

        self.status('Building Source and Wheel (universal) distribution…')
        self._run_or_die('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))

        self.status('Uploading the package to pypi via Twine…')
        self._run_or_die('{0} -m twine upload dist/* '.format(sys.executable))

        sys.exit()
# Hand all metadata, the single-module layout, and the custom upload
# command over to setuptools.
setup(
    name=PYPI_NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    py_modules=['socket_wait'],
    include_package_data=True,
    # entry_points wires up the CLI: installing the package creates a
    # `socket_wait` console script that invokes socket_wait.cli().
    entry_points={
        'console_scripts': ['socket_wait=socket_wait:cli'],
    },
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    # setup.py publish support: `python setup.py upload` runs UploadCommand.
    cmdclass={
        'upload': UploadCommand,
    },
)
|
flexible
|
{
"blob_id": "58438a1fb0b9e620717ba262c25a43bfbf6b8824",
"index": 8100,
"step-1": "<mask token>\n\n\nclass UploadCommand(Command):\n <mask token>\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\x1b[1m{0}\\x1b[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n self.status('Installing required build packages...')\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.\n executable))\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n sys.exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\x1b[1m{0}\\x1b[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n self.status('Installing required build packages...')\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.\n executable))\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n sys.exit()\n\n\n<mask token>\n",
"step-3": "__author__ = 'tcaruso'\n<mask token>\ntry:\n from pip._internal.req import parse_requirements\nexcept ImportError:\n from pip.req import parse_requirements\nexcept Exception:\n from pip import __version__ as __pip_version__\n msg = (\n \"\"\"Sorry, could not install due to a pip import error. Please open an issue on the repo \n with this message and the error so it can be addressed.\n\n pip version: {}\n python version: {}\n\n \"\"\"\n .format(__pip_version__, '.'.join(sys.version_info)))\n raise EnvironmentError(msg)\nhere = os.path.abspath(os.path.dirname(__file__))\nPACKAGE_NAME = 'socket_wait'\nDESCRIPTION = 'Listen on a port until a connection is received.'\nURL = 'https://github.com/tomplex/socket_wait'\nEMAIL = 'carusot42@gmail.com'\nAUTHOR = 'Tom Caruso'\nREQUIRES_PYTHON = 2, 7, 0\nPYPI_NAME = '{}'.format(PACKAGE_NAME)\nif sys.version_info < REQUIRES_PYTHON:\n raise Exception('Package {} requires python >= {}.'.format(PYPI_NAME,\n '.'.join(map(str, REQUIRES_PYTHON))))\nREQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))\nabout = {}\n<mask token>\nabout['__version__'] = __version__\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\x1b[1m{0}\\x1b[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n self.status('Installing required build packages...')\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.\n executable))\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n 
sys.exit()\n\n\nsetup(name=PYPI_NAME, version=about['__version__'], description=DESCRIPTION,\n author=AUTHOR, author_email=EMAIL, url=URL, py_modules=['socket_wait'],\n include_package_data=True, entry_points={'console_scripts': [\n 'socket_wait=socket_wait:cli']}, classifiers=[\n 'Programming Language :: Python', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: Implementation :: CPython'],\n cmdclass={'upload': UploadCommand})\n",
"step-4": "__author__ = 'tcaruso'\nimport glob\nimport fnmatch\nimport os\nimport sys\nimport warnings\nfrom shutil import rmtree\nfrom setuptools import find_packages, setup, Command\nfrom collections import namedtuple\ntry:\n from pip._internal.req import parse_requirements\nexcept ImportError:\n from pip.req import parse_requirements\nexcept Exception:\n from pip import __version__ as __pip_version__\n msg = (\n \"\"\"Sorry, could not install due to a pip import error. Please open an issue on the repo \n with this message and the error so it can be addressed.\n\n pip version: {}\n python version: {}\n\n \"\"\"\n .format(__pip_version__, '.'.join(sys.version_info)))\n raise EnvironmentError(msg)\nhere = os.path.abspath(os.path.dirname(__file__))\nPACKAGE_NAME = 'socket_wait'\nDESCRIPTION = 'Listen on a port until a connection is received.'\nURL = 'https://github.com/tomplex/socket_wait'\nEMAIL = 'carusot42@gmail.com'\nAUTHOR = 'Tom Caruso'\nREQUIRES_PYTHON = 2, 7, 0\nPYPI_NAME = '{}'.format(PACKAGE_NAME)\nif sys.version_info < REQUIRES_PYTHON:\n raise Exception('Package {} requires python >= {}.'.format(PYPI_NAME,\n '.'.join(map(str, REQUIRES_PYTHON))))\nREQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))\nabout = {}\nfrom socket_wait import __version__\nabout['__version__'] = __version__\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\x1b[1m{0}\\x1b[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n self.status('Installing required build packages...')\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} 
setup.py sdist bdist_wheel --universal'.format(sys.\n executable))\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n sys.exit()\n\n\nsetup(name=PYPI_NAME, version=about['__version__'], description=DESCRIPTION,\n author=AUTHOR, author_email=EMAIL, url=URL, py_modules=['socket_wait'],\n include_package_data=True, entry_points={'console_scripts': [\n 'socket_wait=socket_wait:cli']}, classifiers=[\n 'Programming Language :: Python', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: Implementation :: CPython'],\n cmdclass={'upload': UploadCommand})\n",
"step-5": "__author__ = 'tcaruso'\n\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport glob\nimport fnmatch\nimport os\nimport sys\nimport warnings\nfrom shutil import rmtree\nfrom setuptools import find_packages, setup, Command\nfrom collections import namedtuple\n\ntry:\n from pip._internal.req import parse_requirements\nexcept ImportError:\n from pip.req import parse_requirements\nexcept Exception:\n from pip import __version__ as __pip_version__\n\n msg = \"\"\"Sorry, could not install due to a pip import error. Please open an issue on the repo \n with this message and the error so it can be addressed.\n\n pip version: {}\n python version: {}\n\n \"\"\".format(__pip_version__, '.'.join(sys.version_info))\n raise EnvironmentError(msg)\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# ------------------------------------------------\n\n# Package meta-data.\n# PACKAGE_NAME is the name of the package directory and the import path. If you use my_package then when installed, you\n# will import the package like `import my_package`.\nPACKAGE_NAME = 'socket_wait'\nDESCRIPTION = 'Listen on a port until a connection is received.'\nURL = 'https://github.com/tomplex/socket_wait'\nEMAIL = 'carusot42@gmail.com'\nAUTHOR = 'Tom Caruso'\n# The minimum Python version required\nREQUIRES_PYTHON = (2, 7, 0)\n# PYPI_NAME is the name of the package on pypi. We'll default to pbvt_{PACKAGE_NAME} so we avoid name collisions\n# with PyPI. You'll use this name to install the package.\nPYPI_NAME = '{}'.format(PACKAGE_NAME)\n# Specify the name of the requirements file we should use. If there is none, then just leave it as is. We'll detect\n\n# ------------------------------------------------\n# Check Python version we're installing against. Bail if it's not correct. 
This will blow up both when we build the\n# package and when someone tries to install it.\n\nif sys.version_info < REQUIRES_PYTHON:\n # Raise if we're trying to install on an unsupported Python version\n raise Exception(\"Package {} requires python >= {}.\".format(PYPI_NAME, '.'.join(map(str, REQUIRES_PYTHON))))\n\nREQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))\n\n\n# ------------------------------------------------\n# Requirements gathering.\n\n\nabout = {}\nfrom socket_wait import __version__\n\nabout['__version__'] = __version__\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\033[1m{0}\\033[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n\n self.status(\"Installing required build packages...\")\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))\n\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n\n sys.exit()\n\n\nsetup(\n name=PYPI_NAME,\n version=about['__version__'],\n description=DESCRIPTION,\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n py_modules=['socket_wait'],\n include_package_data=True,\n # If your package has a CLI component, specify it in entry_points.\n # for example, if you want it to be called like \"mycli\" from the command line, and the command line entry\n # point lives in the somepackage/cli.py file, in the function main, you'd construct it like this:\n entry_points={\n 'console_scripts': ['socket_wait=socket_wait:cli'],\n },\n\n 
classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n # setup.py publish support.\n cmdclass={\n 'upload': UploadCommand,\n },\n)\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
# Static page layout for this node: a heading, a dropdown offering three
# city options, and an empty div that the callback below populates.
layout = html.Div([
    html.H3('Node 6'),
    dcc.Dropdown(
        id='node-6-dropdown',
        options=[
            # One option per city; the label is prefixed, the value is the raw code.
            {'label': 'Node 6 - {}'.format(i), 'value': i} for i in [
                'NYC', 'MTL', 'LA'
            ]
        ]
    ),
    html.Div(id='node-6-display-value'),
])
@app.callback(
    Output('node-6-display-value', 'children'),
    [Input('node-6-dropdown', 'value')])
def display_value(value):
    """Dash callback: render the current dropdown selection as text inside
    the 'node-6-display-value' div."""
    return 'You have selected "{}"'.format(value)
|
normal
|
{
"blob_id": "632b90ea5a2ac35539e589af297c04b31bbf02d0",
"index": 3443,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.callback(Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n",
"step-3": "<mask token>\nlayout = html.Div([html.H3('Node 6'), dcc.Dropdown(id='node-6-dropdown',\n options=[{'label': 'Node 6 - {}'.format(i), 'value': i} for i in ['NYC',\n 'MTL', 'LA']]), html.Div(id='node-6-display-value')])\n\n\n@app.callback(Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n",
"step-4": "import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom app import app\nlayout = html.Div([html.H3('Node 6'), dcc.Dropdown(id='node-6-dropdown',\n options=[{'label': 'Node 6 - {}'.format(i), 'value': i} for i in ['NYC',\n 'MTL', 'LA']]), html.Div(id='node-6-display-value')])\n\n\n@app.callback(Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n",
"step-5": "import dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\n\r\nfrom app import app\r\n\r\nlayout = html.Div([\r\n html.H3('Node 6'),\r\n dcc.Dropdown(\r\n id='node-6-dropdown',\r\n options=[\r\n {'label': 'Node 6 - {}'.format(i), 'value': i} for i in [\r\n 'NYC', 'MTL', 'LA'\r\n ]\r\n ]\r\n ),\r\n html.Div(id='node-6-display-value'),\r\n\r\n])\r\n\r\n\r\n@app.callback(\r\n Output('node-6-display-value', 'children'),\r\n [Input('node-6-dropdown', 'value')])\r\ndef display_value(value):\r\n return 'You have selected \"{}\"'.format(value)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Given two binary trees, write a function to check if they are equal or not.
#
# Two binary trees are considered equal if they are structurally identical and the nodes have the same value.
#
# Return 0 / 1 ( 0 for false, 1 for true ) for this problem
#
# Example :
#
# Input :
#
# 1 1
# / \ / \
# 2 3 2 3
#
# Output :
# 1 or True
from Level6.Trees.BinaryTree import BinaryTree
class Solution:
    """Structural equality test for two binary trees.

    Two trees are considered identical when they have the same shape and
    every pair of corresponding nodes holds an equal value.
    """

    def solution(self, rootA, rootB):
        """Return True iff the trees rooted at *rootA* and *rootB* are identical.

        Fixes vs. the original: the leftover debug print('h') is removed,
        and the shared base case uses `is` (identity / both-None) instead
        of `==`, the conventional Python identity comparison.
        """
        if rootA is rootB:
            # Same node object, or both None: trivially identical subtrees.
            return True
        if rootA is None or rootB is None:
            # Exactly one side ran out of nodes: the shapes differ.
            return False
        return (rootA.val == rootB.val and
                self.solution(rootA.left, rootB.left) and
                self.solution(rootA.right, rootB.right))
# Smoke test: build two BSTs with the same insertion order (100, 102, 96)
# and confirm they compare as identical; expected printed output is True.
A = BinaryTree()
A.insert(100)
A.insert(102)
A.insert(96)
B = BinaryTree()
B.insert(100)
B.insert(102)
B.insert(96)
# NOTE(review): assumes BinaryTree exposes its root node as `.root` --
# confirm against Level6.Trees.BinaryTree.
res = Solution().solution(A.root, B.root)
print(res)
|
normal
|
{
"blob_id": "4a0eca90de3ce7fb0ab6decb0ec6aadb32c1a9fa",
"index": 601,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n if rootA == rootB:\n print('h')\n return True\n if rootA is None or rootB is None:\n return False\n return rootA.val == rootB.val and self.solution(rootA.left, rootB.left\n ) and self.solution(rootA.right, rootB.right)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n if rootA == rootB:\n print('h')\n return True\n if rootA is None or rootB is None:\n return False\n return rootA.val == rootB.val and self.solution(rootA.left, rootB.left\n ) and self.solution(rootA.right, rootB.right)\n\n\n<mask token>\nA.insert(100)\nA.insert(102)\nA.insert(96)\n<mask token>\nB.insert(100)\nB.insert(102)\nB.insert(96)\n<mask token>\nprint(res)\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n if rootA == rootB:\n print('h')\n return True\n if rootA is None or rootB is None:\n return False\n return rootA.val == rootB.val and self.solution(rootA.left, rootB.left\n ) and self.solution(rootA.right, rootB.right)\n\n\nA = BinaryTree()\nA.insert(100)\nA.insert(102)\nA.insert(96)\nB = BinaryTree()\nB.insert(100)\nB.insert(102)\nB.insert(96)\nres = Solution().solution(A.root, B.root)\nprint(res)\n",
"step-4": "from Level6.Trees.BinaryTree import BinaryTree\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n if rootA == rootB:\n print('h')\n return True\n if rootA is None or rootB is None:\n return False\n return rootA.val == rootB.val and self.solution(rootA.left, rootB.left\n ) and self.solution(rootA.right, rootB.right)\n\n\nA = BinaryTree()\nA.insert(100)\nA.insert(102)\nA.insert(96)\nB = BinaryTree()\nB.insert(100)\nB.insert(102)\nB.insert(96)\nres = Solution().solution(A.root, B.root)\nprint(res)\n",
"step-5": "# Given two binary trees, write a function to check if they are equal or not.\n#\n# Two binary trees are considered equal if they are structurally identical and the nodes have the same value.\n#\n# Return 0 / 1 ( 0 for false, 1 for true ) for this problem\n#\n# Example :\n#\n# Input :\n#\n# 1 1\n# / \\ / \\\n# 2 3 2 3\n#\n# Output :\n# 1 or True\n\n\nfrom Level6.Trees.BinaryTree import BinaryTree\n\n\nclass Solution:\n\n def solution(self, rootA, rootB):\n\n if rootA == rootB:\n print('h')\n return True\n\n if rootA is None or rootB is None:\n return False\n\n # if rootA is None and rootB is None:\n # return True\n\n return ((rootA.val == rootB.val) and self.solution(rootA.left, rootB.left) and\n self.solution(rootA.right, rootB.right))\n\n\nA = BinaryTree()\nA.insert(100)\nA.insert(102)\nA.insert(96)\nB = BinaryTree()\nB.insert(100)\nB.insert(102)\nB.insert(96)\nres = Solution().solution(A.root, B.root)\nprint(res)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def getUTC_TIME():
return datetime.datetime.utcnow()
def pushSample(sample, topic):
global client
client.publish(topic, str(sample))
<|reserved_special_token_0|>
def on_connect(client, userdata, flags, rc):
print('Connected with result code ' + str(rc))
client.subscribe('#')
def notifyTelegram(message):
print('Notifying Telegram: ' + message)
bot.sendMessage(504721552, message)
def isNotifyTime(topic):
timer = time.time()
global last_notify
if topic not in last_notify:
last_notify[topic] = 0
result = True
else:
result = timer - last_notify[topic] > NOTIFY_INTERVAL
if result == True:
last_notify[topic] = timer
return result
def limitsExsess(topic, value):
""" Check the value for limits according to topic.
If out of limit, notify over telegram"""
if isNotifyTime(topic):
if 'temperature' in topic:
val = float(value)
if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:
notifyTelegram('Temperature out of bounds: ' + value + 'degC')
return True
if 'CO' in topic:
val = float(value)
if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:
notifyTelegram('Carbon Monoxide level above threshold: ' +
value)
return True
if 'All_Gas' in topic:
val = float(value)
if warmedUp and val > GAS_ALL_ADC_THRESH:
notifyTelegram('Poison gas level above threshold: ' + value)
return True
if 'alarm' in topic:
val = float(value)
if int(val) == 1:
notifyTelegram('ALARM in Living room is On!')
return True
if 'MotionHUE' in topic:
val = float(value)
if int(val) == 1:
notifyTelegram('HUE Motion sensor detected movement!')
return True
return False
def on_message(client, userdata, msg):
global service
global last_record
currTime = getUTC_TIME()
topic = msg.topic
print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.
payload))
if topic not in topicsOfInterest:
print('Topic: ', topic, ' from ', msg, ' not in the interest list')
return
if 'empty' in topic:
return
timer = time.time()
if topic not in last_record:
last_record[topic] = 0
value = str(msg.payload)
if limitsExsess(topic, value) or timer - last_record[topic
] > RECORD_INTERVAL:
print('Updating records')
update_records(topic, value)
last_record[topic] = timer
return
def on_disconnect(client, userdata, rc=0):
print('DisConnected result code ' + str(rc))
client.loop_stop()
def on_log(client, userdata, level, buf):
print('UTC: ', time.ctime(), 'log: ', buf)
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = HOME_DIR
credential_dir = os.path.join(home_dir, '.credentials')
print('Credentials folder: ', credential_dir)
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else:
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def create_service():
credentials = get_credentials()
service = discovery.build('sheets', 'v4', credentials=credentials)
return service
def number_of_entries(service):
result = service.spreadsheets().values().get(spreadsheetId=
SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()
value = result.get('values', [])
return int(value[0][0])
def update_records(topic, value):
receiveTime = getUTC_TIME()
json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {
'value': float(value)}}]
print('Writing to InfluxDB: ', json_body)
dbclient.write_points(json_body)
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(os.path.join(__location__, 'config.json'), 'r') as f:
config = json.load(f)
<|reserved_special_token_0|>
def getUTC_TIME():
return datetime.datetime.utcnow()
def pushSample(sample, topic):
global client
client.publish(topic, str(sample))
print('Initializing...')
def on_connect(client, userdata, flags, rc):
print('Connected with result code ' + str(rc))
client.subscribe('#')
def notifyTelegram(message):
print('Notifying Telegram: ' + message)
bot.sendMessage(504721552, message)
def isNotifyTime(topic):
timer = time.time()
global last_notify
if topic not in last_notify:
last_notify[topic] = 0
result = True
else:
result = timer - last_notify[topic] > NOTIFY_INTERVAL
if result == True:
last_notify[topic] = timer
return result
def limitsExsess(topic, value):
""" Check the value for limits according to topic.
If out of limit, notify over telegram"""
if isNotifyTime(topic):
if 'temperature' in topic:
val = float(value)
if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:
notifyTelegram('Temperature out of bounds: ' + value + 'degC')
return True
if 'CO' in topic:
val = float(value)
if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:
notifyTelegram('Carbon Monoxide level above threshold: ' +
value)
return True
if 'All_Gas' in topic:
val = float(value)
if warmedUp and val > GAS_ALL_ADC_THRESH:
notifyTelegram('Poison gas level above threshold: ' + value)
return True
if 'alarm' in topic:
val = float(value)
if int(val) == 1:
notifyTelegram('ALARM in Living room is On!')
return True
if 'MotionHUE' in topic:
val = float(value)
if int(val) == 1:
notifyTelegram('HUE Motion sensor detected movement!')
return True
return False
def on_message(client, userdata, msg):
global service
global last_record
currTime = getUTC_TIME()
topic = msg.topic
print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.
payload))
if topic not in topicsOfInterest:
print('Topic: ', topic, ' from ', msg, ' not in the interest list')
return
if 'empty' in topic:
return
timer = time.time()
if topic not in last_record:
last_record[topic] = 0
value = str(msg.payload)
if limitsExsess(topic, value) or timer - last_record[topic
] > RECORD_INTERVAL:
print('Updating records')
update_records(topic, value)
last_record[topic] = timer
return
def on_disconnect(client, userdata, rc=0):
print('DisConnected result code ' + str(rc))
client.loop_stop()
def on_log(client, userdata, level, buf):
print('UTC: ', time.ctime(), 'log: ', buf)
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = HOME_DIR
credential_dir = os.path.join(home_dir, '.credentials')
print('Credentials folder: ', credential_dir)
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else:
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def create_service():
credentials = get_credentials()
service = discovery.build('sheets', 'v4', credentials=credentials)
return service
def number_of_entries(service):
result = service.spreadsheets().values().get(spreadsheetId=
SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()
value = result.get('values', [])
return int(value[0][0])
def update_records(topic, value):
receiveTime = getUTC_TIME()
json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {
'value': float(value)}}]
print('Writing to InfluxDB: ', json_body)
dbclient.write_points(json_body)
return
<|reserved_special_token_0|>
def update_entries(service, entries):
range = NUM_ENTRIES_CELL
value_input_option = 'USER_ENTERED'
values = [[entries]]
body = {'values': values}
request = service.spreadsheets().values().update(spreadsheetId=
SPREADSHEET_ID, range=range, valueInputOption=value_input_option,
body=body)
response = request.execute()
return response
if __name__ == '__main__':
global service
connectedGoogle = False
connectedMQTT = False
global dbclient
global warmedUp
warmedUp = False
dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')
startTime = time.time()
bot = telepot.Bot(telegramToken)
bot.getMe()
client = mqtt.Client('monitor')
client.on_connect = on_connect
client.on_message = on_message
client.on_log = on_log
while not connectedMQTT:
try:
client.connect(localBroker, localPort, keepalive=6000)
connectedMQTT = True
except:
print('Connection to MQTT broker failed')
print('exception: ', sys.exc_info()[0])
time.sleep(1)
client.loop_start()
while True:
time.sleep(10)
if not warmedUp:
warmedUp = time.time() - startTime > WARM_UP_THRESH
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# --- Tunables and module state -------------------------------------------
DEBUG = False
UTC_OFFSET = 3  # hours; NOTE(review): defined but not referenced in this chunk
RECORD_INTERVAL = 5 * 60  # seconds between periodic per-topic DB writes
NOTIFY_INTERVAL = 1 * 60  # seconds between per-topic Telegram notifications
HOME_DIR = '/home/pi'
localTimeOut = 120
# Directory containing this script; used to find config.json next to it.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(
    __file__)))
# Per-topic timestamps: last DB record / last Telegram notification.
last_record = {}
last_notify = {}
# Secrets and host addresses are kept out of the source, in config.json.
with open(os.path.join(__location__, 'config.json'), 'r') as f:
    config = json.load(f)
telegramToken = config['telegramToken']
RPi_HOST = config['RPi_HOST']
SPREADSHEET_ID = config['SPREADSHEET_ID']
API_KEY = config['API_KEY']
# Google Sheets API settings (legacy recording backend).
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Quickstart'
NUM_ENTRIES_CELL = 'InputData!E2'
SHEET_ID = 0
# MQTT broker: the local Raspberry Pi, default MQTT port.
localBroker = RPi_HOST
localPort = 1883
# Alert thresholds: temperatures in degC, gas levels in raw ADC counts.
MAX_TEMPERATURE = 30
MIN_TEMPERATURE = 15
CARBON_MONOXIDE_ADC_THRESH = 5000
GAS_ALL_ADC_THRESH = 12000
WARM_UP_THRESH = 300  # seconds before gas sensor readings are trusted
topicsOfInterest = ['/sensor/Chipa/humidity', '/sensor/Chipa/temperature',
    '/sensor/Chipa/CO', '/sensor/Chipa/All_Gas', '/sensor/livingRoom/alarm',
    '/sensor/MotionHUE', '/empty']
def getUTC_TIME():
    """Return the current moment as a naive (tzinfo-less) UTC datetime."""
    now = datetime.datetime.utcnow()
    return now
def pushSample(sample, topic):
    """Publish *sample* (stringified) to MQTT *topic* on the module-global client."""
    global client
    client.publish(topic, str(sample))
print('Initializing...')
def on_connect(client, userdata, flags, rc):
    """Paho-MQTT connect callback: log the result code and subscribe to all topics.

    Subscribing to the '#' wildcard delivers every message on the broker;
    on_message filters against topicsOfInterest afterwards.
    """
    print('Connected with result code ' + str(rc))
    client.subscribe('#')
def notifyTelegram(message, chat_id=504721552):
    """Send an alert *message* through the module-global Telegram bot.

    Args:
        message: text to deliver.
        chat_id: destination Telegram chat. Defaults to the previously
            hard-coded owner's chat id, so existing callers are unaffected
            while new callers may target other chats.
    """
    print('Notifying Telegram: ' + message)
    bot.sendMessage(chat_id, message)
def isNotifyTime(topic):
    """Per-topic notification rate limiter.

    Returns True when *topic* may notify again: either it has never been
    seen, or more than NOTIFY_INTERVAL seconds elapsed since its last
    allowed notification. A True result also stamps the topic with the
    current time in the module-global last_notify map.
    """
    global last_notify
    now = time.time()
    previous = last_notify.get(topic)
    due = previous is None or now - previous > NOTIFY_INTERVAL
    if due:
        last_notify[topic] = now
    return due
def limitsExsess(topic, value):
    """Check *value* against the limits that apply to *topic*.

    Sends a Telegram alert and returns True when a limit is exceeded;
    returns False otherwise. Alerts are rate-limited per topic through
    isNotifyTime, and gas checks are suppressed until warm-up completes.
    """
    # Guard clause: honour the per-topic notification rate limit first.
    if not isNotifyTime(topic):
        return False
    if 'temperature' in topic:
        val = float(value)
        if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:
            notifyTelegram('Temperature out of bounds: ' + value + 'degC')
            return True
    if 'CO' in topic:
        val = float(value)
        if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:
            notifyTelegram('Carbon Monoxide level above threshold: ' + value)
            return True
    if 'All_Gas' in topic:
        val = float(value)
        if warmedUp and val > GAS_ALL_ADC_THRESH:
            notifyTelegram('Poison gas level above threshold: ' + value)
            return True
    if 'alarm' in topic:
        val = float(value)
        if int(val) == 1:
            notifyTelegram('ALARM in Living room is On!')
            return True
    if 'MotionHUE' in topic:
        val = float(value)
        if int(val) == 1:
            notifyTelegram('HUE Motion sensor detected movement!')
            return True
    return False
def on_message(client, userdata, msg):
    """paho-mqtt message callback: filter, limit-check and record a sample.

    Messages outside topicsOfInterest, and the '/empty' keep-alive topic,
    are ignored.  A sample is written to InfluxDB when it exceeds a limit
    or when RECORD_INTERVAL seconds have passed since the topic's last
    recorded sample.
    """
    global last_record
    currTime = getUTC_TIME()
    topic = msg.topic
    print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.
        payload))
    if topic not in topicsOfInterest:
        print('Topic: ', topic, ' from ', msg, ' not in the interest list')
        return
    if 'empty' in topic:
        return
    timer = time.time()
    # First message on a topic must always be recorded.
    if topic not in last_record:
        last_record[topic] = 0
    # BUGFIX: under paho-mqtt on Python 3, msg.payload is bytes, and
    # str(b'23.5') yields "b'23.5'", which breaks float(value) inside
    # limitsExsess and the InfluxDB write.  Decode to text instead.
    if isinstance(msg.payload, bytes):
        value = msg.payload.decode('utf-8', errors='replace')
    else:
        value = str(msg.payload)
    if limitsExsess(topic, value) or timer - last_record[topic
            ] > RECORD_INTERVAL:
        print('Updating records')
        update_records(topic, value)
        last_record[topic] = timer
    return
def on_disconnect(client, userdata, rc=0):
    """paho-mqtt disconnect callback: log the result code and stop the network loop."""
    print(f'DisConnected result code {rc}')
    client.loop_stop()
def on_log(client, userdata, level, buf):
    """paho-mqtt logging callback: echo library log lines with the local ctime stamp."""
    stamp = time.ctime()
    print('UTC: ', stamp, 'log: ', buf)
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.

    NOTE(review): relies on oauth2client's ``Storage``/``tools`` helpers and a
    module-level ``flags`` object whose imports are commented out in this
    file, so calling this as-is would raise NameError -- confirm before
    re-enabling the Google Sheets path.
    """
    home_dir = HOME_DIR
    credential_dir = os.path.join(home_dir, '.credentials')
    print('Credentials folder: ', credential_dir)
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
        'sheets.googleapis.com-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # ``client`` here would be oauth2client.client, but the same name is
        # rebound to the MQTT client in __main__ -- a latent name collision.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def create_service():
    """Build and return a Google Sheets API v4 service object.

    NOTE(review): depends on ``discovery`` from googleapiclient, whose import
    is commented out in this module -- the Sheets path appears disabled in
    favour of InfluxDB.
    """
    credentials = get_credentials()
    service = discovery.build('sheets', 'v4', credentials=credentials)
    return service
def number_of_entries(service):
    """Return the record counter stored in NUM_ENTRIES_CELL of the spreadsheet."""
    response = service.spreadsheets().values().get(
        spreadsheetId=SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()
    rows = response.get('values', [])
    return int(rows[0][0])
def update_records(topic, value):
    """Write one sample for *topic* into InfluxDB, timestamped with UTC now."""
    receiveTime = getUTC_TIME()
    point = {
        'measurement': topic,
        'time': receiveTime,
        'fields': {'value': float(value)},
    }
    json_body = [point]
    print('Writing to InfluxDB: ', json_body)
    dbclient.write_points(json_body)
<|reserved_special_token_0|>
def update_entries(service, entries):
    """Write *entries* into NUM_ENTRIES_CELL of the Google spreadsheet."""
    body = {'values': [[entries]]}
    request = service.spreadsheets().values().update(
        spreadsheetId=SPREADSHEET_ID,
        range=NUM_ENTRIES_CELL,
        valueInputOption='USER_ENTERED',
        body=body)
    return request.execute()
if __name__ == '__main__':
    # Module-level state shared with the MQTT callbacks defined above.
    connectedGoogle = False
    connectedMQTT = False
    # warmedUp flips to True once WARM_UP_THRESH seconds have elapsed; until
    # then, gas-sensor limit checks in limitsExsess are suppressed.
    warmedUp = False
    dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')
    startTime = time.time()
    # Establish the Telegram bot used by notifyTelegram.
    bot = telepot.Bot(telegramToken)
    bot.getMe()
    client = mqtt.Client('monitor')
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_log = on_log
    # Retry until the local MQTT broker accepts the connection.
    while not connectedMQTT:
        try:
            client.connect(localBroker, localPort, keepalive=6000)
            connectedMQTT = True
        except Exception:
            # BUGFIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt and made the retry loop impossible to stop.
            print('Connection to MQTT broker failed')
            print('exception: ', sys.exc_info()[0])
            time.sleep(1)
    client.loop_start()
    # Main loop only tracks sensor warm-up; all work happens in callbacks.
    while True:
        time.sleep(10)
        if not warmedUp:
            warmedUp = time.time() - startTime > WARM_UP_THRESH
<|reserved_special_token_1|>
import time
import datetime
import os
import string
import paho.mqtt.client as mqtt
import telepot
import json
from influxdb import InfluxDBClient
import sys
DEBUG = False
UTC_OFFSET = 3  # hours between UTC and local (Jerusalem) time
RECORD_INTERVAL = 5 * 60  # seconds between recorded samples per topic
NOTIFY_INTERVAL = 1 * 60  # seconds between Telegram notifications per topic
HOME_DIR = '/home/pi'  # home directory used for the credentials cache
localTimeOut = 120  # local MQTT session timeout (seconds)
# Directory containing this script; config.json is looked up next to it.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(
    __file__)))
# Per-topic timestamps of the last DB record / last Telegram notification.
last_record = {}
last_notify = {}
# Secrets and endpoints are kept out of the source in config.json.
with open(os.path.join(__location__, 'config.json'), 'r') as f:
    config = json.load(f)
telegramToken = config['telegramToken']
RPi_HOST = config['RPi_HOST']
SPREADSHEET_ID = config['SPREADSHEET_ID']
API_KEY = config['API_KEY']
# Google Sheets API configuration (legacy; the Sheets path is disabled).
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Quickstart'
NUM_ENTRIES_CELL = 'InputData!E2'  # spreadsheet cell holding the record counter
SHEET_ID = 0
localBroker = RPi_HOST  # local MQTT broker host
localPort = 1883  # local MQTT broker port
# Alert thresholds: temperatures in degC, gas levels in raw ADC counts.
MAX_TEMPERATURE = 30
MIN_TEMPERATURE = 15
CARBON_MONOXIDE_ADC_THRESH = 5000
GAS_ALL_ADC_THRESH = 12000
WARM_UP_THRESH = 300  # seconds before gas-sensor readings are trusted
# Only these topics are limit-checked and recorded by on_message.
topicsOfInterest = ['/sensor/Chipa/humidity', '/sensor/Chipa/temperature',
    '/sensor/Chipa/CO', '/sensor/Chipa/All_Gas', '/sensor/livingRoom/alarm',
    '/sensor/MotionHUE', '/empty']
def getUTC_TIME():
return datetime.datetime.utcnow()
def pushSample(sample, topic):
global client
client.publish(topic, str(sample))
print('Initializing...')
def on_connect(client, userdata, flags, rc):
print('Connected with result code ' + str(rc))
client.subscribe('#')
def notifyTelegram(message):
print('Notifying Telegram: ' + message)
bot.sendMessage(504721552, message)
def isNotifyTime(topic):
timer = time.time()
global last_notify
if topic not in last_notify:
last_notify[topic] = 0
result = True
else:
result = timer - last_notify[topic] > NOTIFY_INTERVAL
if result == True:
last_notify[topic] = timer
return result
def limitsExsess(topic, value):
""" Check the value for limits according to topic.
If out of limit, notify over telegram"""
if isNotifyTime(topic):
if 'temperature' in topic:
val = float(value)
if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:
notifyTelegram('Temperature out of bounds: ' + value + 'degC')
return True
if 'CO' in topic:
val = float(value)
if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:
notifyTelegram('Carbon Monoxide level above threshold: ' +
value)
return True
if 'All_Gas' in topic:
val = float(value)
if warmedUp and val > GAS_ALL_ADC_THRESH:
notifyTelegram('Poison gas level above threshold: ' + value)
return True
if 'alarm' in topic:
val = float(value)
if int(val) == 1:
notifyTelegram('ALARM in Living room is On!')
return True
if 'MotionHUE' in topic:
val = float(value)
if int(val) == 1:
notifyTelegram('HUE Motion sensor detected movement!')
return True
return False
def on_message(client, userdata, msg):
    """paho-mqtt message callback: filter, limit-check and record a sample.

    Messages outside topicsOfInterest, and the '/empty' keep-alive topic,
    are ignored.  A sample is written to InfluxDB when it exceeds a limit
    or when RECORD_INTERVAL seconds have passed since the topic's last
    recorded sample.
    """
    global last_record
    currTime = getUTC_TIME()
    topic = msg.topic
    print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.
        payload))
    if topic not in topicsOfInterest:
        print('Topic: ', topic, ' from ', msg, ' not in the interest list')
        return
    if 'empty' in topic:
        return
    timer = time.time()
    # First message on a topic must always be recorded.
    if topic not in last_record:
        last_record[topic] = 0
    # BUGFIX: under paho-mqtt on Python 3, msg.payload is bytes, and
    # str(b'23.5') yields "b'23.5'", which breaks float(value) inside
    # limitsExsess and the InfluxDB write.  Decode to text instead.
    if isinstance(msg.payload, bytes):
        value = msg.payload.decode('utf-8', errors='replace')
    else:
        value = str(msg.payload)
    if limitsExsess(topic, value) or timer - last_record[topic
            ] > RECORD_INTERVAL:
        print('Updating records')
        update_records(topic, value)
        last_record[topic] = timer
    return
def on_disconnect(client, userdata, rc=0):
print('DisConnected result code ' + str(rc))
client.loop_stop()
def on_log(client, userdata, level, buf):
print('UTC: ', time.ctime(), 'log: ', buf)
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = HOME_DIR
credential_dir = os.path.join(home_dir, '.credentials')
print('Credentials folder: ', credential_dir)
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else:
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def create_service():
credentials = get_credentials()
service = discovery.build('sheets', 'v4', credentials=credentials)
return service
def number_of_entries(service):
result = service.spreadsheets().values().get(spreadsheetId=
SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()
value = result.get('values', [])
return int(value[0][0])
def update_records(topic, value):
receiveTime = getUTC_TIME()
json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {
'value': float(value)}}]
print('Writing to InfluxDB: ', json_body)
dbclient.write_points(json_body)
return
<|reserved_special_token_0|>
def update_entries(service, entries):
range = NUM_ENTRIES_CELL
value_input_option = 'USER_ENTERED'
values = [[entries]]
body = {'values': values}
request = service.spreadsheets().values().update(spreadsheetId=
SPREADSHEET_ID, range=range, valueInputOption=value_input_option,
body=body)
response = request.execute()
return response
if __name__ == '__main__':
    # Module-level state shared with the MQTT callbacks defined above.
    connectedGoogle = False
    connectedMQTT = False
    # warmedUp flips to True once WARM_UP_THRESH seconds have elapsed; until
    # then, gas-sensor limit checks in limitsExsess are suppressed.
    warmedUp = False
    dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')
    startTime = time.time()
    # Establish the Telegram bot used by notifyTelegram.
    bot = telepot.Bot(telegramToken)
    bot.getMe()
    client = mqtt.Client('monitor')
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_log = on_log
    # Retry until the local MQTT broker accepts the connection.
    while not connectedMQTT:
        try:
            client.connect(localBroker, localPort, keepalive=6000)
            connectedMQTT = True
        except Exception:
            # BUGFIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt and made the retry loop impossible to stop.
            print('Connection to MQTT broker failed')
            print('exception: ', sys.exc_info()[0])
            time.sleep(1)
    client.loop_start()
    # Main loop only tracks sensor warm-up; all work happens in callbacks.
    while True:
        time.sleep(10)
        if not warmedUp:
            warmedUp = time.time() - startTime > WARM_UP_THRESH
<|reserved_special_token_1|>
#!/usr/bin/env python
###########################################################################
# 1) connect to the MQTT broker
# 2) subscribe to the available data streams
# 3) log to google sheets
# 4) notify on critical events on the telegram channel
###########################################################################
import time
import datetime
import os
import string
import paho.mqtt.client as mqtt
#import requests
#from googleapiclient import discovery
#from oauth2client import client
#from oauth2client import tools
#from oauth2client.file import Storage
import telepot
import json
from influxdb import InfluxDBClient
import sys
DEBUG = False
UTC_OFFSET = 3 # hours of differenc between UTC and local (Jerusalem) time
RECORD_INTERVAL = 5*60 #number if seconds between subsequent recods in google sheets and InfluxDB
NOTIFY_INTERVAL = 1*60 #number if seconds between subsequent notification on telegram
HOME_DIR = '/home/pi' #home directory
localTimeOut = 120 # Local MQTT session timeout
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
last_record = {}
last_notify = {}
# get configuration from json
with open( os.path.join(__location__, 'config.json'), 'r') as f:
config = json.load(f)
telegramToken = config['telegramToken']
RPi_HOST = config['RPi_HOST']
SPREADSHEET_ID = config['SPREADSHEET_ID']
API_KEY = config['API_KEY']
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Quickstart'
NUM_ENTRIES_CELL = "InputData!E2"
SHEET_ID = 0
localBroker = RPi_HOST # Local MQTT broker
localPort = 1883 # Local MQTT port
#limits
MAX_TEMPERATURE = 30
MIN_TEMPERATURE = 15
CARBON_MONOXIDE_ADC_THRESH = 5000
GAS_ALL_ADC_THRESH = 12000
WARM_UP_THRESH = 300 # number of seconds from start up, after which start up sensors are sample
topicsOfInterest = ["/sensor/Chipa/humidity",
"/sensor/Chipa/temperature",
"/sensor/Chipa/CO",
"/sensor/Chipa/All_Gas",
"/sensor/livingRoom/alarm",
"/sensor/MotionHUE",
"/empty"
]
def getUTC_TIME():
return datetime.datetime.utcnow()
def pushSample(sample, topic):
global client
client.publish(topic, str(sample))
#Generic Init
print ("Initializing...")
def on_connect(client, userdata, flags, rc):
#MQTT configs
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("#")
def notifyTelegram(message):
print("Notifying Telegram: "+message)
bot.sendMessage(504721552, message)
def isNotifyTime(topic):
timer = time.time()
global last_notify
if topic not in last_notify:
last_notify[topic] = 0
result = True #if event happens for first time, notify
else:
result = (timer - last_notify[topic]) > NOTIFY_INTERVAL
if result == True:
last_notify[topic] = timer # update occurance
return result
def limitsExsess(topic, value):
""" Check the value for limits according to topic.
If out of limit, notify over telegram"""
if isNotifyTime(topic):
if "temperature" in topic:
val = float(value)
if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:
notifyTelegram("Temperature out of bounds: "+value+"degC")
return True
if "CO" in topic:
val = float(value)
if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:
notifyTelegram("Carbon Monoxide level above threshold: "+value)
return True
if "All_Gas" in topic:
val = float(value)
if warmedUp and val > GAS_ALL_ADC_THRESH:
notifyTelegram("Poison gas level above threshold: "+value)
return True
if "alarm" in topic:
val = float(value)
if int(val) == 1:
notifyTelegram("ALARM in Living room is On!")
return True
if "MotionHUE" in topic:
val = float(value)
if int(val) == 1:
notifyTelegram("HUE Motion sensor detected movement!")
return True
return False
def on_message(client, userdata, msg):
    """paho-mqtt message callback: filter, limit-check and record a sample.

    Messages outside topicsOfInterest, and the '/empty' keep-alive topic,
    are ignored.  A sample is written to InfluxDB when it exceeds a limit
    or when RECORD_INTERVAL seconds have passed since the topic's last
    recorded sample.
    """
    global last_record
    currTime = getUTC_TIME()
    topic = msg.topic
    print("UTCtime: " + currTime.ctime() + "," + msg.topic + " " + str(msg.payload))
    if topic not in topicsOfInterest:
        print("Topic: ", topic, " from ", msg, " not in the interest list")
        return
    if "empty" in topic:
        return
    timer = time.time()
    if topic not in last_record:
        last_record[topic] = 0  # to assure first time is updated
    # BUGFIX: under paho-mqtt on Python 3, msg.payload is bytes, and
    # str(b'23.5') yields "b'23.5'", which breaks float(value) inside
    # limitsExsess and the InfluxDB write.  Decode to text instead.
    if isinstance(msg.payload, bytes):
        value = msg.payload.decode("utf-8", errors="replace")
    else:
        value = str(msg.payload)
    if limitsExsess(topic, value) or ((timer - last_record[topic]) > RECORD_INTERVAL):
        print("Updating records")
        update_records(topic, value)
        last_record[topic] = timer
    return
def on_disconnect(client, userdata,rc=0):
print("DisConnected result code "+str(rc))
client.loop_stop()
def on_log(client, userdata, level, buf):
print("UTC: ", time.ctime(), "log: ", buf)
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
#home_dir = os.path.expanduser('~')
home_dir = (HOME_DIR)
credential_dir = os.path.join(home_dir, '.credentials')
print("Credentials folder: ",credential_dir)
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def create_service():
credentials = get_credentials()
service = discovery.build('sheets', 'v4', credentials=credentials)
return service
def number_of_entries(service):
result = service.spreadsheets().values().get(
spreadsheetId=SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()
value = result.get('values', [])
return int(value[0][0])
def update_records(topic, value):
# Update InfluxDB
receiveTime = getUTC_TIME()
json_body = [
{
"measurement": topic,
"time": receiveTime,
"fields": {
"value": float(value)
}
}
]
print("Writing to InfluxDB: ", json_body)
dbclient.write_points(json_body)
return
''' #update Google Sheets
entries = number_of_entries(service)
currTime = getUTC_TIME()
line_num = str(2 + entries)
range = "InputData!A"+line_num+":D"+line_num
# How the input data should be interpreted.
value_input_option = 'USER_ENTERED'
values = [ [ currTime, topic, value ] ]
body = {'values': values}
request = service.spreadsheets().values().update(
spreadsheetId=SPREADSHEET_ID,
range=range,
valueInputOption=value_input_option,
body=body)
response = request.execute()
update_entries(service,entries+1)
return response '''
def update_entries(service,entries):
#Update Google Sheet
range = NUM_ENTRIES_CELL
value_input_option = 'USER_ENTERED'
values = [
[
entries
] ]
body = {'values': values}
request = service.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID, range=range,
valueInputOption=value_input_option, body=body
)
response = request.execute()
return response
if __name__ == "__main__":
    # Module-level state shared with the MQTT callbacks defined above.
    connectedGoogle = False
    connectedMQTT = False
    # warmedUp flips to True once WARM_UP_THRESH seconds have elapsed; until
    # then, gas-sensor limit checks in limitsExsess are suppressed.
    warmedUp = False
    dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')
    startTime = time.time()
    # Establish the Telegram bot used by notifyTelegram.
    bot = telepot.Bot(telegramToken)
    bot.getMe()
    client = mqtt.Client("monitor")
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_log = on_log
    # Retry until the local MQTT broker accepts the connection.
    while not connectedMQTT:
        try:
            client.connect(localBroker, localPort, keepalive=6000)
            connectedMQTT = True
        except Exception:
            # BUGFIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt and made the retry loop impossible to stop.
            print("Connection to MQTT broker failed")
            print("exception: ", sys.exc_info()[0])
            time.sleep(1)
    client.loop_start()
    # Main loop only tracks sensor warm-up; all work happens in callbacks.
    while True:
        time.sleep(10)
        if not warmedUp:
            warmedUp = (time.time() - startTime) > WARM_UP_THRESH
|
flexible
|
{
"blob_id": "0295d6ba962d099e76110c7a0e39748e3163e300",
"index": 5541,
"step-1": "<mask token>\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\n<mask token>\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe('#')\n\n\ndef notifyTelegram(message):\n print('Notifying Telegram: ' + message)\n bot.sendMessage(504721552, message)\n\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True\n else:\n result = timer - last_notify[topic] > NOTIFY_INTERVAL\n if result == True:\n last_notify[topic] = timer\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n if isNotifyTime(topic):\n if 'temperature' in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram('Temperature out of bounds: ' + value + 'degC')\n return True\n if 'CO' in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram('Carbon Monoxide level above threshold: ' +\n value)\n return True\n if 'All_Gas' in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram('Poison gas level above threshold: ' + value)\n return True\n if 'alarm' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('ALARM in Living room is On!')\n return True\n if 'MotionHUE' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('HUE Motion sensor detected movement!')\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.\n payload))\n if topic not in topicsOfInterest:\n print('Topic: ', topic, ' from ', msg, ' not in the interest list')\n 
return\n if 'empty' in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0\n value = str(msg.payload)\n if limitsExsess(topic, value) or timer - last_record[topic\n ] > RECORD_INTERVAL:\n print('Updating records')\n update_records(topic, value)\n last_record[topic] = timer\n return\n\n\ndef on_disconnect(client, userdata, rc=0):\n print('DisConnected result code ' + str(rc))\n client.loop_stop()\n\n\ndef on_log(client, userdata, level, buf):\n print('UTC: ', time.ctime(), 'log: ', buf)\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = HOME_DIR\n credential_dir = os.path.join(home_dir, '.credentials')\n print('Credentials folder: ', credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(spreadsheetId=\n SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n receiveTime = getUTC_TIME()\n json_body = [{'measurement': topic, 'time': receiveTime, 
'fields': {\n 'value': float(value)}}]\n print('Writing to InfluxDB: ', json_body)\n dbclient.write_points(json_body)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open(os.path.join(__location__, 'config.json'), 'r') as f:\n config = json.load(f)\n<mask token>\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\nprint('Initializing...')\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe('#')\n\n\ndef notifyTelegram(message):\n print('Notifying Telegram: ' + message)\n bot.sendMessage(504721552, message)\n\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True\n else:\n result = timer - last_notify[topic] > NOTIFY_INTERVAL\n if result == True:\n last_notify[topic] = timer\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n if isNotifyTime(topic):\n if 'temperature' in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram('Temperature out of bounds: ' + value + 'degC')\n return True\n if 'CO' in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram('Carbon Monoxide level above threshold: ' +\n value)\n return True\n if 'All_Gas' in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram('Poison gas level above threshold: ' + value)\n return True\n if 'alarm' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('ALARM in Living room is On!')\n return True\n if 'MotionHUE' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('HUE Motion sensor detected movement!')\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.\n 
payload))\n if topic not in topicsOfInterest:\n print('Topic: ', topic, ' from ', msg, ' not in the interest list')\n return\n if 'empty' in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0\n value = str(msg.payload)\n if limitsExsess(topic, value) or timer - last_record[topic\n ] > RECORD_INTERVAL:\n print('Updating records')\n update_records(topic, value)\n last_record[topic] = timer\n return\n\n\ndef on_disconnect(client, userdata, rc=0):\n print('DisConnected result code ' + str(rc))\n client.loop_stop()\n\n\ndef on_log(client, userdata, level, buf):\n print('UTC: ', time.ctime(), 'log: ', buf)\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = HOME_DIR\n credential_dir = os.path.join(home_dir, '.credentials')\n print('Credentials folder: ', credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(spreadsheetId=\n SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef 
update_records(topic, value):\n receiveTime = getUTC_TIME()\n json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {\n 'value': float(value)}}]\n print('Writing to InfluxDB: ', json_body)\n dbclient.write_points(json_body)\n return\n\n\n<mask token>\n\n\ndef update_entries(service, entries):\n range = NUM_ENTRIES_CELL\n value_input_option = 'USER_ENTERED'\n values = [[entries]]\n body = {'values': values}\n request = service.spreadsheets().values().update(spreadsheetId=\n SPREADSHEET_ID, range=range, valueInputOption=value_input_option,\n body=body)\n response = request.execute()\n return response\n\n\nif __name__ == '__main__':\n global service\n connectedGoogle = False\n connectedMQTT = False\n global dbclient\n global warmedUp\n warmedUp = False\n dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')\n startTime = time.time()\n bot = telepot.Bot(telegramToken)\n bot.getMe()\n client = mqtt.Client('monitor')\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_log = on_log\n while not connectedMQTT:\n try:\n client.connect(localBroker, localPort, keepalive=6000)\n connectedMQTT = True\n except:\n print('Connection to MQTT broker failed')\n print('exception: ', sys.exc_info()[0])\n time.sleep(1)\n client.loop_start()\n while True:\n time.sleep(10)\n if not warmedUp:\n warmedUp = time.time() - startTime > WARM_UP_THRESH\n",
"step-3": "<mask token>\nDEBUG = False\nUTC_OFFSET = 3\nRECORD_INTERVAL = 5 * 60\nNOTIFY_INTERVAL = 1 * 60\nHOME_DIR = '/home/pi'\nlocalTimeOut = 120\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(\n __file__)))\nlast_record = {}\nlast_notify = {}\nwith open(os.path.join(__location__, 'config.json'), 'r') as f:\n config = json.load(f)\ntelegramToken = config['telegramToken']\nRPi_HOST = config['RPi_HOST']\nSPREADSHEET_ID = config['SPREADSHEET_ID']\nAPI_KEY = config['API_KEY']\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Sheets API Python Quickstart'\nNUM_ENTRIES_CELL = 'InputData!E2'\nSHEET_ID = 0\nlocalBroker = RPi_HOST\nlocalPort = 1883\nMAX_TEMPERATURE = 30\nMIN_TEMPERATURE = 15\nCARBON_MONOXIDE_ADC_THRESH = 5000\nGAS_ALL_ADC_THRESH = 12000\nWARM_UP_THRESH = 300\ntopicsOfInterest = ['/sensor/Chipa/humidity', '/sensor/Chipa/temperature',\n '/sensor/Chipa/CO', '/sensor/Chipa/All_Gas', '/sensor/livingRoom/alarm',\n '/sensor/MotionHUE', '/empty']\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\nprint('Initializing...')\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe('#')\n\n\ndef notifyTelegram(message):\n print('Notifying Telegram: ' + message)\n bot.sendMessage(504721552, message)\n\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True\n else:\n result = timer - last_notify[topic] > NOTIFY_INTERVAL\n if result == True:\n last_notify[topic] = timer\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n if isNotifyTime(topic):\n if 'temperature' in topic:\n val = float(value)\n if val < 
MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram('Temperature out of bounds: ' + value + 'degC')\n return True\n if 'CO' in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram('Carbon Monoxide level above threshold: ' +\n value)\n return True\n if 'All_Gas' in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram('Poison gas level above threshold: ' + value)\n return True\n if 'alarm' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('ALARM in Living room is On!')\n return True\n if 'MotionHUE' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('HUE Motion sensor detected movement!')\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.\n payload))\n if topic not in topicsOfInterest:\n print('Topic: ', topic, ' from ', msg, ' not in the interest list')\n return\n if 'empty' in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0\n value = str(msg.payload)\n if limitsExsess(topic, value) or timer - last_record[topic\n ] > RECORD_INTERVAL:\n print('Updating records')\n update_records(topic, value)\n last_record[topic] = timer\n return\n\n\ndef on_disconnect(client, userdata, rc=0):\n print('DisConnected result code ' + str(rc))\n client.loop_stop()\n\n\ndef on_log(client, userdata, level, buf):\n print('UTC: ', time.ctime(), 'log: ', buf)\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = HOME_DIR\n credential_dir = os.path.join(home_dir, '.credentials')\n print('Credentials folder: ', 
credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(spreadsheetId=\n SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n receiveTime = getUTC_TIME()\n json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {\n 'value': float(value)}}]\n print('Writing to InfluxDB: ', json_body)\n dbclient.write_points(json_body)\n return\n\n\n<mask token>\n\n\ndef update_entries(service, entries):\n range = NUM_ENTRIES_CELL\n value_input_option = 'USER_ENTERED'\n values = [[entries]]\n body = {'values': values}\n request = service.spreadsheets().values().update(spreadsheetId=\n SPREADSHEET_ID, range=range, valueInputOption=value_input_option,\n body=body)\n response = request.execute()\n return response\n\n\nif __name__ == '__main__':\n global service\n connectedGoogle = False\n connectedMQTT = False\n global dbclient\n global warmedUp\n warmedUp = False\n dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')\n startTime = time.time()\n bot = telepot.Bot(telegramToken)\n bot.getMe()\n client = mqtt.Client('monitor')\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_log = on_log\n 
while not connectedMQTT:\n try:\n client.connect(localBroker, localPort, keepalive=6000)\n connectedMQTT = True\n except:\n print('Connection to MQTT broker failed')\n print('exception: ', sys.exc_info()[0])\n time.sleep(1)\n client.loop_start()\n while True:\n time.sleep(10)\n if not warmedUp:\n warmedUp = time.time() - startTime > WARM_UP_THRESH\n",
"step-4": "import time\nimport datetime\nimport os\nimport string\nimport paho.mqtt.client as mqtt\nimport telepot\nimport json\nfrom influxdb import InfluxDBClient\nimport sys\nDEBUG = False\nUTC_OFFSET = 3\nRECORD_INTERVAL = 5 * 60\nNOTIFY_INTERVAL = 1 * 60\nHOME_DIR = '/home/pi'\nlocalTimeOut = 120\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(\n __file__)))\nlast_record = {}\nlast_notify = {}\nwith open(os.path.join(__location__, 'config.json'), 'r') as f:\n config = json.load(f)\ntelegramToken = config['telegramToken']\nRPi_HOST = config['RPi_HOST']\nSPREADSHEET_ID = config['SPREADSHEET_ID']\nAPI_KEY = config['API_KEY']\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Sheets API Python Quickstart'\nNUM_ENTRIES_CELL = 'InputData!E2'\nSHEET_ID = 0\nlocalBroker = RPi_HOST\nlocalPort = 1883\nMAX_TEMPERATURE = 30\nMIN_TEMPERATURE = 15\nCARBON_MONOXIDE_ADC_THRESH = 5000\nGAS_ALL_ADC_THRESH = 12000\nWARM_UP_THRESH = 300\ntopicsOfInterest = ['/sensor/Chipa/humidity', '/sensor/Chipa/temperature',\n '/sensor/Chipa/CO', '/sensor/Chipa/All_Gas', '/sensor/livingRoom/alarm',\n '/sensor/MotionHUE', '/empty']\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\nprint('Initializing...')\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe('#')\n\n\ndef notifyTelegram(message):\n print('Notifying Telegram: ' + message)\n bot.sendMessage(504721552, message)\n\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True\n else:\n result = timer - last_notify[topic] > NOTIFY_INTERVAL\n if result == True:\n last_notify[topic] = timer\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according 
to topic.\n If out of limit, notify over telegram\"\"\"\n if isNotifyTime(topic):\n if 'temperature' in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram('Temperature out of bounds: ' + value + 'degC')\n return True\n if 'CO' in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram('Carbon Monoxide level above threshold: ' +\n value)\n return True\n if 'All_Gas' in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram('Poison gas level above threshold: ' + value)\n return True\n if 'alarm' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('ALARM in Living room is On!')\n return True\n if 'MotionHUE' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('HUE Motion sensor detected movement!')\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.\n payload))\n if topic not in topicsOfInterest:\n print('Topic: ', topic, ' from ', msg, ' not in the interest list')\n return\n if 'empty' in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0\n value = str(msg.payload)\n if limitsExsess(topic, value) or timer - last_record[topic\n ] > RECORD_INTERVAL:\n print('Updating records')\n update_records(topic, value)\n last_record[topic] = timer\n return\n\n\ndef on_disconnect(client, userdata, rc=0):\n print('DisConnected result code ' + str(rc))\n client.loop_stop()\n\n\ndef on_log(client, userdata, level, buf):\n print('UTC: ', time.ctime(), 'log: ', buf)\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained 
credential.\n \"\"\"\n home_dir = HOME_DIR\n credential_dir = os.path.join(home_dir, '.credentials')\n print('Credentials folder: ', credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(spreadsheetId=\n SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n receiveTime = getUTC_TIME()\n json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {\n 'value': float(value)}}]\n print('Writing to InfluxDB: ', json_body)\n dbclient.write_points(json_body)\n return\n\n\n<mask token>\n\n\ndef update_entries(service, entries):\n range = NUM_ENTRIES_CELL\n value_input_option = 'USER_ENTERED'\n values = [[entries]]\n body = {'values': values}\n request = service.spreadsheets().values().update(spreadsheetId=\n SPREADSHEET_ID, range=range, valueInputOption=value_input_option,\n body=body)\n response = request.execute()\n return response\n\n\nif __name__ == '__main__':\n global service\n connectedGoogle = False\n connectedMQTT = False\n global dbclient\n global warmedUp\n warmedUp = False\n dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')\n startTime = time.time()\n bot = telepot.Bot(telegramToken)\n 
bot.getMe()\n client = mqtt.Client('monitor')\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_log = on_log\n while not connectedMQTT:\n try:\n client.connect(localBroker, localPort, keepalive=6000)\n connectedMQTT = True\n except:\n print('Connection to MQTT broker failed')\n print('exception: ', sys.exc_info()[0])\n time.sleep(1)\n client.loop_start()\n while True:\n time.sleep(10)\n if not warmedUp:\n warmedUp = time.time() - startTime > WARM_UP_THRESH\n",
"step-5": "#!/usr/bin/env python\n ###########################################################################\n# 1) connect to the MQTT broker\n# 2) subscribe to the available data streams\n# 3) log to google sheets\n# 4) notify on critical events on the telegram channel\n###########################################################################\n\nimport time\nimport datetime\nimport os\nimport string\nimport paho.mqtt.client as mqtt\n#import requests\n#from googleapiclient import discovery\n#from oauth2client import client\n#from oauth2client import tools\n#from oauth2client.file import Storage\nimport telepot\nimport json\nfrom influxdb import InfluxDBClient\nimport sys\n\nDEBUG = False\nUTC_OFFSET = 3 # hours of differenc between UTC and local (Jerusalem) time\nRECORD_INTERVAL = 5*60 #number if seconds between subsequent recods in google sheets and InfluxDB\nNOTIFY_INTERVAL = 1*60 #number if seconds between subsequent notification on telegram\nHOME_DIR = '/home/pi' #home directory\nlocalTimeOut = 120\t\t\t# Local MQTT session timeout\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nlast_record = {}\nlast_notify = {}\n\n\n# get configuration from json\nwith open( os.path.join(__location__, 'config.json'), 'r') as f:\n config = json.load(f)\n\ntelegramToken = config['telegramToken']\nRPi_HOST = config['RPi_HOST']\nSPREADSHEET_ID = config['SPREADSHEET_ID']\nAPI_KEY = config['API_KEY']\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Sheets API Python Quickstart'\nNUM_ENTRIES_CELL = \"InputData!E2\"\nSHEET_ID = 0\nlocalBroker = RPi_HOST\t\t# Local MQTT broker\nlocalPort = 1883\t\t\t# Local MQTT port\n\n#limits\nMAX_TEMPERATURE = 30\nMIN_TEMPERATURE = 15\nCARBON_MONOXIDE_ADC_THRESH = 5000\nGAS_ALL_ADC_THRESH = 12000\n\nWARM_UP_THRESH = 300 # number of seconds from start up, after which start up sensors are sample\n\ntopicsOfInterest = 
[\"/sensor/Chipa/humidity\",\n \"/sensor/Chipa/temperature\",\n \"/sensor/Chipa/CO\",\n \"/sensor/Chipa/All_Gas\",\n \"/sensor/livingRoom/alarm\",\n \"/sensor/MotionHUE\",\n \"/empty\"\n ]\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\n#Generic Init\nprint (\"Initializing...\")\n\n\ndef on_connect(client, userdata, flags, rc):\n #MQTT configs\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(\"#\")\n\n\ndef notifyTelegram(message):\n print(\"Notifying Telegram: \"+message)\n bot.sendMessage(504721552, message)\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True #if event happens for first time, notify\n else:\n result = (timer - last_notify[topic]) > NOTIFY_INTERVAL\n if result == True: \n last_notify[topic] = timer # update occurance\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n\n if isNotifyTime(topic):\n if \"temperature\" in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram(\"Temperature out of bounds: \"+value+\"degC\")\n return True\n if \"CO\" in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram(\"Carbon Monoxide level above threshold: \"+value)\n return True\n if \"All_Gas\" in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram(\"Poison gas level above threshold: \"+value)\n return True\n if \"alarm\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"ALARM in Living room is On!\")\n return True\n if \"MotionHUE\" in topic:\n val = float(value)\n if int(val) == 
1:\n notifyTelegram(\"HUE Motion sensor detected movement!\")\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n # The callback for when a PUBLISH message is received from the server.\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print(\"UTCtime: \"+currTime.ctime()+\",\"+msg.topic+\" \"+str(msg.payload))\n if topic not in topicsOfInterest:\n print(\"Topic: \",topic,\" from \",msg,\" not in the interest list\")\n return\n if \"empty\" in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0 #to assure first time is updated\n value = str(msg.payload)\n if limitsExsess(topic, value) or ((timer-last_record[topic]) > RECORD_INTERVAL):\n print(\"Updating records\")\n update_records(topic, value)\n last_record[topic] = timer \n return\n\n\ndef on_disconnect(client, userdata,rc=0):\n print(\"DisConnected result code \"+str(rc))\n client.loop_stop()\n\ndef on_log(client, userdata, level, buf):\n print(\"UTC: \", time.ctime(), \"log: \", buf)\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n #home_dir = os.path.expanduser('~')\n home_dir = (HOME_DIR)\n credential_dir = os.path.join(home_dir, '.credentials')\n print(\"Credentials folder: \",credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 
2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(\n spreadsheetId=SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n\n # Update InfluxDB\n receiveTime = getUTC_TIME()\n json_body = [\n {\n \"measurement\": topic,\n \"time\": receiveTime,\n \"fields\": {\n \"value\": float(value)\n }\n }\n ]\n print(\"Writing to InfluxDB: \", json_body)\n dbclient.write_points(json_body)\n return\n\n''' #update Google Sheets\n entries = number_of_entries(service)\n currTime = getUTC_TIME()\n line_num = str(2 + entries)\n range = \"InputData!A\"+line_num+\":D\"+line_num\n\n # How the input data should be interpreted.\n value_input_option = 'USER_ENTERED'\n\n values = [ [ currTime, topic, value ] ]\n body = {'values': values}\n\n request = service.spreadsheets().values().update(\n spreadsheetId=SPREADSHEET_ID, \n range=range, \n valueInputOption=value_input_option, \n body=body)\n\n response = request.execute()\n update_entries(service,entries+1)\n\n return response '''\n\n\ndef update_entries(service,entries):\n #Update Google Sheet\n range = NUM_ENTRIES_CELL\n value_input_option = 'USER_ENTERED'\n values = [\n [\n entries\n ] ]\n body = {'values': values}\n request = service.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID, range=range,\n valueInputOption=value_input_option, body=body\n )\n response = request.execute()\n\n return response\n\nif __name__ == \"__main__\":\n global service\n connectedGoogle = False\n connectedMQTT = False\n global dbclient\n global warmedUp #indicate WARM UP Threshold passed, and gas filters can be sampled\n warmedUp = False #indicate WARM UP 
Threshold passed, and gas filters can be sampled\n dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')\n startTime = time.time()\n\n #establish Telegram Bot\n bot = telepot.Bot(telegramToken)\n bot.getMe()\n\n # while not connectedGoogle:\n # try:\n # service = create_service()\n # connectedGoogle = True\n # except:\n # print (\"failed to connect to google sheets, retrying\")\n # time.sleep(1)\n\n client = mqtt.Client(\"monitor\")\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_log = on_log\n\n\n while not connectedMQTT:\n try:\n client.connect(localBroker, localPort, keepalive = 6000)\n connectedMQTT = True\n except:\n print(\"Connection to MQTT broker failed\")\n print(\"exception: \",sys.exc_info()[0])\n time.sleep(1)\n \n client.loop_start()\n while True:\n time.sleep(10)\n #client.publish(\"/empty\",\"0\")\n if not warmedUp:\n warmedUp = (time.time() - startTime) > WARM_UP_THRESH\n",
"step-ids": [
13,
15,
16,
17,
18
]
}
|
[
13,
15,
16,
17,
18
] |
#!/usr/bin/env python3
# This is a tool to export the WA framework answers to a XLSX file
#
# This code is only for use in Well-Architected labs
# *** NOT FOR PRODUCTION USE ***
#
# Licensed under the Apache 2.0 and MITnoAttr License.
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# https://aws.amazon.com/apache2.0/
import botocore
import boto3
import json
import datetime
import logging
import jmespath
import xlsxwriter
import argparse
from pkg_resources import packaging
import urllib.request
from bs4 import BeautifulSoup, NavigableString, Tag
__author__ = "Eric Pullen"
__email__ = "eppullen@amazon.com"
__copyright__ = "Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved."
__credits__ = ["Eric Pullen"]
__version__ = "0.1"
# ---------------------------------------------------------------------------
# Module-level configuration: constants, logging, CLI parsing, pillar maps.
# ---------------------------------------------------------------------------

# Default region listed here (overwritten below by the --region CLI argument)
REGION_NAME = "us-east-1"
# Module-level placeholders; `response` doubles as a default so error paths
# that fall through have *something* bound (see the API helper functions).
blankjson = {}
response = ""

# Setup Logging
# Root logger captures everything; the chatty AWS/HTTP libraries are raised
# to CRITICAL below so only this script's own messages are shown.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger()
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)

# Command-line interface.  Note this runs at import time (parse_args below),
# so importing this module without the required -f argument will exit.
PARSER = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
This utility has two options to run:
------------------------------------
1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.
2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.
'''
)
PARSER.add_argument('-p','--profile', required=False, default="default", help='AWS CLI Profile Name')
PARSER.add_argument('-r','--region', required=False, default="us-east-1", help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w','--workloadid', required=False, default="", help='Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')
# NOTE(review): -f is marked required, so its default is never used — confirm intent.
PARSER.add_argument('-f','--fileName', required=True, default="./demo.xlsx", help='FileName to export XLSX')
PARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')
ARGUMENTS = PARSER.parse_args()
PROFILE = ARGUMENTS.profile
FILENAME = ARGUMENTS.fileName
REGION_NAME = ARGUMENTS.region
WORKLOADID = ARGUMENTS.workloadid
KEEPTEMP = ARGUMENTS.keeptempworkload
if ARGUMENTS.debug:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)

# To map our short hand names in the console to the API defined pillars
# Example: print(PILLAR_PARSE_MAP['performance'])
PILLAR_PARSE_MAP = {
    "operationalExcellence": "OPS",
    "security": "SEC",
    "reliability": "REL",
    "performance": "PERF",
    "costOptimization": "COST"
}
# API pillar ids mapped to their human-readable display names, used for the
# spreadsheet's "Pillar" column.
PILLAR_PROPER_NAME_MAP = {
    "operationalExcellence": "Operational Excellence",
    "security": "Security",
    "reliability": "Reliability",
    "performance": "Performance Efficiency",
    "costOptimization": "Cost Optimization"
}
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serialises datetime objects as their str() form."""

    def default(self, obj):
        # Datetimes are not JSON-serialisable by default; render them as text.
        # Everything else falls back to the base class (which raises TypeError).
        if not isinstance(obj, datetime.datetime):
            return super().default(obj)
        return str(obj)
def CreateNewWorkload(
    waclient,
    workloadName,
    description,
    reviewOwner,
    environment,
    awsRegions,
    lenses,
    tags,
    pillarPriorities,
    notes="",
    nonAwsRegions=None,
    architecturalDesign='',
    industryType='',
    industry='',
    accountIds=None
):
    """Create a Well-Architected workload and return (workloadId, workloadArn).

    If a workload with the same name already exists, the existing workload's
    id and ARN are returned instead.  `tags` and `pillarPriorities` are
    accepted for interface compatibility but are not forwarded to the API.
    API errors are logged and re-raised (previously the code fell through and
    crashed indexing an unbound response, masking the real error).
    """
    # Avoid mutable default arguments; normalise to fresh lists per call.
    nonAwsRegions = [] if nonAwsRegions is None else nonAwsRegions
    accountIds = [] if accountIds is None else accountIds
    # Create your workload
    try:
        response = waclient.create_workload(
            WorkloadName=workloadName,
            Description=description,
            ReviewOwner=reviewOwner,
            Environment=environment,
            AwsRegions=awsRegions,
            Lenses=lenses,
            NonAwsRegions=nonAwsRegions,
            ArchitecturalDesign=architecturalDesign,
            IndustryType=industryType,
            Industry=industry,
            Notes=notes,
            AccountIds=accountIds
        )
    except waclient.exceptions.ConflictException:
        # Name collision: reuse the existing workload rather than failing.
        workloadId, workloadARN = FindWorkload(waclient, workloadName)
        logger.error("ERROR - The workload name %s already exists as workloadId %s" % (workloadName, workloadId))
        return workloadId, workloadARN
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    workloadId = response['WorkloadId']
    workloadARN = response['WorkloadArn']
    return workloadId, workloadARN
def FindWorkload(
    waclient,
    workloadName
):
    """Return (workloadId, workloadArn) of the first workload matching workloadName.

    The API matches on a name *prefix*; the first summary returned is used.
    Raises ValueError when no workload matches (previously an opaque
    IndexError), and re-raises API errors after logging them (previously the
    code fell through and crashed on an unbound response).
    """
    # Finding your WorkloadId
    try:
        response = waclient.list_workloads(
            WorkloadNamePrefix=workloadName
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    summaries = response['WorkloadSummaries']
    if not summaries:
        raise ValueError("No workload found with name prefix %r" % workloadName)
    workloadId = summaries[0]['WorkloadId']
    workloadArn = summaries[0]['WorkloadArn']
    return workloadId, workloadArn
def DeleteWorkload(waclient, workloadId):
    """Delete the given workload, logging (but swallowing) any API error.

    Returns None in every case; failures are best-effort, matching how the
    script uses this to clean up its TEMP workload.
    """
    try:
        waclient.delete_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
def GetWorkload(
    waclient,
    workloadId
):
    """Fetch and return the full workload dict for workloadId.

    Exits the program on any API error — the script cannot proceed without a
    valid workload.  (Previously only ClientError exited; a
    ParamValidationError fell through and crashed on an unbound response.)
    """
    # Get the WorkloadId
    try:
        response = waclient.get_workload(
            WorkloadId=workloadId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        exit()
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        exit()
    workload = response['Workload']
    return workload
def listLens(waclient):
    """Return the aliases of every lens the service currently offers."""
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
    # Project just the alias field out of each lens summary.
    query = "LensSummaries[*].LensAlias"
    return jmespath.search(query, response)
def getCurrentLensVersion(waclient, lensAlias):
    """Return the published version string of the lens named lensAlias."""
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
    # Filter the summaries down to the matching alias and take its version.
    searchString = "LensSummaries[?LensAlias==`%s`].LensVersion" % lensAlias
    versions = jmespath.search(searchString, response)
    return versions[0]
def findAllQuestionId(waclient, workloadId, lensAlias):
    """Return every answer summary for the lens, across all pillars and pages.

    Due to a bug in some lenses, answers must be listed pillar by pillar to
    get complete results, so we iterate PILLAR_PARSE_MAP explicitly.
    """
    answers = []
    for pillar in PILLAR_PARSE_MAP:
        logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        next_token = None
        while True:
            # Build the call once; add the pagination token after page one.
            kwargs = {
                "WorkloadId": workloadId,
                "LensAlias": lensAlias,
                "PillarId": pillar,
            }
            if next_token is not None:
                kwargs["NextToken"] = next_token
            try:
                response = waclient.list_answers(**kwargs)
            except botocore.exceptions.ParamValidationError as err:
                logger.error("ERROR - Parameter validation error: %s" % err)
            except botocore.exceptions.ClientError as err:
                logger.error("ERROR - Unexpected error: %s" % err)
            answers.extend(response["AnswerSummaries"])
            next_token = response.get("NextToken")
            if next_token is None:
                break
    return answers
def getQuestionDetails(waclient, workloadId, lensAlias, questionId):
    """Return (description, improvementPlanUrl, helpfulResourceUrl, notes)
    for a single question of the given lens/workload."""
    try:
        response = waclient.get_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId
        )
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
    # Each field of interest lives under the response's Answer document.
    fields = ("QuestionDescription", "ImprovementPlanUrl", "HelpfulResourceUrl", "Notes")
    return tuple(jmespath.search("Answer.%s" % field, response) for field in fields)
def updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,
                             selectedChoices, notes):
    """Record selectedChoices and notes for one question; return the choice
    list the service actually stored."""
    try:
        response = waclient.update_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId,
            SelectedChoices=selectedChoices,
            Notes=notes
        )
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
    # Echo back what was persisted so callers can verify the update.
    return jmespath.search("Answer.SelectedChoices", response)
def getImprovementPlanItems(
    waclient,
    workloadId,
    lensAlias,
    QuestionId,
    PillarId,
    ImprovementPlanUrl,
    ChoiceList
):
    """Map each choice id in ChoiceList to its improvement-plan anchor link.

    Downloads the question's improvement-plan documentation page and scans it
    line by line; any line mentioning a choice id is parsed and its first
    hyperlink recorded.  Returns a dict of {choiceId: href}.

    waclient/workloadId/lensAlias/QuestionId/PillarId are currently unused
    but kept for interface compatibility with existing callers.
    """
    # Close the HTTP response deterministically (it was previously leaked).
    with urllib.request.urlopen(ImprovementPlanUrl) as urlresponse:
        htmlStr = urlresponse.read().decode("utf8")
    ipHTMLList = {}
    for line in htmlStr.split('\n'):
        for uq in ChoiceList:
            if uq in line:
                # Take the first anchor on the matching line as the IP link.
                parsed = BeautifulSoup(line, features="html.parser")
                ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(
    ImprovementPlanUrl,
    PillarId
):
    """Extract one improvement-plan step's HTML from its documentation page.

    ImprovementPlanUrl is expected to end in a '#...stepN' fragment; the
    function downloads the page, collects the HTML between the 'stepN' and
    'stepN+1' markers, strips relative glossary links, and returns
    (prettyHTML, questionIdText) where questionIdText is taken from the
    first line containing the pillar's short code (e.g. 'OPS').
    """
    logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
    # Fragment after '#' identifies the step, e.g. '...step3'.
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    # Grab the number of the step we are referencing
    # This will work as long as their are less than 99 steps.
    # NOTE(review): for a two-digit step this takes only the tens digit
    # (stepRaw[-2], not stepRaw[-2:]) — looks wrong; confirm the anchor format.
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        stepNumber = stepRaw[-2]
    #Generate the string for the step number
    firstItem = "step"+stepNumber
    secondItem = ("step"+str((int(stepNumber)+1)))
    logger.debug("Going from %s to %s" % (firstItem, secondItem))
    # Download the whole documentation page and scan it line by line.
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    htmlSplit = htmlStr.split('\n')
    foundit = 0          # 1 while we are inside the wanted step's HTML
    ipString = ""        # accumulated HTML for the step
    questionIdText = ""  # e.g. 'OPS 1' parsed from a pillar-code line
    for i in htmlSplit:
        if PILLAR_PARSE_MAP[PillarId] in i:
            # Lines mentioning the pillar code carry the question label
            # before a ':' — keep the most recent one seen.
            bsparse = BeautifulSoup(i,features="html.parser")
            questionIdText = str(bsparse.text).split(':')[0].strip()
        # Stop collecting at the next step's marker or a closing div.
        if (secondItem in i) or ("</div>" in i):
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString+=i
        elif foundit:
            ipString+=i
    prettyHTML = BeautifulSoup(ipString,features="html.parser")
    # Need to remove all of the "local glossary links" since they point to relative paths
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def _cleanText(text):
    """Collapse every whitespace run in *text* to one space and trim the ends.

    Replaces the old replace('\\n', '') chains, which deleted newlines
    outright and could fuse words that the API split across lines.
    """
    return ' '.join(text.split())


def lensTabCreation(
    WACLIENT,
    workloadId,
    lens,
    workbook,
    allQuestionsForLens,
    workloadName="",
    AWSAccountId="",
    workloadDescription=""
):
    """Render one worksheet (tab) of *workbook* for a single lens.

    The sheet shows a workload header block followed by one row per
    best-practice choice, grouped by pillar and question.  When exporting an
    existing workload (global WORKLOADID set), the workload's name/account/
    description, selected choices and notes are filled in; otherwise those
    cells are left blank for manual completion.

    Parameters:
        WACLIENT            - wellarchitected client used for lookups.
        workloadId          - id of the (possibly TEMP) workload.
        lens                - lens alias, e.g. 'wellarchitected'.
        workbook            - xlsxwriter Workbook the sheet is added to.
        allQuestionsForLens - answer summaries from findAllQuestionId().
        workloadName, AWSAccountId, workloadDescription - header values for
            an existing workload (AWSAccountId is the workload ARN; the
            account id is parsed out of its fifth ':'-separated field).
    """
    # ----- Cell formats --------------------------------------------------
    bold_border = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True
    })
    bold_border_bold = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True,
        'font_size': 20,
        'bold': True
    })
    heading = workbook.add_format({
        'font_size': 24,
        'bold': True
    })
    # Row colours alternate per question: 'A' (blue) and 'B' (green).
    lineA = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineB = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    # "noborder" variants omit the bottom edge so the stacked cells of one
    # question read as a single box.
    lineAnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineBnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    # "hidden" variants use an extreme indent so the duplicated pillar and
    # question text (required for AutoFilter to work) is pushed out of sight.
    lineAhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    lineBhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)

    # ----- Worksheet shell -----------------------------------------------
    logger.debug("Getting lens version for '" + lens + "'")
    versionString = getCurrentLensVersion(WACLIENT, lens)
    logger.debug("Adding worksheet using version " + versionString)
    # Keep the tab name short; Excel caps sheet names at 31 characters.
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet(lensName + ' v' + versionString)
    worksheet.set_landscape()
    worksheet.set_paper(1)  # 8.5x11 (US Letter)
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)

    # ----- Header block --------------------------------------------------
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    if WORKLOADID:
        # Existing workload: show its details; the account id is the fifth
        # ':'-separated field of the workload ARN.
        worksheet.write('C3', workloadName, bold_border)
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
    worksheet.write('D3', 'Enter the name of system', small_font)
    worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
    worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)

    # Column headings for the question table.
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    worksheet.freeze_panes(8, 0)
    worksheet.autofilter('A8:B8')
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)

    # ----- Question rows -------------------------------------------------
    cellPosition = 8  # last used row; each question starts at cellPosition+1
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    for pillar in PILLAR_PARSE_MAP:
        qNum = 1  # per-pillar question counter (OPS1, OPS2, ...)
        jmesquery = "[?PillarId=='" + pillar + "']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        for answers in allQuestionsForPillar:
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum) + " - " + answers['QuestionTitle']
            qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT, workloadId, lens, answers['QuestionId'])
            # Normalise whitespace so the multi-line API text fits one cell.
            qDescription = _cleanText(qDescription)
            logger.debug("Working on '" + questionTitle + "'")
            logger.debug("It has answers of: " + json.dumps(answers['SelectedChoices']))
            cellID = cellPosition + 1
            # With an improvement-plan URL we can link each choice to its
            # documentation anchor.
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='" + answers['QuestionId'] + "'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT, workloadId, lens, answers['QuestionId'], answers['PillarId'], qImprovementPlanUrl, choiceList)
            else:
                ipList = []
            startingCellID = cellID
            # The pillar/question text is written into *every* row so that
            # AutoFilter works, but only the first row is visibly formatted;
            # subsequent rows use the "hidden" (over-indented) formats.
            firstTimePillar = True
            for choices in answers['Choices']:
                cell = 'A' + str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)
                    cell = 'B' + str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar = False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)
                    cell = 'B' + str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                # Column D: the best-practice choice, hyperlinked when we
                # resolved an improvement-plan anchor for it.
                cell = 'D' + str(cellID)
                Title = _cleanText(choices['Title'])
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)
                    # Inline improvement-plan descriptions are disabled (see
                    # getImprovementPlanHTMLDescription); only attach a cell
                    # comment when there is actually text to show, rather
                    # than writing an empty comment box as before.
                    htmlString = ""
                    if htmlString:
                        worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})
                else:
                    worksheet.write(cell, Title, myCell)
                # Column E: the long-form detail for the choice.
                cell = 'E' + str(cellID)
                worksheet.write(cell, _cleanText(choices['Description']), myCell)
                # Column F: SELECTED when the existing workload checked it.
                cell = 'F' + str(cellID)
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = "SELECTED"
                else:
                    responseText = ""
                worksheet.write(cell, responseText, myCell)
                cellID += 1
            # Explanation (C) and Notes (G) span all of this question's rows.
            cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)
            worksheet.merge_range(cellMerge, qDescription, myCell)
            cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, "", myCell)
            cellID -= 1
            qNum += 1
            # Next question starts right after the last row of this one.
            cellPosition = cellID
            # Alternate the two colour schemes between questions.
            if myCell == lineA:
                myCell = lineB
                myCellhidden = lineBhidden
                myCellnoborder = lineBnoborder
            else:
                myCell = lineA
                myCellhidden = lineAhidden
                myCellnoborder = lineAnoborder
def main():
    """Entry point: export WA review answers (or a blank template) to XLSX.

    With --workloadid, exports that workload's lenses, answers and notes.
    Without it, creates a TEMP workload, auto-answers every question (so the
    improvement-plan links resolve), exports, then deletes the TEMP workload
    unless --keeptempworkload was given.
    """
    boto3_min_version = "1.16.38"
    # Verify if the version of Boto3 we are running has the wellarchitected APIs included
    if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):
        logger.error("Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)" % (boto3.__version__, boto3_min_version))
        exit()
    logger.info("Script version %s" % __version__)
    logger.info("Starting Boto %s Session" % boto3.__version__)
    # Create a new boto3 session
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    # Initiate the well-architected session using the region defined above
    WACLIENT = SESSION1.client(
        service_name='wellarchitected',
        region_name=REGION_NAME,
    )
    # If this is an existing workload, we need to query for the various workload properties
    if WORKLOADID:
        logger.info("User specified workload id of %s" % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT, WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info("Lenses for %s: %s" % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT = workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # In order to gather all of the questions, you must create a TEMP Workload
        logger.info("No workload ID specified, we will create a TEMP workload")
        # Grab all lenses that are currently available
        LENSES = listLens(WACLIENT)
        logger.info("Lenses available: " + json.dumps(LENSES))
        # Set the needed workload variables before we create it
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT = 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        # Creating the TEMP workload
        logger.info("Creating a new workload to gather questions and answers")
        workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME, DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, "[]", "[]")
    # Create an new xlsx file and add a worksheet.
    logger.info("Creating xlsx file '" + FILENAME + "'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    # Simple hack to get Wellarchitected base framework first (reverse sort)
    # This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)
    LENSES.sort(reverse=True)
    # Iterate over each lens that we either have added or is in the workload
    for lens in LENSES:
        # Grab all questions for a particular lens
        allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
        if WORKLOADID:
            # If this is an existing workload, just go ahead and create the Tab and cells
            logger.debug("Not answering questions for existing workload")
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)
        else:
            # If this is the TEMP workload, we need to first gather all of the questionIDs possible
            jmesquery = "[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}"
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            # Next we answer all of the questions across all lenses in the TEMP workload
            # (answered questions expose their ImprovementPlanUrl for the export).
            for question in allQuestionIds:
                logger.debug("Answering question %s in the %s lens" % (question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT, workloadId, lens, question['QuestionId'], question['Choices'], 'TEMP WORKLOAD - Added by export script')
            # Once the questions have been answered, we go ahead and create the tab for each
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)
    # Close out the workbook file
    logger.info("Closing Workbook File")
    workbook.close()
    # If this is TEMP workload, we may remove it if it has not been set to keep
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info("Removing TEMP Workload")
            DeleteWorkload(WACLIENT, workloadId)
    logger.info("Done")
# Entry point guard: run the exporter only when executed as a script.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "c5e003d625d7798eaf4ef5bca28f6311edccb316",
"index": 7235,
"step-1": "<mask token>\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return str(z)\n else:\n return super().default(z)\n\n\n<mask token>\n\n\ndef FindWorkload(waclient, workloadName):\n try:\n response = waclient.list_workloads(WorkloadNamePrefix=workloadName)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n return workloadId, workloadArn\n\n\ndef DeleteWorkload(waclient, workloadId):\n try:\n response = waclient.delete_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n\n\n<mask token>\n\n\ndef listLens(waclient):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n lenses = jmespath.search('LensSummaries[*].LensAlias', response)\n return lenses\n\n\ndef getCurrentLensVersion(waclient, lensAlias):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'\n lenses = jmespath.search(searchString, response)\n return lenses[0]\n\n\ndef findAllQuestionId(waclient, workloadId, lensAlias):\n answers = []\n for pillar in PILLAR_PARSE_MAP:\n 
logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n while 'NextToken' in response:\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar, NextToken=\n response['NextToken'])\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n return answers\n\n\ndef getQuestionDetails(waclient, workloadId, lensAlias, questionId):\n try:\n response = waclient.get_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n qDescription = jmespath.search('Answer.QuestionDescription', response)\n qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response\n )\n qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response\n )\n qNotes = jmespath.search('Answer.Notes', response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,\n selectedChoices, notes):\n try:\n response = waclient.update_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId, SelectedChoices=\n selectedChoices, Notes=notes)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - 
Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n jmesquery = 'Answer.SelectedChoices'\n answers = jmespath.search(jmesquery, response)\n return answers\n\n\ndef getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,\n PillarId, ImprovementPlanUrl, ChoiceList):\n response = {}\n htmlString = ''\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line, features='html.parser')\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\n\ndef getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):\n logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (\n ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n firstItem = 'step' + stepNumber\n secondItem = 'step' + str(int(stepNumber) + 1)\n logger.debug('Going from %s to %s' % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n foundit = 0\n ipString = ''\n questionIdText = ''\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i, features='html.parser')\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if secondItem in i or '</div>' in i:\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString += i\n elif foundit:\n ipString += i\n prettyHTML = BeautifulSoup(ipString, features='html.parser')\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n return prettyHTML, questionIdText\n\n\ndef lensTabCreation(WACLIENT, workloadId, lens, workbook,\n 
allQuestionsForLens, workloadName='', AWSAccountId='',\n workloadDescription=''):\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({'border': 1, 'border_color': 'black',\n 'text_wrap': True})\n bold_border_bold = workbook.add_format({'border': 1, 'border_color':\n 'black', 'text_wrap': True, 'font_size': 20, 'bold': True})\n heading = workbook.add_format({'font_size': 24, 'bold': True})\n lineA = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})\n lineB = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})\n lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':\n 'top', 'text_wrap': True})\n lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':\n 'top', 'text_wrap': True})\n lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n logger.debug(\"Getting lens version for '\" + lens + \"'\")\n versionString = getCurrentLensVersion(WACLIENT, lens)\n logger.debug('Adding worksheet using version ' + versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet(lensName + ' v' + versionString)\n worksheet.set_landscape()\n worksheet.set_paper(1)\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n 
worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)\n worksheet.write('D5',\n 'Briefly describe system architecture and workload, flow etc.',\n small_font)\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n worksheet.write('G8', 'Notes (optional)', sub_heading)\n worksheet.freeze_panes(8, 0)\n worksheet.autofilter('A8:B8')\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n cellPosition = 8\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n for pillar in PILLAR_PARSE_MAP:\n qNum = 1\n jmesquery = \"[?PillarId=='\" + pillar + \"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n for answers in allQuestionsForPillar:\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum\n ) + ' - ' + answers['QuestionTitle']\n (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n ) = 
(getQuestionDetails(WACLIENT, workloadId, lens, answers\n ['QuestionId']))\n qDescription = qDescription.replace('\\n ', '').replace(' '\n , '').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n logger.debug(\"Working on '\" + questionTitle + \"'\")\n logger.debug('It has answers of: ' + json.dumps(answers[\n 'SelectedChoices']))\n cellID = cellPosition + 1\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\" + answers['QuestionId'\n ] + \"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,\n answers['QuestionId'], answers['PillarId'],\n qImprovementPlanUrl, choiceList)\n else:\n ipList = []\n startingCellID = cellID\n firstTimePillar = True\n for choices in answers['Choices']:\n cell = 'A' + str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellnoborder)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar = False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellhidden)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellhidden)\n cell = 'D' + str(cellID)\n Title = choices['Title'].replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']],\n myCell, string=Title)\n htmlString = ''\n htmlString = htmlString.replace('\\n ', '').replace(\n ' ', '').replace('\\t', '').strip().rstrip()\n worksheet.write_comment(cell, htmlString, {'author':\n 'Improvement Plan'})\n else:\n worksheet.write(cell, Title, myCell)\n cell = 'E' + str(cellID)\n Description = choices['Description'].replace(\n '\\n ', '')\n Description = Description.replace('\\n ', '')\n Description = Description.replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n Description = 
Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description, myCell)\n cell = 'F' + str(cellID)\n responseText = ''\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = 'SELECTED'\n else:\n responseText = ''\n worksheet.write(cell, responseText, myCell)\n cellID += 1\n cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)\n worksheet.merge_range(cellMerge, qDescription, myCell)\n cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)\n if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, '', myCell)\n cellID -= 1\n qNum += 1\n cellPosition = cellID\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n\ndef main():\n boto3_min_version = '1.16.38'\n if packaging.version.parse(boto3.__version__) < packaging.version.parse(\n boto3_min_version):\n logger.error(\n 'Your Boto3 version (%s) is less than %s. 
You must ugprade to run this script (pip3 upgrade boto3)'\n % (boto3.__version__, boto3_min_version))\n exit()\n logger.info('Script version %s' % __version__)\n logger.info('Starting Boto %s Session' % boto3.__version__)\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=\n REGION_NAME)\n if WORKLOADID:\n logger.info('User specified workload id of %s' % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT, WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT = workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n logger.info('No workload ID specified, we will create a TEMP workload')\n LENSES = listLens(WACLIENT)\n logger.info('Lenses available: ' + json.dumps(LENSES))\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT = 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n logger.info('Creating a new workload to gather questions and answers')\n workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,\n DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',\n '[]')\n logger.info(\"Creating xlsx file '\" + FILENAME + \"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n LENSES.sort(reverse=True)\n for lens in LENSES:\n allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)\n if WORKLOADID:\n logger.debug('Not answering questions for existing workload')\n lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)\n else:\n jmesquery = (\n '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: 
Choices[].ChoiceId}'\n )\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n for question in allQuestionIds:\n logger.debug('Answering question %s in the %s lens' % (\n question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT, workloadId, lens,\n question['QuestionId'], question['Choices'],\n 'TEMP WORKLOAD - Added by export script')\n lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)\n logger.info('Closing Workbook File')\n workbook.close()\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info('Removing TEMP Workload')\n DeleteWorkload(WACLIENT, workloadId)\n logger.info('Done')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return str(z)\n else:\n return super().default(z)\n\n\ndef CreateNewWorkload(waclient, workloadName, description, reviewOwner,\n environment, awsRegions, lenses, tags, pillarPriorities, notes='',\n nonAwsRegions=[], architecturalDesign='', industryType='', industry='',\n accountIds=[]):\n try:\n response = waclient.create_workload(WorkloadName=workloadName,\n Description=description, ReviewOwner=reviewOwner, Environment=\n environment, AwsRegions=awsRegions, Lenses=lenses,\n NonAwsRegions=nonAwsRegions, ArchitecturalDesign=\n architecturalDesign, IndustryType=industryType, Industry=\n industry, Notes=notes, AccountIds=accountIds)\n except waclient.exceptions.ConflictException as e:\n workloadId, workloadARN = FindWorkload(waclient, workloadName)\n logger.error(\n 'ERROR - The workload name %s already exists as workloadId %s' %\n (workloadName, workloadId))\n return workloadId, workloadARN\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadId']\n workloadARN = response['WorkloadArn']\n return workloadId, workloadARN\n\n\ndef FindWorkload(waclient, workloadName):\n try:\n response = waclient.list_workloads(WorkloadNamePrefix=workloadName)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n return workloadId, workloadArn\n\n\ndef DeleteWorkload(waclient, workloadId):\n try:\n response = waclient.delete_workload(WorkloadId=workloadId)\n except 
botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n\n\ndef GetWorkload(waclient, workloadId):\n try:\n response = waclient.get_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n exit()\n workload = response['Workload']\n return workload\n\n\ndef listLens(waclient):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n lenses = jmespath.search('LensSummaries[*].LensAlias', response)\n return lenses\n\n\ndef getCurrentLensVersion(waclient, lensAlias):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'\n lenses = jmespath.search(searchString, response)\n return lenses[0]\n\n\ndef findAllQuestionId(waclient, workloadId, lensAlias):\n answers = []\n for pillar in PILLAR_PARSE_MAP:\n logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n while 'NextToken' in response:\n 
try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar, NextToken=\n response['NextToken'])\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n return answers\n\n\ndef getQuestionDetails(waclient, workloadId, lensAlias, questionId):\n try:\n response = waclient.get_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n qDescription = jmespath.search('Answer.QuestionDescription', response)\n qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response\n )\n qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response\n )\n qNotes = jmespath.search('Answer.Notes', response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,\n selectedChoices, notes):\n try:\n response = waclient.update_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId, SelectedChoices=\n selectedChoices, Notes=notes)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n jmesquery = 'Answer.SelectedChoices'\n answers = jmespath.search(jmesquery, response)\n return answers\n\n\ndef getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,\n PillarId, ImprovementPlanUrl, ChoiceList):\n response = {}\n htmlString = ''\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = 
urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line, features='html.parser')\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\n\ndef getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):\n logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (\n ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n firstItem = 'step' + stepNumber\n secondItem = 'step' + str(int(stepNumber) + 1)\n logger.debug('Going from %s to %s' % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n foundit = 0\n ipString = ''\n questionIdText = ''\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i, features='html.parser')\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if secondItem in i or '</div>' in i:\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString += i\n elif foundit:\n ipString += i\n prettyHTML = BeautifulSoup(ipString, features='html.parser')\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n return prettyHTML, questionIdText\n\n\ndef lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestionsForLens, workloadName='', AWSAccountId='',\n workloadDescription=''):\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({'border': 1, 'border_color': 'black',\n 'text_wrap': True})\n bold_border_bold = workbook.add_format({'border': 1, 'border_color':\n 'black', 'text_wrap': True, 'font_size': 20, 'bold': True})\n heading = workbook.add_format({'font_size': 24, 'bold': True})\n lineA = workbook.add_format({'border': 1, 
'border_color': 'black',\n 'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})\n lineB = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})\n lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':\n 'top', 'text_wrap': True})\n lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':\n 'top', 'text_wrap': True})\n lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n logger.debug(\"Getting lens version for '\" + lens + \"'\")\n versionString = getCurrentLensVersion(WACLIENT, lens)\n logger.debug('Adding worksheet using version ' + versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet(lensName + ' v' + versionString)\n worksheet.set_landscape()\n worksheet.set_paper(1)\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n 
accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)\n worksheet.write('D5',\n 'Briefly describe system architecture and workload, flow etc.',\n small_font)\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n worksheet.write('G8', 'Notes (optional)', sub_heading)\n worksheet.freeze_panes(8, 0)\n worksheet.autofilter('A8:B8')\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n cellPosition = 8\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n for pillar in PILLAR_PARSE_MAP:\n qNum = 1\n jmesquery = \"[?PillarId=='\" + pillar + \"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n for answers in allQuestionsForPillar:\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum\n ) + ' - ' + answers['QuestionTitle']\n (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers\n ['QuestionId']))\n qDescription = qDescription.replace('\\n ', '').replace(' '\n , '').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n logger.debug(\"Working on '\" + questionTitle + \"'\")\n logger.debug('It has answers of: ' + json.dumps(answers[\n 'SelectedChoices']))\n cellID = cellPosition + 1\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\" + 
answers['QuestionId'\n ] + \"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,\n answers['QuestionId'], answers['PillarId'],\n qImprovementPlanUrl, choiceList)\n else:\n ipList = []\n startingCellID = cellID\n firstTimePillar = True\n for choices in answers['Choices']:\n cell = 'A' + str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellnoborder)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar = False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellhidden)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellhidden)\n cell = 'D' + str(cellID)\n Title = choices['Title'].replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']],\n myCell, string=Title)\n htmlString = ''\n htmlString = htmlString.replace('\\n ', '').replace(\n ' ', '').replace('\\t', '').strip().rstrip()\n worksheet.write_comment(cell, htmlString, {'author':\n 'Improvement Plan'})\n else:\n worksheet.write(cell, Title, myCell)\n cell = 'E' + str(cellID)\n Description = choices['Description'].replace(\n '\\n ', '')\n Description = Description.replace('\\n ', '')\n Description = Description.replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n Description = Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description, myCell)\n cell = 'F' + str(cellID)\n responseText = ''\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = 'SELECTED'\n else:\n responseText = ''\n worksheet.write(cell, responseText, myCell)\n cellID += 1\n cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)\n worksheet.merge_range(cellMerge, qDescription, myCell)\n cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)\n 
if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, '', myCell)\n cellID -= 1\n qNum += 1\n cellPosition = cellID\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n\ndef main():\n boto3_min_version = '1.16.38'\n if packaging.version.parse(boto3.__version__) < packaging.version.parse(\n boto3_min_version):\n logger.error(\n 'Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)'\n % (boto3.__version__, boto3_min_version))\n exit()\n logger.info('Script version %s' % __version__)\n logger.info('Starting Boto %s Session' % boto3.__version__)\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=\n REGION_NAME)\n if WORKLOADID:\n logger.info('User specified workload id of %s' % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT, WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT = workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n logger.info('No workload ID specified, we will create a TEMP workload')\n LENSES = listLens(WACLIENT)\n logger.info('Lenses available: ' + json.dumps(LENSES))\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT = 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n logger.info('Creating a new workload to gather questions and answers')\n workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,\n DESCRIPTION, REVIEWOWNER, ENVIRONMENT, 
AWSREGIONS, LENSES, '[]',\n '[]')\n logger.info(\"Creating xlsx file '\" + FILENAME + \"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n LENSES.sort(reverse=True)\n for lens in LENSES:\n allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)\n if WORKLOADID:\n logger.debug('Not answering questions for existing workload')\n lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)\n else:\n jmesquery = (\n '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}'\n )\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n for question in allQuestionIds:\n logger.debug('Answering question %s in the %s lens' % (\n question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT, workloadId, lens,\n question['QuestionId'], question['Choices'],\n 'TEMP WORKLOAD - Added by export script')\n lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)\n logger.info('Closing Workbook File')\n workbook.close()\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info('Removing TEMP Workload')\n DeleteWorkload(WACLIENT, workloadId)\n logger.info('Done')\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'\n , datefmt='%Y-%m-%d %H:%M:%S')\n<mask token>\nlogging.getLogger('boto3').setLevel(logging.CRITICAL)\nlogging.getLogger('botocore').setLevel(logging.CRITICAL)\nlogging.getLogger('s3transfer').setLevel(logging.CRITICAL)\nlogging.getLogger('urllib3').setLevel(logging.CRITICAL)\n<mask token>\nPARSER.add_argument('-p', '--profile', required=False, default='default',\n help='AWS CLI Profile Name')\nPARSER.add_argument('-r', '--region', required=False, default='us-east-1',\n help='From Region Name. Example: us-east-1')\nPARSER.add_argument('-w', '--workloadid', required=False, default='', help=\n 'Workload Id to use instead of creating a TEMP workload')\nPARSER.add_argument('-k', '--keeptempworkload', action='store_true', help=\n 'If you want to keep the TEMP workload created at the end of the export')\nPARSER.add_argument('-f', '--fileName', required=True, default=\n './demo.xlsx', help='FileName to export XLSX')\nPARSER.add_argument('-v', '--debug', action='store_true', help=\n 'print debug messages to stderr')\n<mask token>\nif ARGUMENTS.debug:\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\n<mask token>\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return str(z)\n else:\n return super().default(z)\n\n\ndef CreateNewWorkload(waclient, workloadName, description, reviewOwner,\n environment, awsRegions, lenses, tags, pillarPriorities, notes='',\n nonAwsRegions=[], architecturalDesign='', industryType='', industry='',\n accountIds=[]):\n try:\n response = waclient.create_workload(WorkloadName=workloadName,\n Description=description, ReviewOwner=reviewOwner, Environment=\n environment, AwsRegions=awsRegions, Lenses=lenses,\n NonAwsRegions=nonAwsRegions, ArchitecturalDesign=\n architecturalDesign, IndustryType=industryType, 
Industry=\n industry, Notes=notes, AccountIds=accountIds)\n except waclient.exceptions.ConflictException as e:\n workloadId, workloadARN = FindWorkload(waclient, workloadName)\n logger.error(\n 'ERROR - The workload name %s already exists as workloadId %s' %\n (workloadName, workloadId))\n return workloadId, workloadARN\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadId']\n workloadARN = response['WorkloadArn']\n return workloadId, workloadARN\n\n\ndef FindWorkload(waclient, workloadName):\n try:\n response = waclient.list_workloads(WorkloadNamePrefix=workloadName)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n return workloadId, workloadArn\n\n\ndef DeleteWorkload(waclient, workloadId):\n try:\n response = waclient.delete_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n\n\ndef GetWorkload(waclient, workloadId):\n try:\n response = waclient.get_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n exit()\n workload = response['Workload']\n return workload\n\n\ndef listLens(waclient):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n 
logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n lenses = jmespath.search('LensSummaries[*].LensAlias', response)\n return lenses\n\n\ndef getCurrentLensVersion(waclient, lensAlias):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'\n lenses = jmespath.search(searchString, response)\n return lenses[0]\n\n\ndef findAllQuestionId(waclient, workloadId, lensAlias):\n answers = []\n for pillar in PILLAR_PARSE_MAP:\n logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n while 'NextToken' in response:\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar, NextToken=\n response['NextToken'])\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n return answers\n\n\ndef getQuestionDetails(waclient, workloadId, lensAlias, questionId):\n try:\n response = waclient.get_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n 
except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n qDescription = jmespath.search('Answer.QuestionDescription', response)\n qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response\n )\n qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response\n )\n qNotes = jmespath.search('Answer.Notes', response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,\n selectedChoices, notes):\n try:\n response = waclient.update_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId, SelectedChoices=\n selectedChoices, Notes=notes)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n jmesquery = 'Answer.SelectedChoices'\n answers = jmespath.search(jmesquery, response)\n return answers\n\n\ndef getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,\n PillarId, ImprovementPlanUrl, ChoiceList):\n response = {}\n htmlString = ''\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line, features='html.parser')\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\n\ndef getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):\n logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (\n ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n firstItem = 'step' + stepNumber\n secondItem = 'step' + str(int(stepNumber) + 1)\n logger.debug('Going 
from %s to %s' % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n foundit = 0\n ipString = ''\n questionIdText = ''\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i, features='html.parser')\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if secondItem in i or '</div>' in i:\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString += i\n elif foundit:\n ipString += i\n prettyHTML = BeautifulSoup(ipString, features='html.parser')\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n return prettyHTML, questionIdText\n\n\ndef lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestionsForLens, workloadName='', AWSAccountId='',\n workloadDescription=''):\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({'border': 1, 'border_color': 'black',\n 'text_wrap': True})\n bold_border_bold = workbook.add_format({'border': 1, 'border_color':\n 'black', 'text_wrap': True, 'font_size': 20, 'bold': True})\n heading = workbook.add_format({'font_size': 24, 'bold': True})\n lineA = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})\n lineB = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})\n lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':\n 'top', 'text_wrap': True})\n lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':\n 'top', 'text_wrap': True})\n lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',\n 'text_wrap': False, 
'indent': 100})\n lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n logger.debug(\"Getting lens version for '\" + lens + \"'\")\n versionString = getCurrentLensVersion(WACLIENT, lens)\n logger.debug('Adding worksheet using version ' + versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet(lensName + ' v' + versionString)\n worksheet.set_landscape()\n worksheet.set_paper(1)\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)\n worksheet.write('D5',\n 'Briefly describe system architecture and workload, flow etc.',\n small_font)\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', 
sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n worksheet.write('G8', 'Notes (optional)', sub_heading)\n worksheet.freeze_panes(8, 0)\n worksheet.autofilter('A8:B8')\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n cellPosition = 8\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n for pillar in PILLAR_PARSE_MAP:\n qNum = 1\n jmesquery = \"[?PillarId=='\" + pillar + \"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n for answers in allQuestionsForPillar:\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum\n ) + ' - ' + answers['QuestionTitle']\n (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers\n ['QuestionId']))\n qDescription = qDescription.replace('\\n ', '').replace(' '\n , '').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n logger.debug(\"Working on '\" + questionTitle + \"'\")\n logger.debug('It has answers of: ' + json.dumps(answers[\n 'SelectedChoices']))\n cellID = cellPosition + 1\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\" + answers['QuestionId'\n ] + \"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,\n answers['QuestionId'], answers['PillarId'],\n qImprovementPlanUrl, choiceList)\n else:\n ipList = []\n startingCellID = cellID\n firstTimePillar = True\n for choices in answers['Choices']:\n cell = 'A' + str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellnoborder)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar = False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellhidden)\n cell = 'B' + str(cellID)\n worksheet.write(cell, 
questionTitle, myCellhidden)\n cell = 'D' + str(cellID)\n Title = choices['Title'].replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']],\n myCell, string=Title)\n htmlString = ''\n htmlString = htmlString.replace('\\n ', '').replace(\n ' ', '').replace('\\t', '').strip().rstrip()\n worksheet.write_comment(cell, htmlString, {'author':\n 'Improvement Plan'})\n else:\n worksheet.write(cell, Title, myCell)\n cell = 'E' + str(cellID)\n Description = choices['Description'].replace(\n '\\n ', '')\n Description = Description.replace('\\n ', '')\n Description = Description.replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n Description = Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description, myCell)\n cell = 'F' + str(cellID)\n responseText = ''\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = 'SELECTED'\n else:\n responseText = ''\n worksheet.write(cell, responseText, myCell)\n cellID += 1\n cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)\n worksheet.merge_range(cellMerge, qDescription, myCell)\n cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)\n if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, '', myCell)\n cellID -= 1\n qNum += 1\n cellPosition = cellID\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n\ndef main():\n boto3_min_version = '1.16.38'\n if packaging.version.parse(boto3.__version__) < packaging.version.parse(\n boto3_min_version):\n logger.error(\n 'Your Boto3 version (%s) is less than %s. 
You must ugprade to run this script (pip3 upgrade boto3)'\n % (boto3.__version__, boto3_min_version))\n exit()\n logger.info('Script version %s' % __version__)\n logger.info('Starting Boto %s Session' % boto3.__version__)\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=\n REGION_NAME)\n if WORKLOADID:\n logger.info('User specified workload id of %s' % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT, WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT = workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n logger.info('No workload ID specified, we will create a TEMP workload')\n LENSES = listLens(WACLIENT)\n logger.info('Lenses available: ' + json.dumps(LENSES))\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT = 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n logger.info('Creating a new workload to gather questions and answers')\n workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,\n DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',\n '[]')\n logger.info(\"Creating xlsx file '\" + FILENAME + \"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n LENSES.sort(reverse=True)\n for lens in LENSES:\n allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)\n if WORKLOADID:\n logger.debug('Not answering questions for existing workload')\n lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)\n else:\n jmesquery = (\n '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: 
Choices[].ChoiceId}'\n )\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n for question in allQuestionIds:\n logger.debug('Answering question %s in the %s lens' % (\n question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT, workloadId, lens,\n question['QuestionId'], question['Choices'],\n 'TEMP WORKLOAD - Added by export script')\n lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)\n logger.info('Closing Workbook File')\n workbook.close()\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info('Removing TEMP Workload')\n DeleteWorkload(WACLIENT, workloadId)\n logger.info('Done')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import botocore\nimport boto3\nimport json\nimport datetime\nimport logging\nimport jmespath\nimport xlsxwriter\nimport argparse\nfrom pkg_resources import packaging\nimport urllib.request\nfrom bs4 import BeautifulSoup, NavigableString, Tag\n__author__ = 'Eric Pullen'\n__email__ = 'eppullen@amazon.com'\n__copyright__ = (\n 'Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.')\n__credits__ = ['Eric Pullen']\n__version__ = '0.1'\nREGION_NAME = 'us-east-1'\nblankjson = {}\nresponse = ''\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'\n , datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger()\nlogging.getLogger('boto3').setLevel(logging.CRITICAL)\nlogging.getLogger('botocore').setLevel(logging.CRITICAL)\nlogging.getLogger('s3transfer').setLevel(logging.CRITICAL)\nlogging.getLogger('urllib3').setLevel(logging.CRITICAL)\nPARSER = argparse.ArgumentParser(formatter_class=argparse.\n RawDescriptionHelpFormatter, description=\n \"\"\"This utility has two options to run:\n------------------------------------\n1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.\n2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.\n \"\"\"\n )\nPARSER.add_argument('-p', '--profile', required=False, default='default',\n help='AWS CLI Profile Name')\nPARSER.add_argument('-r', '--region', required=False, default='us-east-1',\n help='From Region Name. 
Example: us-east-1')\nPARSER.add_argument('-w', '--workloadid', required=False, default='', help=\n 'Workload Id to use instead of creating a TEMP workload')\nPARSER.add_argument('-k', '--keeptempworkload', action='store_true', help=\n 'If you want to keep the TEMP workload created at the end of the export')\nPARSER.add_argument('-f', '--fileName', required=True, default=\n './demo.xlsx', help='FileName to export XLSX')\nPARSER.add_argument('-v', '--debug', action='store_true', help=\n 'print debug messages to stderr')\nARGUMENTS = PARSER.parse_args()\nPROFILE = ARGUMENTS.profile\nFILENAME = ARGUMENTS.fileName\nREGION_NAME = ARGUMENTS.region\nWORKLOADID = ARGUMENTS.workloadid\nKEEPTEMP = ARGUMENTS.keeptempworkload\nif ARGUMENTS.debug:\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\nPILLAR_PARSE_MAP = {'operationalExcellence': 'OPS', 'security': 'SEC',\n 'reliability': 'REL', 'performance': 'PERF', 'costOptimization': 'COST'}\nPILLAR_PROPER_NAME_MAP = {'operationalExcellence': 'Operational Excellence',\n 'security': 'Security', 'reliability': 'Reliability', 'performance':\n 'Performance Efficiency', 'costOptimization': 'Cost Optimization'}\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return str(z)\n else:\n return super().default(z)\n\n\ndef CreateNewWorkload(waclient, workloadName, description, reviewOwner,\n environment, awsRegions, lenses, tags, pillarPriorities, notes='',\n nonAwsRegions=[], architecturalDesign='', industryType='', industry='',\n accountIds=[]):\n try:\n response = waclient.create_workload(WorkloadName=workloadName,\n Description=description, ReviewOwner=reviewOwner, Environment=\n environment, AwsRegions=awsRegions, Lenses=lenses,\n NonAwsRegions=nonAwsRegions, ArchitecturalDesign=\n architecturalDesign, IndustryType=industryType, Industry=\n industry, Notes=notes, AccountIds=accountIds)\n except waclient.exceptions.ConflictException as e:\n workloadId, 
workloadARN = FindWorkload(waclient, workloadName)\n logger.error(\n 'ERROR - The workload name %s already exists as workloadId %s' %\n (workloadName, workloadId))\n return workloadId, workloadARN\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadId']\n workloadARN = response['WorkloadArn']\n return workloadId, workloadARN\n\n\ndef FindWorkload(waclient, workloadName):\n try:\n response = waclient.list_workloads(WorkloadNamePrefix=workloadName)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n return workloadId, workloadArn\n\n\ndef DeleteWorkload(waclient, workloadId):\n try:\n response = waclient.delete_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n\n\ndef GetWorkload(waclient, workloadId):\n try:\n response = waclient.get_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n exit()\n workload = response['Workload']\n return workload\n\n\ndef listLens(waclient):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - 
Unexpected error: %s' % e)\n lenses = jmespath.search('LensSummaries[*].LensAlias', response)\n return lenses\n\n\ndef getCurrentLensVersion(waclient, lensAlias):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'\n lenses = jmespath.search(searchString, response)\n return lenses[0]\n\n\ndef findAllQuestionId(waclient, workloadId, lensAlias):\n answers = []\n for pillar in PILLAR_PARSE_MAP:\n logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n while 'NextToken' in response:\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar, NextToken=\n response['NextToken'])\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n return answers\n\n\ndef getQuestionDetails(waclient, workloadId, lensAlias, questionId):\n try:\n response = waclient.get_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n qDescription = 
jmespath.search('Answer.QuestionDescription', response)\n qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response\n )\n qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response\n )\n qNotes = jmespath.search('Answer.Notes', response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,\n selectedChoices, notes):\n try:\n response = waclient.update_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId, SelectedChoices=\n selectedChoices, Notes=notes)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n jmesquery = 'Answer.SelectedChoices'\n answers = jmespath.search(jmesquery, response)\n return answers\n\n\ndef getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,\n PillarId, ImprovementPlanUrl, ChoiceList):\n response = {}\n htmlString = ''\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line, features='html.parser')\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\n\ndef getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):\n logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (\n ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n firstItem = 'step' + stepNumber\n secondItem = 'step' + str(int(stepNumber) + 1)\n logger.debug('Going from %s to %s' % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = 
urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n foundit = 0\n ipString = ''\n questionIdText = ''\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i, features='html.parser')\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if secondItem in i or '</div>' in i:\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString += i\n elif foundit:\n ipString += i\n prettyHTML = BeautifulSoup(ipString, features='html.parser')\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n return prettyHTML, questionIdText\n\n\ndef lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestionsForLens, workloadName='', AWSAccountId='',\n workloadDescription=''):\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({'border': 1, 'border_color': 'black',\n 'text_wrap': True})\n bold_border_bold = workbook.add_format({'border': 1, 'border_color':\n 'black', 'text_wrap': True, 'font_size': 20, 'bold': True})\n heading = workbook.add_format({'font_size': 24, 'bold': True})\n lineA = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})\n lineB = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})\n lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':\n 'top', 'text_wrap': True})\n lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':\n 'top', 'text_wrap': True})\n lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 
'bg_color': '#E4EFDC', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n logger.debug(\"Getting lens version for '\" + lens + \"'\")\n versionString = getCurrentLensVersion(WACLIENT, lens)\n logger.debug('Adding worksheet using version ' + versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet(lensName + ' v' + versionString)\n worksheet.set_landscape()\n worksheet.set_paper(1)\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)\n worksheet.write('D5',\n 'Briefly describe system architecture and workload, flow etc.',\n small_font)\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n 
worksheet.write('G8', 'Notes (optional)', sub_heading)\n worksheet.freeze_panes(8, 0)\n worksheet.autofilter('A8:B8')\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n cellPosition = 8\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n for pillar in PILLAR_PARSE_MAP:\n qNum = 1\n jmesquery = \"[?PillarId=='\" + pillar + \"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n for answers in allQuestionsForPillar:\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum\n ) + ' - ' + answers['QuestionTitle']\n (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers\n ['QuestionId']))\n qDescription = qDescription.replace('\\n ', '').replace(' '\n , '').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n logger.debug(\"Working on '\" + questionTitle + \"'\")\n logger.debug('It has answers of: ' + json.dumps(answers[\n 'SelectedChoices']))\n cellID = cellPosition + 1\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\" + answers['QuestionId'\n ] + \"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,\n answers['QuestionId'], answers['PillarId'],\n qImprovementPlanUrl, choiceList)\n else:\n ipList = []\n startingCellID = cellID\n firstTimePillar = True\n for choices in answers['Choices']:\n cell = 'A' + str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellnoborder)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar = False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellhidden)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellhidden)\n cell = 'D' + str(cellID)\n Title = choices['Title'].replace(' ', 
'').replace('\\t', ''\n ).replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']],\n myCell, string=Title)\n htmlString = ''\n htmlString = htmlString.replace('\\n ', '').replace(\n ' ', '').replace('\\t', '').strip().rstrip()\n worksheet.write_comment(cell, htmlString, {'author':\n 'Improvement Plan'})\n else:\n worksheet.write(cell, Title, myCell)\n cell = 'E' + str(cellID)\n Description = choices['Description'].replace(\n '\\n ', '')\n Description = Description.replace('\\n ', '')\n Description = Description.replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n Description = Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description, myCell)\n cell = 'F' + str(cellID)\n responseText = ''\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = 'SELECTED'\n else:\n responseText = ''\n worksheet.write(cell, responseText, myCell)\n cellID += 1\n cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)\n worksheet.merge_range(cellMerge, qDescription, myCell)\n cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)\n if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, '', myCell)\n cellID -= 1\n qNum += 1\n cellPosition = cellID\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n\ndef main():\n boto3_min_version = '1.16.38'\n if packaging.version.parse(boto3.__version__) < packaging.version.parse(\n boto3_min_version):\n logger.error(\n 'Your Boto3 version (%s) is less than %s. 
You must ugprade to run this script (pip3 upgrade boto3)'\n % (boto3.__version__, boto3_min_version))\n exit()\n logger.info('Script version %s' % __version__)\n logger.info('Starting Boto %s Session' % boto3.__version__)\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=\n REGION_NAME)\n if WORKLOADID:\n logger.info('User specified workload id of %s' % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT, WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT = workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n logger.info('No workload ID specified, we will create a TEMP workload')\n LENSES = listLens(WACLIENT)\n logger.info('Lenses available: ' + json.dumps(LENSES))\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT = 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n logger.info('Creating a new workload to gather questions and answers')\n workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,\n DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',\n '[]')\n logger.info(\"Creating xlsx file '\" + FILENAME + \"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n LENSES.sort(reverse=True)\n for lens in LENSES:\n allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)\n if WORKLOADID:\n logger.debug('Not answering questions for existing workload')\n lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)\n else:\n jmesquery = (\n '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: 
Choices[].ChoiceId}'\n )\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n for question in allQuestionIds:\n logger.debug('Answering question %s in the %s lens' % (\n question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT, workloadId, lens,\n question['QuestionId'], question['Choices'],\n 'TEMP WORKLOAD - Added by export script')\n lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)\n logger.info('Closing Workbook File')\n workbook.close()\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info('Removing TEMP Workload')\n DeleteWorkload(WACLIENT, workloadId)\n logger.info('Done')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\n# This is a tool to export the WA framework answers to a XLSX file\n#\n# This code is only for use in Well-Architected labs\n# *** NOT FOR PRODUCTION USE ***\n#\n# Licensed under the Apache 2.0 and MITnoAttr License.\n#\n# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance with the License. A copy of the License is located at\n# https://aws.amazon.com/apache2.0/\n\nimport botocore\nimport boto3\nimport json\nimport datetime\nimport logging\nimport jmespath\nimport xlsxwriter\nimport argparse\nfrom pkg_resources import packaging\nimport urllib.request\nfrom bs4 import BeautifulSoup, NavigableString, Tag\n\n\n__author__ = \"Eric Pullen\"\n__email__ = \"eppullen@amazon.com\"\n__copyright__ = \"Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\"\n__credits__ = [\"Eric Pullen\"]\n__version__ = \"0.1\"\n\n# Default region listed here\nREGION_NAME = \"us-east-1\"\nblankjson = {}\nresponse = \"\"\n\n# Setup Logging\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n)\n\nlogger = logging.getLogger()\nlogging.getLogger('boto3').setLevel(logging.CRITICAL)\nlogging.getLogger('botocore').setLevel(logging.CRITICAL)\nlogging.getLogger('s3transfer').setLevel(logging.CRITICAL)\nlogging.getLogger('urllib3').setLevel(logging.CRITICAL)\n\nPARSER = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\\\nThis utility has two options to run:\n------------------------------------\n1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.\n2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. 
It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.\n '''\n )\n\nPARSER.add_argument('-p','--profile', required=False, default=\"default\", help='AWS CLI Profile Name')\nPARSER.add_argument('-r','--region', required=False, default=\"us-east-1\", help='From Region Name. Example: us-east-1')\nPARSER.add_argument('-w','--workloadid', required=False, default=\"\", help='Workload Id to use instead of creating a TEMP workload')\nPARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')\n\nPARSER.add_argument('-f','--fileName', required=True, default=\"./demo.xlsx\", help='FileName to export XLSX')\nPARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')\n\n\nARGUMENTS = PARSER.parse_args()\nPROFILE = ARGUMENTS.profile\nFILENAME = ARGUMENTS.fileName\nREGION_NAME = ARGUMENTS.region\nWORKLOADID = ARGUMENTS.workloadid\nKEEPTEMP = ARGUMENTS.keeptempworkload\n\nif ARGUMENTS.debug:\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\n\n# To map our short hand names in the console to the API defined pillars\n# Example: print(PILLAR_PARSE_MAP['performance'])\nPILLAR_PARSE_MAP = {\n \"operationalExcellence\": \"OPS\",\n \"security\": \"SEC\",\n \"reliability\": \"REL\",\n \"performance\": \"PERF\",\n \"costOptimization\": \"COST\"\n }\n\nPILLAR_PROPER_NAME_MAP = {\n \"operationalExcellence\": \"Operational Excellence\",\n \"security\": \"Security\",\n \"reliability\": \"Reliability\",\n \"performance\": \"Performance Efficiency\",\n \"costOptimization\": \"Cost Optimization\"\n}\n\n# Helper class to convert a datetime item to JSON.\nclass DateTimeEncoder(json.JSONEncoder):\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return (str(z))\n else:\n return super().default(z)\n\ndef CreateNewWorkload(\n waclient,\n workloadName,\n description,\n 
reviewOwner,\n environment,\n awsRegions,\n lenses,\n tags,\n pillarPriorities,\n notes=\"\",\n nonAwsRegions=[],\n architecturalDesign='',\n industryType='',\n industry='',\n accountIds=[]\n ):\n # Create your workload\n try:\n response=waclient.create_workload(\n WorkloadName=workloadName,\n Description=description,\n ReviewOwner=reviewOwner,\n Environment=environment,\n AwsRegions=awsRegions,\n Lenses=lenses,\n NonAwsRegions=nonAwsRegions,\n ArchitecturalDesign=architecturalDesign,\n IndustryType=industryType,\n Industry=industry,\n Notes=notes,\n AccountIds=accountIds\n )\n except waclient.exceptions.ConflictException as e:\n workloadId,workloadARN = FindWorkload(waclient,workloadName)\n logger.error(\"ERROR - The workload name %s already exists as workloadId %s\" % (workloadName, workloadId))\n return workloadId, workloadARN\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n workloadId = response['WorkloadId']\n workloadARN = response['WorkloadArn']\n return workloadId, workloadARN\n\ndef FindWorkload(\n waclient,\n workloadName\n ):\n # Finding your WorkloadId\n try:\n response=waclient.list_workloads(\n WorkloadNamePrefix=workloadName\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n # print(\"Full JSON:\",json.dumps(response['WorkloadSummaries'], cls=DateTimeEncoder))\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n # print(\"WorkloadId\",workloadId)\n return workloadId, workloadArn\n\ndef DeleteWorkload(\n waclient,\n workloadId\n ):\n\n # Delete the WorkloadId\n try:\n response=waclient.delete_workload(\n WorkloadId=workloadId\n )\n 
except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\ndef GetWorkload(\n waclient,\n workloadId\n ):\n\n # Get the WorkloadId\n try:\n response=waclient.get_workload(\n WorkloadId=workloadId\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n exit()\n\n # print(\"Full JSON:\",json.dumps(response['Workload'], cls=DateTimeEncoder))\n workload = response['Workload']\n # print(\"WorkloadId\",workloadId)\n return workload\n\ndef listLens(\n waclient\n ):\n # List all lenses currently available\n try:\n response=waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n # print(json.dumps(response))\n lenses = jmespath.search(\"LensSummaries[*].LensAlias\", response)\n\n return lenses\n\ndef getCurrentLensVersion(\n waclient,\n lensAlias\n ):\n\n # List all lenses currently available\n try:\n response=waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n # print(json.dumps(response))\n searchString = \"LensSummaries[?LensAlias==`\"+lensAlias+\"`].LensVersion\"\n lenses = jmespath.search(searchString, response)\n\n return lenses[0]\n\ndef findAllQuestionId(\n waclient,\n workloadId,\n lensAlias\n ):\n\n answers = []\n # Due to a bug in some lenses, I have to iterate over each pillar in order to\n # retrieve the correct results.\n for pillar in PILLAR_PARSE_MAP:\n 
logger.debug(\"Grabbing answers for %s %s\" % (lensAlias, pillar))\n # Find a questionID using the questionTitle\n try:\n response=waclient.list_answers(\n WorkloadId=workloadId,\n LensAlias=lensAlias,\n PillarId=pillar\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n answers.extend(response[\"AnswerSummaries\"])\n while \"NextToken\" in response:\n try:\n response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response[\"NextToken\"])\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n answers.extend(response[\"AnswerSummaries\"])\n return answers\n\ndef getQuestionDetails(\n waclient,\n workloadId,\n lensAlias,\n questionId\n ):\n\n # Find a answer for a questionId\n try:\n response=waclient.get_answer(\n WorkloadId=workloadId,\n LensAlias=lensAlias,\n QuestionId=questionId\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n\n\n qDescription = jmespath.search(\"Answer.QuestionDescription\", response)\n qImprovementPlanUrl = jmespath.search(\"Answer.ImprovementPlanUrl\", response)\n qHelpfulResourceUrl = jmespath.search(\"Answer.HelpfulResourceUrl\", response)\n qNotes = jmespath.search(\"Answer.Notes\", response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(\n waclient,\n workloadId,\n lensAlias,\n questionId,\n selectedChoices,\n notes\n ):\n\n # Update a answer to a question\n try:\n response=waclient.update_answer(\n WorkloadId=workloadId,\n 
LensAlias=lensAlias,\n QuestionId=questionId,\n SelectedChoices=selectedChoices,\n Notes=notes\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n # print(json.dumps(response))\n jmesquery = \"Answer.SelectedChoices\"\n answers = jmespath.search(jmesquery, response)\n return answers\n\ndef getImprovementPlanItems(\n waclient,\n workloadId,\n lensAlias,\n QuestionId,\n PillarId,\n ImprovementPlanUrl,\n ChoiceList\n):\n # This will parse the IP Items to gather the links we need\n response = {}\n htmlString = \"\"\n # unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode(\"utf8\")\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line,features=\"html.parser\")\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\ndef getImprovementPlanHTMLDescription(\n ImprovementPlanUrl,\n PillarId\n ):\n\n logger.debug(\"ImprovementPlanUrl: %s for pillar %s \" % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n\n # Grab the number of the step we are referencing\n # This will work as long as their are less than 99 steps.\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n\n #Generate the string for the step number\n firstItem = \"step\"+stepNumber\n secondItem = (\"step\"+str((int(stepNumber)+1)))\n logger.debug (\"Going from %s to %s\" % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode(\"utf8\")\n htmlSplit = htmlStr.split('\\n')\n\n foundit = 0\n ipString = \"\"\n 
questionIdText = \"\"\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i,features=\"html.parser\")\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if (secondItem in i) or (\"</div>\" in i):\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString+=i\n elif foundit:\n ipString+=i\n\n prettyHTML = BeautifulSoup(ipString,features=\"html.parser\")\n # Need to remove all of the \"local glossary links\" since they point to relative paths\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n\n return prettyHTML, questionIdText\n\ndef lensTabCreation(\n WACLIENT,\n workloadId,\n lens,\n workbook,\n allQuestionsForLens,\n workloadName=\"\",\n AWSAccountId=\"\",\n workloadDescription=\"\"\n ):\n\n # Setup some formatting for the workbook\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({\n 'border': 1,\n 'border_color': 'black',\n 'text_wrap': True\n })\n bold_border_bold = workbook.add_format({\n 'border': 1,\n 'border_color': 'black',\n 'text_wrap': True,\n 'font_size': 20,\n 'bold': True\n })\n\n heading = workbook.add_format({\n 'font_size': 24,\n 'bold': True\n })\n\n lineA = workbook.add_format({\n 'border': 1,\n 'border_color': 'black',\n 'bg_color': '#E0EBF6',\n 'align': 'top',\n 'text_wrap': True\n })\n\n lineB = workbook.add_format({\n 'border': 1,\n 'border_color': 'black',\n 'bg_color': '#E4EFDC',\n 'align': 'top',\n 'text_wrap': True\n })\n\n lineAnoborder = workbook.add_format({\n 'border': 0,\n 'top': 1,\n 'left': 1,\n 'right': 1,\n 'border_color': 'black',\n 'bg_color': '#E0EBF6',\n 'align': 'top',\n 'text_wrap': True\n })\n\n lineBnoborder = workbook.add_format({\n 'border': 0,\n 'top': 1,\n 'left': 1,\n 'right': 1,\n 'border_color': 'black',\n 'bg_color': '#E4EFDC',\n 'align': 'top',\n 'text_wrap': True\n })\n\n\n lineAhidden = workbook.add_format({\n 'border': 0,\n 'left': 1,\n 'right': 1,\n 'border_color': 'black',\n 'bg_color': '#E0EBF6',\n 'align': 
'top',\n 'text_wrap': False,\n 'indent': 100\n })\n\n lineBhidden = workbook.add_format({\n 'border': 0,\n 'left': 1,\n 'right': 1,\n 'border_color': 'black',\n 'bg_color': '#E4EFDC',\n 'align': 'top',\n 'text_wrap': False,\n 'indent': 100\n })\n\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n\n # Get the current version of Lens\n logger.debug(\"Getting lens version for '\"+lens+\"'\")\n versionString = getCurrentLensVersion(WACLIENT,lens)\n logger.debug(\"Adding worksheet using version \"+versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet((lensName+' v'+versionString))\n # Print in landscape\n worksheet.set_landscape()\n # Set to 8.5x11 paper size\n worksheet.set_paper(1)\n\n # Set the column widths\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n\n # Top of sheet\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n\n # If we are using an existing workload, then display the Name, ID, and Description at the top\n # or else just make it blank\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', 
small_font)\n worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)\n\n # Subheadings for columns\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n worksheet.write('G8', 'Notes (optional)', sub_heading)\n\n # Freeze the top of the sheet\n worksheet.freeze_panes(8,0)\n\n # AutoFilter on the first two columns\n worksheet.autofilter('A8:B8')\n\n # Make it easier to print\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n\n # Starting point for pillar questions\n cellPosition = 8\n\n # Starting cell look with lineA. Will switch back and forth\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n for pillar in PILLAR_PARSE_MAP:\n # This is the question number for each pillar (ex: OPS1, OPS2, etc)\n qNum = 1\n\n # The query will return all questions for a lens and pillar\n jmesquery = \"[?PillarId=='\"+pillar+\"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n\n # For each of the possible answers, parse them and put into the Worksheet\n for answers in allQuestionsForPillar:\n # List all best practices\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']]+str(qNum)+\" - \"+answers['QuestionTitle']\n qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT,workloadId,lens,answers['QuestionId'])\n # Some of the questions have extra whitespaces and I need to remove those to fit into the cell\n qDescription = qDescription.replace('\\n ','').replace(' ','').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n\n logger.debug(\"Working on '\"+questionTitle+\"'\")\n logger.debug(\"It has answers of: 
\"+json.dumps(answers['SelectedChoices']))\n\n cellID = cellPosition + 1\n\n # If the question has been answered (which we do for the TEMP workload) we grab the URL and parse for the HTML content\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\"+answers['QuestionId']+\"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT,workloadId,lens,answers['QuestionId'],answers['PillarId'],qImprovementPlanUrl,choiceList)\n else:\n ipList = []\n\n startingCellID=cellID\n # If its the first time through this particular pillar question:\n # I want to only write the name once, but I need to fill in\n # each cell with the same data so the autosort works properly\n # (else it will only show the first best practice)\n firstTimePillar=True\n\n for choices in answers['Choices']:\n\n # Write the pillar name and question in every cell for autosort, but only show the first one\n cell = 'A'+str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)\n cell = 'B'+str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar=False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)\n cell = 'B'+str(cellID)\n worksheet.write(cell, questionTitle, myCellhidden)\n\n # Start writing each of the BP's, details, etc\n cell = 'D'+str(cellID)\n Title = choices['Title'].replace(' ','').replace('\\t', '').replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)\n #ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])\n #htmlString = ipItemHTML.text\n htmlString = \"\" \n htmlString = htmlString.replace('\\n ','').replace(' ','').replace('\\t', '').strip().rstrip()\n # print(htmlString)\n worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})\n else:\n 
worksheet.write(cell,Title,myCell)\n\n # Add all Details for each best practice/choice\n cell = 'E'+str(cellID)\n # Remove all of the extra spaces in the description field\n Description = choices['Description'].replace('\\n ','')\n Description = Description.replace('\\n ','')\n Description = Description.replace(' ','').replace('\\t', '').replace('\\n', '')\n Description = Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description ,myCell)\n\n # If this is an existing workload, we will show SELECTED if the have it checked\n # I would love to use a XLSX checkbox, but this library doesn't support it\n cell = 'F'+str(cellID)\n responseText = \"\"\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = \"SELECTED\"\n else:\n responseText = \"\"\n worksheet.write(cell, responseText ,myCell)\n cellID+=1\n\n # We are out of the choice/detail/response loop, so know how many rows were consumed\n # and we can create the explanation and notes field to span all of them\n # Explanantion field\n cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)\n worksheet.merge_range(cellMerge, qDescription,myCell)\n\n # Notes field\n cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)\n if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, \"\", myCell)\n\n cellID-=1\n # Increase the question number\n qNum += 1\n # Reset the starting cellPosition to the last cellID\n cellPosition = cellID\n\n # Reset the cell formatting to alternate between the two colors\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\ndef main():\n boto3_min_version = \"1.16.38\"\n # Verify if the version of Boto3 we are running has the wellarchitected APIs included\n if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):\n 
logger.error(\"Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)\" % (boto3.__version__, boto3_min_version))\n exit()\n\n logger.info(\"Script version %s\" % __version__)\n logger.info(\"Starting Boto %s Session\" % boto3.__version__)\n # Create a new boto3 session\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n # Initiate the well-architected session using the region defined above\n WACLIENT = SESSION1.client(\n service_name='wellarchitected',\n region_name=REGION_NAME,\n )\n\n # If this is an existing workload, we need to query for the various workload properties\n if WORKLOADID:\n logger.info(\"User specified workload id of %s\" % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT,WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info(\"Lenses for %s: %s\" % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT= workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n # In order to gather all of the questions, you must create a TEMP Workload\n logger.info(\"No workload ID specified, we will create a TEMP workload\")\n # Grab all lenses that are currently available\n LENSES = listLens(WACLIENT)\n logger.info(\"Lenses available: \"+json.dumps(LENSES))\n # Set the needed workload variables before we create it\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT= 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n # Creating the TEMP workload\n logger.info(\"Creating a new workload to gather questions and answers\")\n workloadId, workloadARN = CreateNewWorkload(WACLIENT,WORKLOADNAME,DESCRIPTION,REVIEWOWNER,ENVIRONMENT,AWSREGIONS,LENSES,\"[]\",\"[]\")\n\n\n\n # Create an new xlsx file and add a worksheet.\n 
logger.info(\"Creating xlsx file '\"+FILENAME+\"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n\n # Simple hack to get Wellarchitected base framework first (reverse sort)\n # This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)\n LENSES.sort(reverse=True)\n\n # Iterate over each lens that we either have added or is in the workload\n for lens in LENSES:\n # Grab all questions for a particular lens\n allQuestions = findAllQuestionId(WACLIENT,workloadId,lens)\n if WORKLOADID:\n # If this is an existing workload, just go ahead and create the Tab and cells\n logger.debug(\"Not answering questions for existing workload\")\n lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions,WORKLOADNAME,workloadARN,DESCRIPTION)\n else:\n # If this is the TEMP workload, we need to first gather all of the questionIDs possible\n jmesquery = \"[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}\"\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n # Next we answer all of the questions across all lenses in the TEMP workload\n for question in allQuestionIds:\n logger.debug(\"Answering question %s in the %s lens\" % (question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT,workloadId,lens,question['QuestionId'],question['Choices'],'TEMP WORKLOAD - Added by export script')\n # Once the questions have been answered, we go ahead and create the tab for each\n lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions)\n\n\n # Close out the workbook file\n logger.info(\"Closing Workbook File\")\n workbook.close()\n\n # If this is TEMP workload, we may remove it if it has not been set to keep\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info(\"Removing TEMP Workload\")\n DeleteWorkload(WACLIENT, workloadId)\n logger.info(\"Done\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
13,
15,
16,
18,
19
]
}
|
[
13,
15,
16,
18,
19
] |
# Re-export the package's public API from the implementation module.
from wasserstoff.wasserstoff import Config, Environment
# Restrict `from wasserstoff import *` to the two supported names.
__all__ = ['Config', 'Environment']
|
normal
|
{
"blob_id": "862b529741d9c3e6cf7ca50272c8af724c56ac62",
"index": 404,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['Config', 'Environment']\n",
"step-3": "from wasserstoff.wasserstoff import Config, Environment\n__all__ = ['Config', 'Environment']\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'find_result_window.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FindResultWindow(object):
    """UI holder for the device search-results window.

    Generated-style PyQt5 class: ``setupUi`` builds the widget tree on a
    host ``QMainWindow`` and ``retranslateUi`` installs the user-visible
    (Russian) strings.
    """

    def setupUi(self, FindResultWindow):
        """Create and lay out all widgets on *FindResultWindow*."""
        FindResultWindow.setObjectName("FindResultWindow")
        FindResultWindow.resize(801, 546)

        # Central container that hosts every other widget.
        central = QtWidgets.QWidget(FindResultWindow)
        central.setObjectName("centralwidget")
        self.centralwidget = central

        # "Edit data" button; starts disabled until a result is selected.
        edit_button = QtWidgets.QPushButton(central)
        edit_button.setObjectName("btnEdit")
        edit_button.setGeometry(QtCore.QRect(330, 470, 151, 51))
        edit_button.setEnabled(False)
        edit_button.setCheckable(False)
        edit_button.setAutoDefault(False)
        self.btnEdit = edit_button

        # List that displays the search results.
        result_list = QtWidgets.QListWidget(central)
        result_list.setObjectName("listWidgetFindResult")
        result_list.setGeometry(QtCore.QRect(10, 10, 781, 441))
        self.listWidgetFindResult = result_list

        FindResultWindow.setCentralWidget(central)

        self.retranslateUi(FindResultWindow)
        # Auto-connect slots named on_<objectName>_<signal> by object name.
        QtCore.QMetaObject.connectSlotsByName(FindResultWindow)

    def retranslateUi(self, FindResultWindow):
        """Apply the translated, user-visible strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        FindResultWindow.setWindowTitle(
            _translate("FindResultWindow", "Информация о приборах"))
        self.btnEdit.setText(
            _translate("FindResultWindow", "Изменить данные"))
normal
|
{
"blob_id": "2fdbf418b5cec50ee6568897e0e749681efeef6b",
"index": 6584,
"step-1": "<mask token>\n\n\nclass Ui_FindResultWindow(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_FindResultWindow(object):\n <mask token>\n\n def retranslateUi(self, FindResultWindow):\n _translate = QtCore.QCoreApplication.translate\n FindResultWindow.setWindowTitle(_translate('FindResultWindow',\n 'Информация о приборах'))\n self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))\n",
"step-3": "<mask token>\n\n\nclass Ui_FindResultWindow(object):\n\n def setupUi(self, FindResultWindow):\n FindResultWindow.setObjectName('FindResultWindow')\n FindResultWindow.resize(801, 546)\n self.centralwidget = QtWidgets.QWidget(FindResultWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.btnEdit = QtWidgets.QPushButton(self.centralwidget)\n self.btnEdit.setEnabled(False)\n self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))\n self.btnEdit.setCheckable(False)\n self.btnEdit.setAutoDefault(False)\n self.btnEdit.setObjectName('btnEdit')\n self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)\n self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))\n self.listWidgetFindResult.setObjectName('listWidgetFindResult')\n FindResultWindow.setCentralWidget(self.centralwidget)\n self.retranslateUi(FindResultWindow)\n QtCore.QMetaObject.connectSlotsByName(FindResultWindow)\n\n def retranslateUi(self, FindResultWindow):\n _translate = QtCore.QCoreApplication.translate\n FindResultWindow.setWindowTitle(_translate('FindResultWindow',\n 'Информация о приборах'))\n self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FindResultWindow(object):\n\n def setupUi(self, FindResultWindow):\n FindResultWindow.setObjectName('FindResultWindow')\n FindResultWindow.resize(801, 546)\n self.centralwidget = QtWidgets.QWidget(FindResultWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.btnEdit = QtWidgets.QPushButton(self.centralwidget)\n self.btnEdit.setEnabled(False)\n self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))\n self.btnEdit.setCheckable(False)\n self.btnEdit.setAutoDefault(False)\n self.btnEdit.setObjectName('btnEdit')\n self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)\n self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))\n self.listWidgetFindResult.setObjectName('listWidgetFindResult')\n FindResultWindow.setCentralWidget(self.centralwidget)\n self.retranslateUi(FindResultWindow)\n QtCore.QMetaObject.connectSlotsByName(FindResultWindow)\n\n def retranslateUi(self, FindResultWindow):\n _translate = QtCore.QCoreApplication.translate\n FindResultWindow.setWindowTitle(_translate('FindResultWindow',\n 'Информация о приборах'))\n self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'find_result_window.ui'\n#\n# Created by: PyQt5 UI code generator 5.12.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FindResultWindow(object):\n def setupUi(self, FindResultWindow):\n FindResultWindow.setObjectName(\"FindResultWindow\")\n FindResultWindow.resize(801, 546)\n self.centralwidget = QtWidgets.QWidget(FindResultWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.btnEdit = QtWidgets.QPushButton(self.centralwidget)\n self.btnEdit.setEnabled(False)\n self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))\n self.btnEdit.setCheckable(False)\n self.btnEdit.setAutoDefault(False)\n self.btnEdit.setObjectName(\"btnEdit\")\n self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)\n self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))\n self.listWidgetFindResult.setObjectName(\"listWidgetFindResult\")\n FindResultWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(FindResultWindow)\n QtCore.QMetaObject.connectSlotsByName(FindResultWindow)\n\n def retranslateUi(self, FindResultWindow):\n _translate = QtCore.QCoreApplication.translate\n FindResultWindow.setWindowTitle(_translate(\"FindResultWindow\", \"Информация о приборах\"))\n self.btnEdit.setText(_translate(\"FindResultWindow\", \"Изменить данные\"))\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python2
# A basic example of sending Blue a command in cartesian space.
from blue_interface import BlueInterface
import numpy as np
import time
import sys
import argparse
import Leap
from utils.rotations import quat2euler, euler2quat, mat2euler
from utils.leap_listener import SampleListener
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='switch the control mode')
parser.add_argument('--IK', default=False, action='store_true',
help='switch to IK-control')
args = parser.parse_args()
side = "right"
ip = "127.0.0.1"
blue = BlueInterface(side, ip)
# Initialize the blue gripper
blue.calibrate_gripper()
# Leap Motion
listener = SampleListener()
controller = Leap.Controller()
target_angles_init = np.array([0.0, -0.85, 1.571, 0, -1.571, -0.2, 0.0])
target_angles_hist = target_angles_init.copy()
i = 0
while True:
hands_data = listener.get_hand(controller)
## IK approach
if args.IK:
if "Right hand" in hands_data.keys():
hand_data = hands_data["Right hand"]
pos = hand_data['palm_position']
ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]
grab_strength = hand_data['grab_strength']
target_position = [x/1000 for x in pos] # x, y, z
pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1] # z x y to x y z
ori[0], ori[1], ori[2] = ori[2], -ori[0]+3.14, ori[1] # z y x to x y z
# Adjust the offset
target_position[0] -= 0.4
target_position[2] += 0.3
target_orientation = list(euler2quat(ori)) # w, x, y, z
# target_orientation = target_orientation[1:] + target_orientation[:1]
# Compute IK solution
goal_curr = blue.inverse_kinematics(target_position, target_orientation)
# Send command to robot
if goal_curr != []:
goal = goal_curr
print("goal: ", goal)
blue.set_joint_positions(goal, duration=3, soft_position_control=False)
blue.command_gripper(grab_strength, 10.0, wait=False)
# Wait for system to settle
i+=1
time.sleep(3)
# Direct motor angle mapping approach
else:
if "Right hand" in hands_data.keys():
hand_data = hands_data["Right hand"]
pos = hand_data['palm_position']
ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]
grab_strength = hand_data['grab_strength']
pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1] # z x y to x y z
ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1] # z y x to x y z
target_position = [x/1000 for x in pos] # x, y, z
target_position[0] += 0.05
target_position[2] -= 0.2
# Pre-defined Initial position of the robot
target_angles = target_angles_init.copy()
# orientation
target_angles[0] += (ori[0]*1 + target_position[1]*1.5) # shoulder dir
target_angles[4] += ori[2] # arm twist
target_angles[5] += ori[1]*2 # wrist up down
target_angles[6] += ori[2] # wrist twist
# height
target_angles[1] += target_position[2]*5
target_angles[3] -= target_position[2]*5
# depth direction stretch
target_angles[3] -= target_position[0]*10
smoothening = True
if smoothening:
alpha = 0.9
target_angles = target_angles*(1-alpha) + target_angles_hist*alpha
target_angles_hist = target_angles
# Send command to robot
print("target_angles: ", target_angles)
blue.set_joint_positions(target_angles, duration=0.0025, soft_position_control=False)
if "Left hand" in hands_data.keys():
hand_data = hands_data["Left hand"]
pos = hand_data['palm_position']
ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]
grab_strength = hand_data['grab_strength']
blue.command_gripper(ori[1], 20.0, wait=False)
# Wait for system to settle
i+=1
time.sleep(0.025)
|
normal
|
{
"blob_id": "b34e293b509328c728909262594bdf3d3ecf5360",
"index": 4364,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--IK', default=False, action='store_true', help=\n 'switch to IK-control')\n<mask token>\nblue.calibrate_gripper()\n<mask token>\nwhile True:\n hands_data = listener.get_hand(controller)\n if args.IK:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n target_position = [(x / 1000) for x in pos]\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], -ori[0] + 3.14, ori[1]\n target_position[0] -= 0.4\n target_position[2] += 0.3\n target_orientation = list(euler2quat(ori))\n goal_curr = blue.inverse_kinematics(target_position,\n target_orientation)\n if goal_curr != []:\n goal = goal_curr\n print('goal: ', goal)\n blue.set_joint_positions(goal, duration=3,\n soft_position_control=False)\n blue.command_gripper(grab_strength, 10.0, wait=False)\n i += 1\n time.sleep(3)\n else:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1]\n target_position = [(x / 1000) for x in pos]\n target_position[0] += 0.05\n target_position[2] -= 0.2\n target_angles = target_angles_init.copy()\n target_angles[0] += ori[0] * 1 + target_position[1] * 1.5\n target_angles[4] += ori[2]\n target_angles[5] += ori[1] * 2\n target_angles[6] += ori[2]\n target_angles[1] += target_position[2] * 5\n target_angles[3] -= target_position[2] * 5\n target_angles[3] -= target_position[0] * 10\n smoothening = True\n if smoothening:\n alpha = 0.9\n target_angles = target_angles * (1 - alpha\n ) + target_angles_hist * alpha\n target_angles_hist = target_angles\n 
print('target_angles: ', target_angles)\n blue.set_joint_positions(target_angles, duration=0.0025,\n soft_position_control=False)\n if 'Left hand' in hands_data.keys():\n hand_data = hands_data['Left hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n blue.command_gripper(ori[1], 20.0, wait=False)\n i += 1\n time.sleep(0.025)\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser(description='switch the control mode')\nparser.add_argument('--IK', default=False, action='store_true', help=\n 'switch to IK-control')\nargs = parser.parse_args()\nside = 'right'\nip = '127.0.0.1'\nblue = BlueInterface(side, ip)\nblue.calibrate_gripper()\nlistener = SampleListener()\ncontroller = Leap.Controller()\ntarget_angles_init = np.array([0.0, -0.85, 1.571, 0, -1.571, -0.2, 0.0])\ntarget_angles_hist = target_angles_init.copy()\ni = 0\nwhile True:\n hands_data = listener.get_hand(controller)\n if args.IK:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n target_position = [(x / 1000) for x in pos]\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], -ori[0] + 3.14, ori[1]\n target_position[0] -= 0.4\n target_position[2] += 0.3\n target_orientation = list(euler2quat(ori))\n goal_curr = blue.inverse_kinematics(target_position,\n target_orientation)\n if goal_curr != []:\n goal = goal_curr\n print('goal: ', goal)\n blue.set_joint_positions(goal, duration=3,\n soft_position_control=False)\n blue.command_gripper(grab_strength, 10.0, wait=False)\n i += 1\n time.sleep(3)\n else:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1]\n target_position = [(x / 1000) for x in pos]\n target_position[0] += 0.05\n target_position[2] -= 0.2\n target_angles = target_angles_init.copy()\n target_angles[0] += ori[0] * 1 + target_position[1] * 1.5\n target_angles[4] += ori[2]\n target_angles[5] += ori[1] * 2\n 
target_angles[6] += ori[2]\n target_angles[1] += target_position[2] * 5\n target_angles[3] -= target_position[2] * 5\n target_angles[3] -= target_position[0] * 10\n smoothening = True\n if smoothening:\n alpha = 0.9\n target_angles = target_angles * (1 - alpha\n ) + target_angles_hist * alpha\n target_angles_hist = target_angles\n print('target_angles: ', target_angles)\n blue.set_joint_positions(target_angles, duration=0.0025,\n soft_position_control=False)\n if 'Left hand' in hands_data.keys():\n hand_data = hands_data['Left hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n blue.command_gripper(ori[1], 20.0, wait=False)\n i += 1\n time.sleep(0.025)\n",
"step-4": "from blue_interface import BlueInterface\nimport numpy as np\nimport time\nimport sys\nimport argparse\nimport Leap\nfrom utils.rotations import quat2euler, euler2quat, mat2euler\nfrom utils.leap_listener import SampleListener\nimport matplotlib.pyplot as plt\nparser = argparse.ArgumentParser(description='switch the control mode')\nparser.add_argument('--IK', default=False, action='store_true', help=\n 'switch to IK-control')\nargs = parser.parse_args()\nside = 'right'\nip = '127.0.0.1'\nblue = BlueInterface(side, ip)\nblue.calibrate_gripper()\nlistener = SampleListener()\ncontroller = Leap.Controller()\ntarget_angles_init = np.array([0.0, -0.85, 1.571, 0, -1.571, -0.2, 0.0])\ntarget_angles_hist = target_angles_init.copy()\ni = 0\nwhile True:\n hands_data = listener.get_hand(controller)\n if args.IK:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n target_position = [(x / 1000) for x in pos]\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], -ori[0] + 3.14, ori[1]\n target_position[0] -= 0.4\n target_position[2] += 0.3\n target_orientation = list(euler2quat(ori))\n goal_curr = blue.inverse_kinematics(target_position,\n target_orientation)\n if goal_curr != []:\n goal = goal_curr\n print('goal: ', goal)\n blue.set_joint_positions(goal, duration=3,\n soft_position_control=False)\n blue.command_gripper(grab_strength, 10.0, wait=False)\n i += 1\n time.sleep(3)\n else:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1]\n target_position = [(x / 
1000) for x in pos]\n target_position[0] += 0.05\n target_position[2] -= 0.2\n target_angles = target_angles_init.copy()\n target_angles[0] += ori[0] * 1 + target_position[1] * 1.5\n target_angles[4] += ori[2]\n target_angles[5] += ori[1] * 2\n target_angles[6] += ori[2]\n target_angles[1] += target_position[2] * 5\n target_angles[3] -= target_position[2] * 5\n target_angles[3] -= target_position[0] * 10\n smoothening = True\n if smoothening:\n alpha = 0.9\n target_angles = target_angles * (1 - alpha\n ) + target_angles_hist * alpha\n target_angles_hist = target_angles\n print('target_angles: ', target_angles)\n blue.set_joint_positions(target_angles, duration=0.0025,\n soft_position_control=False)\n if 'Left hand' in hands_data.keys():\n hand_data = hands_data['Left hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n blue.command_gripper(ori[1], 20.0, wait=False)\n i += 1\n time.sleep(0.025)\n",
"step-5": "#!/usr/bin/env python2\n\n# A basic example of sending Blue a command in cartesian space.\nfrom blue_interface import BlueInterface\nimport numpy as np\nimport time\nimport sys\nimport argparse\n\nimport Leap\nfrom utils.rotations import quat2euler, euler2quat, mat2euler\nfrom utils.leap_listener import SampleListener\n\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='switch the control mode')\nparser.add_argument('--IK', default=False, action='store_true',\n help='switch to IK-control')\nargs = parser.parse_args()\n\nside = \"right\"\nip = \"127.0.0.1\"\nblue = BlueInterface(side, ip)\n# Initialize the blue gripper\nblue.calibrate_gripper()\n\n# Leap Motion\nlistener = SampleListener()\ncontroller = Leap.Controller()\n\n\ntarget_angles_init = np.array([0.0, -0.85, 1.571, 0, -1.571, -0.2, 0.0])\ntarget_angles_hist = target_angles_init.copy()\n\ni = 0\nwhile True:\n hands_data = listener.get_hand(controller)\n\n ## IK approach\n if args.IK:\n if \"Right hand\" in hands_data.keys():\n hand_data = hands_data[\"Right hand\"]\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n target_position = [x/1000 for x in pos] # x, y, z\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1] # z x y to x y z\n ori[0], ori[1], ori[2] = ori[2], -ori[0]+3.14, ori[1] # z y x to x y z\n\n # Adjust the offset\n target_position[0] -= 0.4\n target_position[2] += 0.3\n target_orientation = list(euler2quat(ori)) # w, x, y, z\n # target_orientation = target_orientation[1:] + target_orientation[:1]\n\n # Compute IK solution\n goal_curr = blue.inverse_kinematics(target_position, target_orientation)\n # Send command to robot\n if goal_curr != []:\n goal = goal_curr\n print(\"goal: \", goal)\n blue.set_joint_positions(goal, duration=3, soft_position_control=False)\n blue.command_gripper(grab_strength, 10.0, wait=False)\n\n # Wait for system to 
settle\n i+=1\n time.sleep(3)\n\n # Direct motor angle mapping approach\n else:\n if \"Right hand\" in hands_data.keys():\n hand_data = hands_data[\"Right hand\"]\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1] # z x y to x y z\n ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1] # z y x to x y z\n target_position = [x/1000 for x in pos] # x, y, z\n target_position[0] += 0.05\n target_position[2] -= 0.2\n\n # Pre-defined Initial position of the robot\n target_angles = target_angles_init.copy()\n\n # orientation\n target_angles[0] += (ori[0]*1 + target_position[1]*1.5) # shoulder dir\n target_angles[4] += ori[2] # arm twist\n target_angles[5] += ori[1]*2 # wrist up down\n target_angles[6] += ori[2] # wrist twist\n\n # height\n target_angles[1] += target_position[2]*5\n target_angles[3] -= target_position[2]*5\n\n # depth direction stretch\n target_angles[3] -= target_position[0]*10\n\n smoothening = True\n if smoothening:\n alpha = 0.9\n target_angles = target_angles*(1-alpha) + target_angles_hist*alpha\n target_angles_hist = target_angles\n\n # Send command to robot\n print(\"target_angles: \", target_angles)\n blue.set_joint_positions(target_angles, duration=0.0025, soft_position_control=False)\n\n if \"Left hand\" in hands_data.keys():\n hand_data = hands_data[\"Left hand\"]\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n blue.command_gripper(ori[1], 20.0, wait=False)\n\n # Wait for system to settle\n i+=1\n time.sleep(0.025)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Tuple: ', new_tuple)
print('List: ', new_list)
<|reserved_special_token_0|>
print('Converted tuple from the list : ', tuple_2)
<|reserved_special_token_1|>
new_tuple = 11, 12, 13, 14, 15, 16, 17
new_list = ['one', 12, 'three', 14, 'five']
print('Tuple: ', new_tuple)
print('List: ', new_list)
tuple_2 = tuple(new_list)
print('Converted tuple from the list : ', tuple_2)
<|reserved_special_token_1|>
new_tuple = (11,12,13,14,15,16,17)
new_list = ['one' ,12,'three' ,14,'five']
print("Tuple: ",new_tuple)
print("List: ", new_list)
tuple_2= tuple (new_list)
print("Converted tuple from the list : ", tuple_2)
|
flexible
|
{
"blob_id": "889fdca3f92f218e6d6fd3d02d49483f16a64899",
"index": 9117,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Tuple: ', new_tuple)\nprint('List: ', new_list)\n<mask token>\nprint('Converted tuple from the list : ', tuple_2)\n",
"step-3": "new_tuple = 11, 12, 13, 14, 15, 16, 17\nnew_list = ['one', 12, 'three', 14, 'five']\nprint('Tuple: ', new_tuple)\nprint('List: ', new_list)\ntuple_2 = tuple(new_list)\nprint('Converted tuple from the list : ', tuple_2)\n",
"step-4": "new_tuple = (11,12,13,14,15,16,17)\nnew_list = ['one' ,12,'three' ,14,'five'] \nprint(\"Tuple: \",new_tuple)\nprint(\"List: \", new_list)\ntuple_2= tuple (new_list)\nprint(\"Converted tuple from the list : \", tuple_2)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def memo(fn):
cache = {}
missed = object()
def query(*args):
result = cache.get(args, missed)
if result is missed:
result = cache[args] = fn(*args)
return result
return query
@memo
def cal_edit_distance(ori, tar):
def edit_tuple(old, distance, path):
return old[0] + distance, old[1] + '\n' + path
if not ori:
return len(tar), 'add %s' % tar
if not tar:
return len(ori), 'remove %s' % ori
ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]
edit_op_dis = cal_edit_distance(ori_rest, tar_rest)
if ori_head != tar_head:
edit_op_dis = edit_tuple(edit_op_dis, 1, 'replace %s with %s' % (
ori_head, tar_head))
del_op_dis = cal_edit_distance(ori_rest, tar)
del_op_dis = edit_tuple(del_op_dis, 1, 'delete %s' % ori_head)
add_op_dis = cal_edit_distance(ori, tar_rest)
add_op_dis = edit_tuple(add_op_dis, 1, 'add %s' % tar_head)
return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def memo(fn):
cache = {}
missed = object()
def query(*args):
result = cache.get(args, missed)
if result is missed:
result = cache[args] = fn(*args)
return result
return query
@memo
def cal_edit_distance(ori, tar):
def edit_tuple(old, distance, path):
return old[0] + distance, old[1] + '\n' + path
if not ori:
return len(tar), 'add %s' % tar
if not tar:
return len(ori), 'remove %s' % ori
ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]
edit_op_dis = cal_edit_distance(ori_rest, tar_rest)
if ori_head != tar_head:
edit_op_dis = edit_tuple(edit_op_dis, 1, 'replace %s with %s' % (
ori_head, tar_head))
del_op_dis = cal_edit_distance(ori_rest, tar)
del_op_dis = edit_tuple(del_op_dis, 1, 'delete %s' % ori_head)
add_op_dis = cal_edit_distance(ori, tar_rest)
add_op_dis = edit_tuple(add_op_dis, 1, 'add %s' % tar_head)
return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])
<|reserved_special_token_0|>
def edit_distance_norec(ori, tar):
ed = defaultdict(dict)
ed[0] = {i: i for i in range(len(tar) + 1)}
for i in range(len(ori) + 1):
ed[i][0] = i
for i in range(1, len(ori) + 1):
for j in range(1, len(tar) + 1):
del_op = ed[i - 1][j] + 1
add_op = ed[i][j - 1] + 1
edit_op = ed[i - 1][j - 1] if ori[i - 1] == tar[j - 1] else ed[
i - 1][j - 1] + 1
ed[i][j] = min(del_op, add_op, edit_op)
return ed[len(ori)][len(tar)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def memo(fn):
cache = {}
missed = object()
def query(*args):
result = cache.get(args, missed)
if result is missed:
result = cache[args] = fn(*args)
return result
return query
@memo
def cal_edit_distance(ori, tar):
def edit_tuple(old, distance, path):
return old[0] + distance, old[1] + '\n' + path
if not ori:
return len(tar), 'add %s' % tar
if not tar:
return len(ori), 'remove %s' % ori
ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]
edit_op_dis = cal_edit_distance(ori_rest, tar_rest)
if ori_head != tar_head:
edit_op_dis = edit_tuple(edit_op_dis, 1, 'replace %s with %s' % (
ori_head, tar_head))
del_op_dis = cal_edit_distance(ori_rest, tar)
del_op_dis = edit_tuple(del_op_dis, 1, 'delete %s' % ori_head)
add_op_dis = cal_edit_distance(ori, tar_rest)
add_op_dis = edit_tuple(add_op_dis, 1, 'add %s' % tar_head)
return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])
<|reserved_special_token_0|>
def edit_distance_norec(ori, tar):
ed = defaultdict(dict)
ed[0] = {i: i for i in range(len(tar) + 1)}
for i in range(len(ori) + 1):
ed[i][0] = i
for i in range(1, len(ori) + 1):
for j in range(1, len(tar) + 1):
del_op = ed[i - 1][j] + 1
add_op = ed[i][j - 1] + 1
edit_op = ed[i - 1][j - 1] if ori[i - 1] == tar[j - 1] else ed[
i - 1][j - 1] + 1
ed[i][j] = min(del_op, add_op, edit_op)
return ed[len(ori)][len(tar)]
if __name__ == '__main__':
print(cal_edit_distance('fang', 'renf')[0])
print(edit_distance_norec('fang', 'renf'))
<|reserved_special_token_1|>
def memo(fn):
cache = {}
missed = object()
def query(*args):
result = cache.get(args, missed)
if result is missed:
result = cache[args] = fn(*args)
return result
return query
@memo
def cal_edit_distance(ori, tar):
def edit_tuple(old, distance, path):
return old[0] + distance, old[1] + '\n' + path
if not ori:
return len(tar), 'add %s' % tar
if not tar:
return len(ori), 'remove %s' % ori
ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]
edit_op_dis = cal_edit_distance(ori_rest, tar_rest)
if ori_head != tar_head:
edit_op_dis = edit_tuple(edit_op_dis, 1, 'replace %s with %s' % (
ori_head, tar_head))
del_op_dis = cal_edit_distance(ori_rest, tar)
del_op_dis = edit_tuple(del_op_dis, 1, 'delete %s' % ori_head)
add_op_dis = cal_edit_distance(ori, tar_rest)
add_op_dis = edit_tuple(add_op_dis, 1, 'add %s' % tar_head)
return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])
from collections import defaultdict
def edit_distance_norec(ori, tar):
ed = defaultdict(dict)
ed[0] = {i: i for i in range(len(tar) + 1)}
for i in range(len(ori) + 1):
ed[i][0] = i
for i in range(1, len(ori) + 1):
for j in range(1, len(tar) + 1):
del_op = ed[i - 1][j] + 1
add_op = ed[i][j - 1] + 1
edit_op = ed[i - 1][j - 1] if ori[i - 1] == tar[j - 1] else ed[
i - 1][j - 1] + 1
ed[i][j] = min(del_op, add_op, edit_op)
return ed[len(ori)][len(tar)]
if __name__ == '__main__':
print(cal_edit_distance('fang', 'renf')[0])
print(edit_distance_norec('fang', 'renf'))
<|reserved_special_token_1|>
def memo(fn):
cache = {}
missed = object()
def query(*args):
result = cache.get(args, missed)
if result is missed:
result = cache[args] = fn(*args)
return result
return query
@memo
def cal_edit_distance(ori, tar):
def edit_tuple(old, distance, path):
return old[0] + distance, old[1] + "\n" + path
if not ori:
return len(tar), "add %s" % tar
if not tar:
return len(ori), "remove %s" % ori
ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]
edit_op_dis = cal_edit_distance(ori_rest, tar_rest)
if ori_head != tar_head:
edit_op_dis = edit_tuple(edit_op_dis, 1, "replace %s with %s" % (ori_head, tar_head))
del_op_dis = cal_edit_distance(ori_rest, tar)
del_op_dis = edit_tuple(del_op_dis, 1, "delete %s" % ori_head)
add_op_dis = cal_edit_distance(ori, tar_rest)
add_op_dis = edit_tuple(add_op_dis, 1, "add %s" % tar_head)
return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])
from collections import defaultdict
def edit_distance_norec(ori, tar):
ed = defaultdict(dict)
ed[0] = {i: i for i in range(len(tar) + 1)}
for i in range(len(ori) + 1):
ed[i][0] = i
for i in range(1, len(ori) + 1):
for j in range(1, len(tar) + 1):
del_op = ed[i - 1][j] + 1
add_op = ed[i][j - 1] + 1
edit_op = ed[i - 1][j - 1] if ori[i - 1] == tar[j - 1] else ed[i - 1][j - 1] + 1
ed[i][j] = min(del_op, add_op, edit_op)
return ed[len(ori)][len(tar)]
if __name__ == '__main__':
print(cal_edit_distance("fang", "renf")[0])
print(edit_distance_norec("fang", "renf"))
|
flexible
|
{
"blob_id": "88390f411af90d494284617ef8f5fb0e9bb8890e",
"index": 8039,
"step-1": "def memo(fn):\n cache = {}\n missed = object()\n\n def query(*args):\n result = cache.get(args, missed)\n if result is missed:\n result = cache[args] = fn(*args)\n return result\n return query\n\n\n@memo\ndef cal_edit_distance(ori, tar):\n\n def edit_tuple(old, distance, path):\n return old[0] + distance, old[1] + '\\n' + path\n if not ori:\n return len(tar), 'add %s' % tar\n if not tar:\n return len(ori), 'remove %s' % ori\n ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]\n edit_op_dis = cal_edit_distance(ori_rest, tar_rest)\n if ori_head != tar_head:\n edit_op_dis = edit_tuple(edit_op_dis, 1, 'replace %s with %s' % (\n ori_head, tar_head))\n del_op_dis = cal_edit_distance(ori_rest, tar)\n del_op_dis = edit_tuple(del_op_dis, 1, 'delete %s' % ori_head)\n add_op_dis = cal_edit_distance(ori, tar_rest)\n add_op_dis = edit_tuple(add_op_dis, 1, 'add %s' % tar_head)\n return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])\n\n\n<mask token>\n",
"step-2": "def memo(fn):\n cache = {}\n missed = object()\n\n def query(*args):\n result = cache.get(args, missed)\n if result is missed:\n result = cache[args] = fn(*args)\n return result\n return query\n\n\n@memo\ndef cal_edit_distance(ori, tar):\n\n def edit_tuple(old, distance, path):\n return old[0] + distance, old[1] + '\\n' + path\n if not ori:\n return len(tar), 'add %s' % tar\n if not tar:\n return len(ori), 'remove %s' % ori\n ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]\n edit_op_dis = cal_edit_distance(ori_rest, tar_rest)\n if ori_head != tar_head:\n edit_op_dis = edit_tuple(edit_op_dis, 1, 'replace %s with %s' % (\n ori_head, tar_head))\n del_op_dis = cal_edit_distance(ori_rest, tar)\n del_op_dis = edit_tuple(del_op_dis, 1, 'delete %s' % ori_head)\n add_op_dis = cal_edit_distance(ori, tar_rest)\n add_op_dis = edit_tuple(add_op_dis, 1, 'add %s' % tar_head)\n return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])\n\n\n<mask token>\n\n\ndef edit_distance_norec(ori, tar):\n ed = defaultdict(dict)\n ed[0] = {i: i for i in range(len(tar) + 1)}\n for i in range(len(ori) + 1):\n ed[i][0] = i\n for i in range(1, len(ori) + 1):\n for j in range(1, len(tar) + 1):\n del_op = ed[i - 1][j] + 1\n add_op = ed[i][j - 1] + 1\n edit_op = ed[i - 1][j - 1] if ori[i - 1] == tar[j - 1] else ed[\n i - 1][j - 1] + 1\n ed[i][j] = min(del_op, add_op, edit_op)\n return ed[len(ori)][len(tar)]\n\n\n<mask token>\n",
"step-3": "def memo(fn):\n cache = {}\n missed = object()\n\n def query(*args):\n result = cache.get(args, missed)\n if result is missed:\n result = cache[args] = fn(*args)\n return result\n return query\n\n\n@memo\ndef cal_edit_distance(ori, tar):\n\n def edit_tuple(old, distance, path):\n return old[0] + distance, old[1] + '\\n' + path\n if not ori:\n return len(tar), 'add %s' % tar\n if not tar:\n return len(ori), 'remove %s' % ori\n ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]\n edit_op_dis = cal_edit_distance(ori_rest, tar_rest)\n if ori_head != tar_head:\n edit_op_dis = edit_tuple(edit_op_dis, 1, 'replace %s with %s' % (\n ori_head, tar_head))\n del_op_dis = cal_edit_distance(ori_rest, tar)\n del_op_dis = edit_tuple(del_op_dis, 1, 'delete %s' % ori_head)\n add_op_dis = cal_edit_distance(ori, tar_rest)\n add_op_dis = edit_tuple(add_op_dis, 1, 'add %s' % tar_head)\n return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])\n\n\n<mask token>\n\n\ndef edit_distance_norec(ori, tar):\n ed = defaultdict(dict)\n ed[0] = {i: i for i in range(len(tar) + 1)}\n for i in range(len(ori) + 1):\n ed[i][0] = i\n for i in range(1, len(ori) + 1):\n for j in range(1, len(tar) + 1):\n del_op = ed[i - 1][j] + 1\n add_op = ed[i][j - 1] + 1\n edit_op = ed[i - 1][j - 1] if ori[i - 1] == tar[j - 1] else ed[\n i - 1][j - 1] + 1\n ed[i][j] = min(del_op, add_op, edit_op)\n return ed[len(ori)][len(tar)]\n\n\nif __name__ == '__main__':\n print(cal_edit_distance('fang', 'renf')[0])\n print(edit_distance_norec('fang', 'renf'))\n",
"step-4": "def memo(fn):\n cache = {}\n missed = object()\n\n def query(*args):\n result = cache.get(args, missed)\n if result is missed:\n result = cache[args] = fn(*args)\n return result\n return query\n\n\n@memo\ndef cal_edit_distance(ori, tar):\n\n def edit_tuple(old, distance, path):\n return old[0] + distance, old[1] + '\\n' + path\n if not ori:\n return len(tar), 'add %s' % tar\n if not tar:\n return len(ori), 'remove %s' % ori\n ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]\n edit_op_dis = cal_edit_distance(ori_rest, tar_rest)\n if ori_head != tar_head:\n edit_op_dis = edit_tuple(edit_op_dis, 1, 'replace %s with %s' % (\n ori_head, tar_head))\n del_op_dis = cal_edit_distance(ori_rest, tar)\n del_op_dis = edit_tuple(del_op_dis, 1, 'delete %s' % ori_head)\n add_op_dis = cal_edit_distance(ori, tar_rest)\n add_op_dis = edit_tuple(add_op_dis, 1, 'add %s' % tar_head)\n return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])\n\n\nfrom collections import defaultdict\n\n\ndef edit_distance_norec(ori, tar):\n ed = defaultdict(dict)\n ed[0] = {i: i for i in range(len(tar) + 1)}\n for i in range(len(ori) + 1):\n ed[i][0] = i\n for i in range(1, len(ori) + 1):\n for j in range(1, len(tar) + 1):\n del_op = ed[i - 1][j] + 1\n add_op = ed[i][j - 1] + 1\n edit_op = ed[i - 1][j - 1] if ori[i - 1] == tar[j - 1] else ed[\n i - 1][j - 1] + 1\n ed[i][j] = min(del_op, add_op, edit_op)\n return ed[len(ori)][len(tar)]\n\n\nif __name__ == '__main__':\n print(cal_edit_distance('fang', 'renf')[0])\n print(edit_distance_norec('fang', 'renf'))\n",
"step-5": "def memo(fn):\n cache = {}\n missed = object()\n\n def query(*args):\n result = cache.get(args, missed)\n if result is missed:\n result = cache[args] = fn(*args)\n return result\n\n return query\n\n\n@memo\ndef cal_edit_distance(ori, tar):\n def edit_tuple(old, distance, path):\n return old[0] + distance, old[1] + \"\\n\" + path\n\n if not ori:\n return len(tar), \"add %s\" % tar\n if not tar:\n return len(ori), \"remove %s\" % ori\n ori_head, ori_rest, tar_head, tar_rest = ori[0], ori[1:], tar[0], tar[1:]\n edit_op_dis = cal_edit_distance(ori_rest, tar_rest)\n if ori_head != tar_head:\n edit_op_dis = edit_tuple(edit_op_dis, 1, \"replace %s with %s\" % (ori_head, tar_head))\n del_op_dis = cal_edit_distance(ori_rest, tar)\n del_op_dis = edit_tuple(del_op_dis, 1, \"delete %s\" % ori_head)\n add_op_dis = cal_edit_distance(ori, tar_rest)\n add_op_dis = edit_tuple(add_op_dis, 1, \"add %s\" % tar_head)\n return min(edit_op_dis, del_op_dis, add_op_dis, key=lambda e: e[0])\n\n\nfrom collections import defaultdict\n\n\ndef edit_distance_norec(ori, tar):\n ed = defaultdict(dict)\n ed[0] = {i: i for i in range(len(tar) + 1)}\n for i in range(len(ori) + 1):\n ed[i][0] = i\n for i in range(1, len(ori) + 1):\n for j in range(1, len(tar) + 1):\n del_op = ed[i - 1][j] + 1\n add_op = ed[i][j - 1] + 1\n edit_op = ed[i - 1][j - 1] if ori[i - 1] == tar[j - 1] else ed[i - 1][j - 1] + 1\n ed[i][j] = min(del_op, add_op, edit_op)\n return ed[len(ori)][len(tar)]\n\nif __name__ == '__main__':\n print(cal_edit_distance(\"fang\", \"renf\")[0])\n print(edit_distance_norec(\"fang\", \"renf\"))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('news', '0002_auto_20210317_1400')]
operations = [migrations.AlterField(model_name='author', name='author',
field=models.OneToOneField(on_delete=django.db.models.deletion.
CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор'))]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('news', '0002_auto_20210317_1400')]
operations = [migrations.AlterField(model_name='author', name='author',
field=models.OneToOneField(on_delete=django.db.models.deletion.
CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор'))]
<|reserved_special_token_1|>
# Generated by Django 3.1.7 on 2021-03-19 14:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('news', '0002_auto_20210317_1400'),
]
operations = [
migrations.AlterField(
model_name='author',
name='author',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор'),
),
]
|
flexible
|
{
"blob_id": "8b4bc312bf4b64f98c4f84f4bf89984291be0428",
"index": 6033,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('news', '0002_auto_20210317_1400')]\n operations = [migrations.AlterField(model_name='author', name='author',\n field=models.OneToOneField(on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор'))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('news', '0002_auto_20210317_1400')]\n operations = [migrations.AlterField(model_name='author', name='author',\n field=models.OneToOneField(on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор'))]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-03-19 14:38\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('news', '0002_auto_20210317_1400'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='author',\n name='author',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class BaseException(Exception):
def __init__(self, message=""):
super(BaseException, self).__init__()
self.message = message
|
normal
|
{
"blob_id": "2ee1539e051677ad38ab7727ff5edefb1aebd015",
"index": 9946,
"step-1": "<mask token>\n",
"step-2": "class BaseException(Exception):\n <mask token>\n",
"step-3": "class BaseException(Exception):\n\n def __init__(self, message=''):\n super(BaseException, self).__init__()\n self.message = message\n",
"step-4": "class BaseException(Exception):\n def __init__(self, message=\"\"):\n super(BaseException, self).__init__()\n self.message = message\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from arcade.sprite_list.sprite_list import SpriteList
import GamePiece as gp
from Errors import *
class GameConfig:
WINDOW_TITLE = "MyPyTris"
SCREEN_WIDTH = 450
SCREEN_HEIGHT = 900
BLOCK_PX = 45 # 45px blocks on screen
SPRITE_PX = 64 # 64px sprite
BLOCK_SCALE = BLOCK_PX/SPRITE_PX # sprite scale ratio
class GameBoard:
""" Class to manage blocks on the game board """
def __init__(self, width: int, height: int):
# 2D list of blocks initialized to empty in the width and height of our game board
self.width = width
self.height = height
self.blocks = [[None for y in range(width)] for x in range(height)]
self.playerSprites = SpriteList()
self.groundSprites = SpriteList()
def draw(self):
self.playerSprites.draw()
self.groundSprites.draw()
def canMoveBlock(self, x: int, y: int) -> bool:
return self.blocks[x][y] is None
def canMoveGamePiece(self, gamePiece:gp.GamePiece, xTo:int, yTo:int) -> bool:
for yDiff, row in enumerate(gamePiece.blocks):
for xDiff, block in enumerate(row):
if block is None:
continue
newX = xTo + xDiff
newY = yTo + yDiff
if newX >= self.width or newX < 0:
return False
if newY < 0 or newY >= self.height:
return False
if self.blocks[newY][newX] is not None \
and self.blocks[newY][newX] not in gamePiece.allBlocks():
return False
return True
def moveGamePiece(self, gamePiece:gp.GamePiece, xTo:int, yTo:int):
if (not self.canMoveGamePiece(gamePiece, xTo, yTo)):
return False
# remove blocks from game board
for y, row in enumerate(gamePiece.blocks):
for x, block in enumerate(row):
if block is not None:
self.blocks[y + gamePiece.y][x + gamePiece.x] = None
# add blocks in new positions
for y, row in enumerate(gamePiece.blocks):
for x, block in enumerate(row):
if block is not None:
blockXDiff = block.x - gamePiece.x
blockYDiff = block.y - gamePiece.y
newBlockX = xTo + blockXDiff
newBlockY = yTo + blockYDiff
self.blocks[newBlockY][newBlockX] = block
block.moveTo(newBlockX, newBlockY)
gamePiece.x = xTo
gamePiece.y = yTo
def addBlock(self, aBlock: gp.Block):
"""adds a block to the game board"""
if self.blocks[aBlock.y][aBlock.x] != None:
raise MovementError('game board space not empty')
self.blocks[aBlock.y][aBlock.x] = aBlock
self.groundSprites.append(aBlock.sprite)
def addGamePiece(self, gamePiece:gp.GamePiece):
for y in range(gamePiece.size):
for x in range(gamePiece.size):
block = gamePiece.blocks[y][x]
if block is None:
continue
self.blocks[block.y][block.x] = block
self.playerSprites.append(block.sprite)
def moveBlock(self, aBlock: gp.Block, x: int, y: int):
self.blocks[aBlock.y][aBlock.x] = None
self.blocks[y][x] = aBlock
def removeBlock(self, aBlock: gp.Block):
""" remove a block from the game board """
for y, row in iter(self.blocks):
for x, block in iter(row):
if block is aBlock:
self.blocks[y][x] = None
self.playerSprites.remove(aBlock.sprite)
return
class GameManager:
def __init__(self) -> None:
pass
def start(self):
gameBoard = GameBoard(10, 20)
gameBoard.addGamePiece()
|
normal
|
{
"blob_id": "2d7431996bc8d1099c08fddc815b4706deb4f023",
"index": 4393,
"step-1": "<mask token>\n\n\nclass GameBoard:\n <mask token>\n <mask token>\n\n def draw(self):\n self.playerSprites.draw()\n self.groundSprites.draw()\n <mask token>\n <mask token>\n\n def moveGamePiece(self, gamePiece: gp.GamePiece, xTo: int, yTo: int):\n if not self.canMoveGamePiece(gamePiece, xTo, yTo):\n return False\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n self.blocks[y + gamePiece.y][x + gamePiece.x] = None\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n blockXDiff = block.x - gamePiece.x\n blockYDiff = block.y - gamePiece.y\n newBlockX = xTo + blockXDiff\n newBlockY = yTo + blockYDiff\n self.blocks[newBlockY][newBlockX] = block\n block.moveTo(newBlockX, newBlockY)\n gamePiece.x = xTo\n gamePiece.y = yTo\n <mask token>\n <mask token>\n <mask token>\n\n def removeBlock(self, aBlock: gp.Block):\n \"\"\" remove a block from the game board \"\"\"\n for y, row in iter(self.blocks):\n for x, block in iter(row):\n if block is aBlock:\n self.blocks[y][x] = None\n self.playerSprites.remove(aBlock.sprite)\n return\n\n\nclass GameManager:\n\n def __init__(self) ->None:\n pass\n\n def start(self):\n gameBoard = GameBoard(10, 20)\n gameBoard.addGamePiece()\n",
"step-2": "<mask token>\n\n\nclass GameBoard:\n <mask token>\n\n def __init__(self, width: int, height: int):\n self.width = width\n self.height = height\n self.blocks = [[None for y in range(width)] for x in range(height)]\n self.playerSprites = SpriteList()\n self.groundSprites = SpriteList()\n\n def draw(self):\n self.playerSprites.draw()\n self.groundSprites.draw()\n\n def canMoveBlock(self, x: int, y: int) ->bool:\n return self.blocks[x][y] is None\n <mask token>\n\n def moveGamePiece(self, gamePiece: gp.GamePiece, xTo: int, yTo: int):\n if not self.canMoveGamePiece(gamePiece, xTo, yTo):\n return False\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n self.blocks[y + gamePiece.y][x + gamePiece.x] = None\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n blockXDiff = block.x - gamePiece.x\n blockYDiff = block.y - gamePiece.y\n newBlockX = xTo + blockXDiff\n newBlockY = yTo + blockYDiff\n self.blocks[newBlockY][newBlockX] = block\n block.moveTo(newBlockX, newBlockY)\n gamePiece.x = xTo\n gamePiece.y = yTo\n\n def addBlock(self, aBlock: gp.Block):\n \"\"\"adds a block to the game board\"\"\"\n if self.blocks[aBlock.y][aBlock.x] != None:\n raise MovementError('game board space not empty')\n self.blocks[aBlock.y][aBlock.x] = aBlock\n self.groundSprites.append(aBlock.sprite)\n\n def addGamePiece(self, gamePiece: gp.GamePiece):\n for y in range(gamePiece.size):\n for x in range(gamePiece.size):\n block = gamePiece.blocks[y][x]\n if block is None:\n continue\n self.blocks[block.y][block.x] = block\n self.playerSprites.append(block.sprite)\n\n def moveBlock(self, aBlock: gp.Block, x: int, y: int):\n self.blocks[aBlock.y][aBlock.x] = None\n self.blocks[y][x] = aBlock\n\n def removeBlock(self, aBlock: gp.Block):\n \"\"\" remove a block from the game board \"\"\"\n for y, row in iter(self.blocks):\n for x, block in iter(row):\n if block is aBlock:\n 
self.blocks[y][x] = None\n self.playerSprites.remove(aBlock.sprite)\n return\n\n\nclass GameManager:\n\n def __init__(self) ->None:\n pass\n\n def start(self):\n gameBoard = GameBoard(10, 20)\n gameBoard.addGamePiece()\n",
"step-3": "<mask token>\n\n\nclass GameBoard:\n <mask token>\n\n def __init__(self, width: int, height: int):\n self.width = width\n self.height = height\n self.blocks = [[None for y in range(width)] for x in range(height)]\n self.playerSprites = SpriteList()\n self.groundSprites = SpriteList()\n\n def draw(self):\n self.playerSprites.draw()\n self.groundSprites.draw()\n\n def canMoveBlock(self, x: int, y: int) ->bool:\n return self.blocks[x][y] is None\n\n def canMoveGamePiece(self, gamePiece: gp.GamePiece, xTo: int, yTo: int\n ) ->bool:\n for yDiff, row in enumerate(gamePiece.blocks):\n for xDiff, block in enumerate(row):\n if block is None:\n continue\n newX = xTo + xDiff\n newY = yTo + yDiff\n if newX >= self.width or newX < 0:\n return False\n if newY < 0 or newY >= self.height:\n return False\n if self.blocks[newY][newX] is not None and self.blocks[newY][\n newX] not in gamePiece.allBlocks():\n return False\n return True\n\n def moveGamePiece(self, gamePiece: gp.GamePiece, xTo: int, yTo: int):\n if not self.canMoveGamePiece(gamePiece, xTo, yTo):\n return False\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n self.blocks[y + gamePiece.y][x + gamePiece.x] = None\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n blockXDiff = block.x - gamePiece.x\n blockYDiff = block.y - gamePiece.y\n newBlockX = xTo + blockXDiff\n newBlockY = yTo + blockYDiff\n self.blocks[newBlockY][newBlockX] = block\n block.moveTo(newBlockX, newBlockY)\n gamePiece.x = xTo\n gamePiece.y = yTo\n\n def addBlock(self, aBlock: gp.Block):\n \"\"\"adds a block to the game board\"\"\"\n if self.blocks[aBlock.y][aBlock.x] != None:\n raise MovementError('game board space not empty')\n self.blocks[aBlock.y][aBlock.x] = aBlock\n self.groundSprites.append(aBlock.sprite)\n\n def addGamePiece(self, gamePiece: gp.GamePiece):\n for y in range(gamePiece.size):\n for x in range(gamePiece.size):\n 
block = gamePiece.blocks[y][x]\n if block is None:\n continue\n self.blocks[block.y][block.x] = block\n self.playerSprites.append(block.sprite)\n\n def moveBlock(self, aBlock: gp.Block, x: int, y: int):\n self.blocks[aBlock.y][aBlock.x] = None\n self.blocks[y][x] = aBlock\n\n def removeBlock(self, aBlock: gp.Block):\n \"\"\" remove a block from the game board \"\"\"\n for y, row in iter(self.blocks):\n for x, block in iter(row):\n if block is aBlock:\n self.blocks[y][x] = None\n self.playerSprites.remove(aBlock.sprite)\n return\n\n\nclass GameManager:\n\n def __init__(self) ->None:\n pass\n\n def start(self):\n gameBoard = GameBoard(10, 20)\n gameBoard.addGamePiece()\n",
"step-4": "from arcade.sprite_list.sprite_list import SpriteList\nimport GamePiece as gp\nfrom Errors import *\n\n\nclass GameConfig:\n WINDOW_TITLE = 'MyPyTris'\n SCREEN_WIDTH = 450\n SCREEN_HEIGHT = 900\n BLOCK_PX = 45\n SPRITE_PX = 64\n BLOCK_SCALE = BLOCK_PX / SPRITE_PX\n\n\nclass GameBoard:\n \"\"\" Class to manage blocks on the game board \"\"\"\n\n def __init__(self, width: int, height: int):\n self.width = width\n self.height = height\n self.blocks = [[None for y in range(width)] for x in range(height)]\n self.playerSprites = SpriteList()\n self.groundSprites = SpriteList()\n\n def draw(self):\n self.playerSprites.draw()\n self.groundSprites.draw()\n\n def canMoveBlock(self, x: int, y: int) ->bool:\n return self.blocks[x][y] is None\n\n def canMoveGamePiece(self, gamePiece: gp.GamePiece, xTo: int, yTo: int\n ) ->bool:\n for yDiff, row in enumerate(gamePiece.blocks):\n for xDiff, block in enumerate(row):\n if block is None:\n continue\n newX = xTo + xDiff\n newY = yTo + yDiff\n if newX >= self.width or newX < 0:\n return False\n if newY < 0 or newY >= self.height:\n return False\n if self.blocks[newY][newX] is not None and self.blocks[newY][\n newX] not in gamePiece.allBlocks():\n return False\n return True\n\n def moveGamePiece(self, gamePiece: gp.GamePiece, xTo: int, yTo: int):\n if not self.canMoveGamePiece(gamePiece, xTo, yTo):\n return False\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n self.blocks[y + gamePiece.y][x + gamePiece.x] = None\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n blockXDiff = block.x - gamePiece.x\n blockYDiff = block.y - gamePiece.y\n newBlockX = xTo + blockXDiff\n newBlockY = yTo + blockYDiff\n self.blocks[newBlockY][newBlockX] = block\n block.moveTo(newBlockX, newBlockY)\n gamePiece.x = xTo\n gamePiece.y = yTo\n\n def addBlock(self, aBlock: gp.Block):\n \"\"\"adds a block to the game board\"\"\"\n if 
self.blocks[aBlock.y][aBlock.x] != None:\n raise MovementError('game board space not empty')\n self.blocks[aBlock.y][aBlock.x] = aBlock\n self.groundSprites.append(aBlock.sprite)\n\n def addGamePiece(self, gamePiece: gp.GamePiece):\n for y in range(gamePiece.size):\n for x in range(gamePiece.size):\n block = gamePiece.blocks[y][x]\n if block is None:\n continue\n self.blocks[block.y][block.x] = block\n self.playerSprites.append(block.sprite)\n\n def moveBlock(self, aBlock: gp.Block, x: int, y: int):\n self.blocks[aBlock.y][aBlock.x] = None\n self.blocks[y][x] = aBlock\n\n def removeBlock(self, aBlock: gp.Block):\n \"\"\" remove a block from the game board \"\"\"\n for y, row in iter(self.blocks):\n for x, block in iter(row):\n if block is aBlock:\n self.blocks[y][x] = None\n self.playerSprites.remove(aBlock.sprite)\n return\n\n\nclass GameManager:\n\n def __init__(self) ->None:\n pass\n\n def start(self):\n gameBoard = GameBoard(10, 20)\n gameBoard.addGamePiece()\n",
"step-5": "\nfrom arcade.sprite_list.sprite_list import SpriteList\nimport GamePiece as gp\nfrom Errors import *\n\nclass GameConfig:\n WINDOW_TITLE = \"MyPyTris\"\n SCREEN_WIDTH = 450\n SCREEN_HEIGHT = 900\n BLOCK_PX = 45 # 45px blocks on screen\n SPRITE_PX = 64 # 64px sprite\n BLOCK_SCALE = BLOCK_PX/SPRITE_PX # sprite scale ratio\n\nclass GameBoard:\n \"\"\" Class to manage blocks on the game board \"\"\"\n\n def __init__(self, width: int, height: int):\n # 2D list of blocks initialized to empty in the width and height of our game board\n self.width = width\n self.height = height\n self.blocks = [[None for y in range(width)] for x in range(height)]\n self.playerSprites = SpriteList()\n self.groundSprites = SpriteList()\n\n\n def draw(self):\n self.playerSprites.draw()\n self.groundSprites.draw()\n\n def canMoveBlock(self, x: int, y: int) -> bool:\n return self.blocks[x][y] is None\n\n def canMoveGamePiece(self, gamePiece:gp.GamePiece, xTo:int, yTo:int) -> bool:\n for yDiff, row in enumerate(gamePiece.blocks):\n for xDiff, block in enumerate(row):\n if block is None:\n continue\n newX = xTo + xDiff\n newY = yTo + yDiff\n if newX >= self.width or newX < 0:\n return False\n if newY < 0 or newY >= self.height:\n return False\n if self.blocks[newY][newX] is not None \\\n and self.blocks[newY][newX] not in gamePiece.allBlocks():\n return False\n return True\n\n def moveGamePiece(self, gamePiece:gp.GamePiece, xTo:int, yTo:int):\n if (not self.canMoveGamePiece(gamePiece, xTo, yTo)):\n return False\n\n # remove blocks from game board\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n self.blocks[y + gamePiece.y][x + gamePiece.x] = None\n\n # add blocks in new positions\n for y, row in enumerate(gamePiece.blocks):\n for x, block in enumerate(row):\n if block is not None:\n blockXDiff = block.x - gamePiece.x\n blockYDiff = block.y - gamePiece.y\n newBlockX = xTo + blockXDiff\n newBlockY = yTo + blockYDiff\n 
self.blocks[newBlockY][newBlockX] = block\n block.moveTo(newBlockX, newBlockY)\n\n gamePiece.x = xTo\n gamePiece.y = yTo\n \n\n def addBlock(self, aBlock: gp.Block):\n \"\"\"adds a block to the game board\"\"\"\n\n if self.blocks[aBlock.y][aBlock.x] != None:\n raise MovementError('game board space not empty')\n self.blocks[aBlock.y][aBlock.x] = aBlock\n self.groundSprites.append(aBlock.sprite)\n\n def addGamePiece(self, gamePiece:gp.GamePiece):\n for y in range(gamePiece.size):\n for x in range(gamePiece.size):\n block = gamePiece.blocks[y][x]\n if block is None:\n continue\n self.blocks[block.y][block.x] = block\n self.playerSprites.append(block.sprite)\n\n def moveBlock(self, aBlock: gp.Block, x: int, y: int):\n self.blocks[aBlock.y][aBlock.x] = None\n self.blocks[y][x] = aBlock\n\n def removeBlock(self, aBlock: gp.Block):\n \"\"\" remove a block from the game board \"\"\"\n \n for y, row in iter(self.blocks):\n for x, block in iter(row):\n if block is aBlock:\n self.blocks[y][x] = None\n self.playerSprites.remove(aBlock.sprite)\n return\n\n\nclass GameManager:\n\n def __init__(self) -> None:\n pass\n \n def start(self):\n gameBoard = GameBoard(10, 20)\n gameBoard.addGamePiece()",
"step-ids": [
7,
12,
13,
17,
18
]
}
|
[
7,
12,
13,
17,
18
] |
<|reserved_special_token_0|>
class Player:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self, x, y, angle):
self.bullets.append(Bullet((0, 255, 255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self, x, y, angle):
self.bullets2.append(Bullet((255, 255, 0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0, 600)
enemies[-1].y = random.randint(0, 600)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Player:
def __init__(self):
self.x = 275
self.y = 275
self.image = pygame.image.load('player.jpg')
self.image1 = pygame.image.load('hearts.png')
self.lives = 5
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self, x, y, angle):
self.bullets.append(Bullet((0, 255, 255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self, x, y, angle):
self.bullets2.append(Bullet((255, 255, 0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0, 600)
enemies[-1].y = random.randint(0, 600)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pygame.init()
scr = pygame.display.set_mode((700, 700))
enemies = []
hit = []
class Player:
def __init__(self):
self.x = 275
self.y = 275
self.image = pygame.image.load('player.jpg')
self.image1 = pygame.image.load('hearts.png')
self.lives = 5
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self, x, y, angle):
self.bullets.append(Bullet((0, 255, 255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self, x, y, angle):
self.bullets2.append(Bullet((255, 255, 0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0, 600)
enemies[-1].y = random.randint(0, 600)
cmd = Enemy()
gun = Gun()
player = Player()
cmd.spawn()
cmd.spawn()
last = 0
frames = 0
fro = 1
while True:
frames += 1
scr.fill((0, 0, 0))
for event in pygame.event.get():
key = pygame.key.get_pressed()
Mpos = pygame.mouse.get_pos()
if event.type == 5:
gun.shoot1(player.x + 12.5, player.y + 12.5, angle)
for i in range(0, player.lives):
scr.blit(player.image1, (i * 35, 1))
for i in range(len(gun.bullets)):
try:
gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians
(gun.bullets[i].angle + 90))
gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians
(gun.bullets[i].angle - 90))
if gun.bullets[i].x > 600:
del gun.bullets[i]
if gun.bullets[i].x < 0:
del gun.bullets[i]
if gun.bullets[i].y > 600:
del gun.bullets[i]
if gun.bullets[i].y < 0:
del gun.bullets[i]
gun.bullets[i].draw()
except IndexError:
pass
for i in range(len(gun.bullets2)):
try:
gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.
radians(gun.bullets2[i].angle + 90))
gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.
radians(gun.bullets2[i].angle - 90))
if gun.bullets2[i].x > 600:
del gun.bullets2[i]
if gun.bullets2[i].x < 0:
del gun.bullets2[i]
if gun.bullets2[i].y > 600:
del gun.bullets2[i]
if gun.bullets2[i].y < 0:
del gun.bullets2[i]
gun.bullets2[i].draw()
except IndexError:
pass
for i in range(len(enemies)):
if enemies[i].distance(player.x, player.y) > 100:
enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.
radians(enemies[i].rotate(player.x, player.y) + 90))
enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.
radians(enemies[i].rotate(player.x, player.y) - 90))
enemies[i].image = pygame.image.load('enemy.png').convert()
enemies[i].image = enemies[i].image.copy()
enemies[i].image = pygame.transform.rotate(enemies[i].image,
enemies[i].rotate(player.x, player.y))
angle2 = enemies[i].rotate(player.x, player.y)
if frames % 100 == 0:
gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)
enemies[i].draw()
for j in range(len(gun.bullets)):
for i in range(len(gun.bullets)):
try:
if gun.bullets[j].x > enemies[i].x and gun.bullets[j
].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i
].y and gun.bullets[j].y < enemies[i].y + 25:
del enemies[i]
except IndexError:
pass
for j in range(len(gun.bullets2)):
for i in range(len(gun.bullets2)):
try:
if gun.bullets2[j].x > player.x and gun.bullets2[j
].x < player.x + 25 and gun.bullets2[j
].y > player.y and gun.bullets2[j].y < player.y + 25:
for i in range(len(hit) - 1):
if not (hit[i].x > player.x or hit[i].x < player.x +
25 or hit[i].y > player.y or hit[i].y < player.y):
del hit[i]
if hit.count(gun.bullets2[j]) == 0:
hit.append(gun.bullets2[j])
player.lives = 5 - len(hit)
except IndexError:
pass
if key[pygame.K_a]:
player.x -= 3
if key[pygame.K_d]:
player.x += 3
if key[pygame.K_w]:
player.y -= 3
if key[pygame.K_s]:
player.y += 3
if frames % 150 == 0:
cmd.spawn()
if player.lives < 1:
pygame.quit()
break
player.image = pygame.image.load('player.jpg').convert()
player.image = player.image.copy()
player.image = pygame.transform.rotate(player.image, player.rotate(Mpos
[0], Mpos[1]))
angle = player.rotate(Mpos[0], Mpos[1])
player.draw()
pygame.display.update()
time.sleep(0.005)
quit()
<|reserved_special_token_1|>
import random
import math
import time
import pygame
pygame.init()
scr = pygame.display.set_mode((700, 700))
enemies = []
hit = []
class Player:
def __init__(self):
self.x = 275
self.y = 275
self.image = pygame.image.load('player.jpg')
self.image1 = pygame.image.load('hearts.png')
self.lives = 5
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self, x, y, angle):
self.bullets.append(Bullet((0, 255, 255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self, x, y, angle):
self.bullets2.append(Bullet((255, 255, 0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0, 600)
enemies[-1].y = random.randint(0, 600)
cmd = Enemy()
gun = Gun()
player = Player()
cmd.spawn()
cmd.spawn()
last = 0
frames = 0
fro = 1
while True:
frames += 1
scr.fill((0, 0, 0))
for event in pygame.event.get():
key = pygame.key.get_pressed()
Mpos = pygame.mouse.get_pos()
if event.type == 5:
gun.shoot1(player.x + 12.5, player.y + 12.5, angle)
for i in range(0, player.lives):
scr.blit(player.image1, (i * 35, 1))
for i in range(len(gun.bullets)):
try:
gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians
(gun.bullets[i].angle + 90))
gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians
(gun.bullets[i].angle - 90))
if gun.bullets[i].x > 600:
del gun.bullets[i]
if gun.bullets[i].x < 0:
del gun.bullets[i]
if gun.bullets[i].y > 600:
del gun.bullets[i]
if gun.bullets[i].y < 0:
del gun.bullets[i]
gun.bullets[i].draw()
except IndexError:
pass
for i in range(len(gun.bullets2)):
try:
gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.
radians(gun.bullets2[i].angle + 90))
gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.
radians(gun.bullets2[i].angle - 90))
if gun.bullets2[i].x > 600:
del gun.bullets2[i]
if gun.bullets2[i].x < 0:
del gun.bullets2[i]
if gun.bullets2[i].y > 600:
del gun.bullets2[i]
if gun.bullets2[i].y < 0:
del gun.bullets2[i]
gun.bullets2[i].draw()
except IndexError:
pass
for i in range(len(enemies)):
if enemies[i].distance(player.x, player.y) > 100:
enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.
radians(enemies[i].rotate(player.x, player.y) + 90))
enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.
radians(enemies[i].rotate(player.x, player.y) - 90))
enemies[i].image = pygame.image.load('enemy.png').convert()
enemies[i].image = enemies[i].image.copy()
enemies[i].image = pygame.transform.rotate(enemies[i].image,
enemies[i].rotate(player.x, player.y))
angle2 = enemies[i].rotate(player.x, player.y)
if frames % 100 == 0:
gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)
enemies[i].draw()
for j in range(len(gun.bullets)):
for i in range(len(gun.bullets)):
try:
if gun.bullets[j].x > enemies[i].x and gun.bullets[j
].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i
].y and gun.bullets[j].y < enemies[i].y + 25:
del enemies[i]
except IndexError:
pass
for j in range(len(gun.bullets2)):
for i in range(len(gun.bullets2)):
try:
if gun.bullets2[j].x > player.x and gun.bullets2[j
].x < player.x + 25 and gun.bullets2[j
].y > player.y and gun.bullets2[j].y < player.y + 25:
for i in range(len(hit) - 1):
if not (hit[i].x > player.x or hit[i].x < player.x +
25 or hit[i].y > player.y or hit[i].y < player.y):
del hit[i]
if hit.count(gun.bullets2[j]) == 0:
hit.append(gun.bullets2[j])
player.lives = 5 - len(hit)
except IndexError:
pass
if key[pygame.K_a]:
player.x -= 3
if key[pygame.K_d]:
player.x += 3
if key[pygame.K_w]:
player.y -= 3
if key[pygame.K_s]:
player.y += 3
if frames % 150 == 0:
cmd.spawn()
if player.lives < 1:
pygame.quit()
break
player.image = pygame.image.load('player.jpg').convert()
player.image = player.image.copy()
player.image = pygame.transform.rotate(player.image, player.rotate(Mpos
[0], Mpos[1]))
angle = player.rotate(Mpos[0], Mpos[1])
player.draw()
pygame.display.update()
time.sleep(0.005)
quit()
<|reserved_special_token_1|>
import random
import math
import time
import pygame
pygame.init()
# 700x700 game window; every draw call below blits onto this surface.
scr = pygame.display.set_mode((700,700))
# All live enemies; Enemy.spawn() appends to this global list.
enemies = []
#music = pygame.mixer.music.load('ENERGETIC CHIPTUNE Thermal - Evan King.mp3')
#pygame.mixer.music.play(-1)
# Enemy bullets that have struck the player; lives = 5 - len(hit).
hit = []
class Player:
    """The user-controlled ship: position, sprites, and remaining lives."""

    def __init__(self):
        # Start near the middle of the 700x700 window.
        self.x = 275
        self.y = 275
        self.image = pygame.image.load('player.jpg')
        # Heart icon used for the on-screen life bar.
        self.image1 = pygame.image.load('hearts.png')
        self.lives = 5

    def draw(self):
        """Blit the player sprite at its current position."""
        scr.blit(self.image, (self.x, self.y))

    def rotate(self, x, y):
        """Return the sprite angle (degrees, offset by -90) that faces (x, y).

        Raises ZeroDivisionError when (x, y) coincides with the player.
        """
        rise = math.fabs(y - self.y)
        run = math.fabs(x - self.x)
        # asin(opposite/hypotenuse), converted with the original 180/3.14
        # factor so results match the rest of the game exactly.
        angle = math.asin(rise / math.hypot(rise, run)) * (180 / 3.14)
        if x > self.x and y > self.y:
            angle = -angle
        if x < self.x:
            angle = 180 - angle
            if y > self.y:
                angle = -angle
        return angle - 90
class Bullet:
    """A single 5x5 projectile travelling at a fixed angle."""

    def __init__(self, color):
        # Position and heading are filled in by Gun right after construction.
        self.x = self.y = self.angle = 0
        self.color = color

    def draw(self):
        """Render the bullet as a small filled square."""
        pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
    """Owner of every live bullet: the player's in `bullets`, enemies' in `bullets2`."""

    def __init__(self):
        self.x = 0
        self.y = 0
        self.bullets = []    # player bullets (cyan)
        self.bullets2 = []   # enemy bullets (yellow)

    def _spawn_bullet(self, store, color, x, y, angle):
        # Shared helper: shoot1/shoot2 were byte-for-byte duplicates apart
        # from the list and colour; build the aimed bullet in one place.
        bullet = Bullet(color)
        bullet.x = x
        bullet.y = y
        bullet.angle = angle
        store.append(bullet)

    def shoot1(self, x, y, angle):
        """Fire a player (cyan) bullet from (x, y) heading at `angle` degrees."""
        self._spawn_bullet(self.bullets, (0, 255, 255), x, y, angle)

    def shoot2(self, x, y, angle):
        """Fire an enemy (yellow) bullet from (x, y) heading at `angle` degrees."""
        self._spawn_bullet(self.bullets2, (255, 255, 0), x, y, angle)
class Enemy:
    """A chasing enemy: approaches the player, faces it, and fires at it."""

    def __init__(self):
        self.x = 100
        self.y = 100
        self.speed = 2
        self.hearts = 3
        self.image = pygame.image.load('enemy.png')

    def draw(self):
        """Blit the enemy sprite at its current position."""
        scr.blit(self.image, (self.x, self.y))

    def rotate(self, x, y):
        """Return the sprite angle (degrees, offset by -90) that faces (x, y).

        Raises ZeroDivisionError when (x, y) coincides with the enemy.
        """
        rise = math.fabs(y - self.y)
        run = math.fabs(x - self.x)
        # Same 180/3.14 conversion as Player.rotate so headings agree.
        angle = math.asin(rise / math.hypot(rise, run)) * (180 / 3.14)
        if x > self.x and y > self.y:
            angle = -angle
        if x < self.x:
            angle = 180 - angle
            if y > self.y:
                angle = -angle
        return angle - 90

    def distance(self, x, y):
        """Euclidean distance from this enemy to the point (x, y)."""
        return math.hypot(math.fabs(y - self.y), math.fabs(x - self.x))

    def spawn(self):
        """Append a freshly positioned enemy to the global `enemies` list."""
        fresh = Enemy()
        fresh.x = random.randint(0, 600)
        fresh.y = random.randint(0, 600)
        enemies.append(fresh)
# --- Game objects ----------------------------------------------------------
# `cmd` exists only so Enemy.spawn() can be called; spawn() appends to the
# global `enemies` list rather than touching `cmd` itself.
cmd = Enemy()
gun = Gun()
player = Player()
# Start the game with two enemies at random positions.
cmd.spawn()
cmd.spawn()
# NOTE(review): `last` and `fro` are never read below — apparently leftovers.
last = 0
frames = 0
fro = 1
# --- Main loop: one iteration per rendered frame ---------------------------
while True:
    frames += 1
    scr.fill((0,0,0))
    for event in pygame.event.get():
        key = pygame.key.get_pressed()
        Mpos = pygame.mouse.get_pos()
        # 5 is the classic (SDL1-era) value of pygame.MOUSEBUTTONDOWN —
        # TODO confirm against the pygame version actually in use.
        if event.type == 5:
            # Fire from the sprite's centre (+12.5 on a 25 px sprite).
            # NOTE(review): `angle` is first assigned at the bottom of the
            # loop, so a click during the very first frame raises NameError.
            gun.shoot1(player.x + 12.5,player.y + 12.5,angle)
    # One heart icon per remaining life along the top edge.
    for i in range(0,player.lives):
        scr.blit(player.image1,(i*35,1))
    # Advance player bullets 4 px/frame along their stored angle and draw
    # them; bullets leaving the 0..600 square are deleted.
    # NOTE(review): `del` inside an index-based loop shifts later bullets
    # down, so the follow-up checks act on the next bullet and the eventual
    # IndexError at the end of the shortened list is swallowed.
    for i in range(len(gun.bullets)):
        try:
            gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians(gun.bullets[i].angle + 90))
            gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians(gun.bullets[i].angle - 90))
            if gun.bullets[i].x > 600:
                del gun.bullets[i]
            if gun.bullets[i].x < 0:
                del gun.bullets[i]
            if gun.bullets[i].y > 600:
                del gun.bullets[i]
            if gun.bullets[i].y < 0:
                del gun.bullets[i]
            gun.bullets[i].draw()
        except IndexError:
            pass
    # Same move/cull/draw pass for the enemies' bullets.
    for i in range(len(gun.bullets2)):
        try:
            gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.radians(gun.bullets2[i].angle + 90))
            gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.radians(gun.bullets2[i].angle - 90))
            if gun.bullets2[i].x > 600:
                del gun.bullets2[i]
            if gun.bullets2[i].x < 0:
                del gun.bullets2[i]
            if gun.bullets2[i].y > 600:
                del gun.bullets2[i]
            if gun.bullets2[i].y < 0:
                del gun.bullets2[i]
            gun.bullets2[i].draw()
        except IndexError:
            pass
    # Enemies chase the player while farther than 100 px away, always face
    # the player, and each fires one bullet every 100 frames.
    for i in range(len(enemies)):
        if enemies[i].distance(player.x,player.y) > 100:
            enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.radians(enemies[i].rotate(player.x,player.y) + 90))
            enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.radians(enemies[i].rotate(player.x,player.y) - 90))
        # Re-load the sprite from disk each frame so the rotation applied
        # below does not compound frame over frame.
        enemies[i].image = pygame.image.load("enemy.png").convert()
        enemies[i].image = enemies[i].image.copy()
        enemies[i].image = pygame.transform.rotate(enemies[i].image,enemies[i].rotate(player.x,player.y))
        angle2 = enemies[i].rotate(player.x,player.y)
        if frames % 100 == 0:
            gun.shoot2(enemies[i].x + 12.5,enemies[i].y + 12.5,angle2)
        enemies[i].draw()
    # Player bullets vs enemies: an enemy inside a bullet's 25 px box dies.
    # NOTE(review): the inner range runs over gun.bullets but indexes
    # `enemies[i]`, and the hitting bullet itself is never removed —
    # confirm whether that is intended.
    for j in range(len(gun.bullets)):
        for i in range(len(gun.bullets)):
            try:
                if gun.bullets[j].x > enemies[i].x and gun.bullets[j].x < enemies[i].x+25 and gun.bullets[j].y > enemies[i].y and gun.bullets[j].y < enemies[i].y + 25:
                    del enemies[i]
            except IndexError:
                pass
    # Enemy bullets vs player: each distinct bullet overlapping the player
    # is recorded once in `hit`, and lives = 5 - number of recorded hits.
    for j in range(len(gun.bullets2)):
        for i in range(len(gun.bullets2)):
            try:
                if gun.bullets2[j].x > player.x and gun.bullets2[j].x < player.x+25 and gun.bullets2[j].y > player.y and gun.bullets2[j].y < player.y + 25:
                    # Drop stale entries that no longer overlap the player.
                    # NOTE(review): this re-uses loop variable `i`,
                    # shadowing the outer bullet index above.
                    for i in range(len(hit)-1):
                        if not (hit[i].x > player.x or hit[i].x < player.x+25 or hit[i].y > player.y or hit[i].y < player.y):
                            del hit[i]
                    if hit.count(gun.bullets2[j]) == 0:
                        hit.append(gun.bullets2[j])
                    player.lives = 5 - len(hit)
            except IndexError:
                pass
    # WASD movement, 3 px per frame.
    # NOTE(review): `key` (and `Mpos` below) are only assigned when at least
    # one event was pending this frame — a first frame with no events would
    # raise NameError here.
    if key[pygame.K_a]:
        player.x -= 3
    if key[pygame.K_d]:
        player.x += 3
    if key[pygame.K_w]:
        player.y -= 3
    if key[pygame.K_s]:
        player.y += 3
    # Reinforcements: one extra enemy every 150 frames.
    if frames % 150 == 0:
        cmd.spawn()
    # Out of lives: shut pygame down and leave the loop.
    if player.lives < 1:
        pygame.quit()
        break
    # Re-load and rotate the player sprite to face the mouse; `angle` is
    # cached for the next shot fired in the event loop above.
    player.image = pygame.image.load("player.jpg").convert()
    player.image = player.image.copy()
    player.image = pygame.transform.rotate(player.image,player.rotate(Mpos[0],Mpos[1]))
    angle = player.rotate(Mpos[0],Mpos[1])
    player.draw()
    pygame.display.update()
    time.sleep(0.005)
quit()
|
flexible
|
{
"blob_id": "54e04d740ef46fca04cf4169d2e7c05083414bd8",
"index": 11,
"step-1": "<mask token>\n\n\nclass Player:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n 
hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<mask token>\n",
"step-3": "<mask token>\npygame.init()\nscr = pygame.display.set_mode((700, 700))\nenemies = []\nhit = []\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def 
distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\ncmd = Enemy()\ngun = Gun()\nplayer = Player()\ncmd.spawn()\ncmd.spawn()\nlast = 0\nframes = 0\nfro = 1\nwhile True:\n frames += 1\n scr.fill((0, 0, 0))\n for event in pygame.event.get():\n key = pygame.key.get_pressed()\n Mpos = pygame.mouse.get_pos()\n if event.type == 5:\n gun.shoot1(player.x + 12.5, player.y + 12.5, angle)\n for i in range(0, player.lives):\n scr.blit(player.image1, (i * 35, 1))\n for i in range(len(gun.bullets)):\n try:\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians\n (gun.bullets[i].angle + 90))\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians\n (gun.bullets[i].angle - 90))\n if gun.bullets[i].x > 600:\n del gun.bullets[i]\n if gun.bullets[i].x < 0:\n del gun.bullets[i]\n if gun.bullets[i].y > 600:\n del gun.bullets[i]\n if gun.bullets[i].y < 0:\n del gun.bullets[i]\n gun.bullets[i].draw()\n except IndexError:\n pass\n for i in range(len(gun.bullets2)):\n try:\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.\n radians(gun.bullets2[i].angle + 90))\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.\n radians(gun.bullets2[i].angle - 90))\n if gun.bullets2[i].x > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].x < 0:\n del gun.bullets2[i]\n if gun.bullets2[i].y > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].y < 0:\n del gun.bullets2[i]\n gun.bullets2[i].draw()\n except IndexError:\n pass\n for i in range(len(enemies)):\n if enemies[i].distance(player.x, player.y) > 100:\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.\n radians(enemies[i].rotate(player.x, player.y) + 90))\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.\n radians(enemies[i].rotate(player.x, player.y) - 90))\n 
enemies[i].image = pygame.image.load('enemy.png').convert()\n enemies[i].image = enemies[i].image.copy()\n enemies[i].image = pygame.transform.rotate(enemies[i].image,\n enemies[i].rotate(player.x, player.y))\n angle2 = enemies[i].rotate(player.x, player.y)\n if frames % 100 == 0:\n gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)\n enemies[i].draw()\n for j in range(len(gun.bullets)):\n for i in range(len(gun.bullets)):\n try:\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j\n ].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i\n ].y and gun.bullets[j].y < enemies[i].y + 25:\n del enemies[i]\n except IndexError:\n pass\n for j in range(len(gun.bullets2)):\n for i in range(len(gun.bullets2)):\n try:\n if gun.bullets2[j].x > player.x and gun.bullets2[j\n ].x < player.x + 25 and gun.bullets2[j\n ].y > player.y and gun.bullets2[j].y < player.y + 25:\n for i in range(len(hit) - 1):\n if not (hit[i].x > player.x or hit[i].x < player.x +\n 25 or hit[i].y > player.y or hit[i].y < player.y):\n del hit[i]\n if hit.count(gun.bullets2[j]) == 0:\n hit.append(gun.bullets2[j])\n player.lives = 5 - len(hit)\n except IndexError:\n pass\n if key[pygame.K_a]:\n player.x -= 3\n if key[pygame.K_d]:\n player.x += 3\n if key[pygame.K_w]:\n player.y -= 3\n if key[pygame.K_s]:\n player.y += 3\n if frames % 150 == 0:\n cmd.spawn()\n if player.lives < 1:\n pygame.quit()\n break\n player.image = pygame.image.load('player.jpg').convert()\n player.image = player.image.copy()\n player.image = pygame.transform.rotate(player.image, player.rotate(Mpos\n [0], Mpos[1]))\n angle = player.rotate(Mpos[0], Mpos[1])\n player.draw()\n pygame.display.update()\n time.sleep(0.005)\nquit()\n",
"step-4": "import random\nimport math\nimport time\nimport pygame\npygame.init()\nscr = pygame.display.set_mode((700, 700))\nenemies = []\nhit = []\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle 
+ angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\ncmd = Enemy()\ngun = Gun()\nplayer = Player()\ncmd.spawn()\ncmd.spawn()\nlast = 0\nframes = 0\nfro = 1\nwhile True:\n frames += 1\n scr.fill((0, 0, 0))\n for event in pygame.event.get():\n key = pygame.key.get_pressed()\n Mpos = pygame.mouse.get_pos()\n if event.type == 5:\n gun.shoot1(player.x + 12.5, player.y + 12.5, angle)\n for i in range(0, player.lives):\n scr.blit(player.image1, (i * 35, 1))\n for i in range(len(gun.bullets)):\n try:\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians\n (gun.bullets[i].angle + 90))\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians\n (gun.bullets[i].angle - 90))\n if gun.bullets[i].x > 600:\n del gun.bullets[i]\n if gun.bullets[i].x < 0:\n del gun.bullets[i]\n if gun.bullets[i].y > 600:\n del gun.bullets[i]\n if gun.bullets[i].y < 0:\n del gun.bullets[i]\n gun.bullets[i].draw()\n except IndexError:\n pass\n for i in range(len(gun.bullets2)):\n try:\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.\n radians(gun.bullets2[i].angle + 90))\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.\n radians(gun.bullets2[i].angle - 90))\n if gun.bullets2[i].x > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].x < 0:\n del gun.bullets2[i]\n if gun.bullets2[i].y > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].y < 0:\n del gun.bullets2[i]\n gun.bullets2[i].draw()\n except IndexError:\n pass\n for i in range(len(enemies)):\n if enemies[i].distance(player.x, player.y) > 100:\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.\n radians(enemies[i].rotate(player.x, player.y) + 90))\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.\n 
radians(enemies[i].rotate(player.x, player.y) - 90))\n enemies[i].image = pygame.image.load('enemy.png').convert()\n enemies[i].image = enemies[i].image.copy()\n enemies[i].image = pygame.transform.rotate(enemies[i].image,\n enemies[i].rotate(player.x, player.y))\n angle2 = enemies[i].rotate(player.x, player.y)\n if frames % 100 == 0:\n gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)\n enemies[i].draw()\n for j in range(len(gun.bullets)):\n for i in range(len(gun.bullets)):\n try:\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j\n ].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i\n ].y and gun.bullets[j].y < enemies[i].y + 25:\n del enemies[i]\n except IndexError:\n pass\n for j in range(len(gun.bullets2)):\n for i in range(len(gun.bullets2)):\n try:\n if gun.bullets2[j].x > player.x and gun.bullets2[j\n ].x < player.x + 25 and gun.bullets2[j\n ].y > player.y and gun.bullets2[j].y < player.y + 25:\n for i in range(len(hit) - 1):\n if not (hit[i].x > player.x or hit[i].x < player.x +\n 25 or hit[i].y > player.y or hit[i].y < player.y):\n del hit[i]\n if hit.count(gun.bullets2[j]) == 0:\n hit.append(gun.bullets2[j])\n player.lives = 5 - len(hit)\n except IndexError:\n pass\n if key[pygame.K_a]:\n player.x -= 3\n if key[pygame.K_d]:\n player.x += 3\n if key[pygame.K_w]:\n player.y -= 3\n if key[pygame.K_s]:\n player.y += 3\n if frames % 150 == 0:\n cmd.spawn()\n if player.lives < 1:\n pygame.quit()\n break\n player.image = pygame.image.load('player.jpg').convert()\n player.image = player.image.copy()\n player.image = pygame.transform.rotate(player.image, player.rotate(Mpos\n [0], Mpos[1]))\n angle = player.rotate(Mpos[0], Mpos[1])\n player.draw()\n pygame.display.update()\n time.sleep(0.005)\nquit()\n",
"step-5": "import random\r\nimport math\r\nimport time\r\nimport pygame\r\npygame.init()\r\nscr = pygame.display.set_mode((700,700))\r\nenemies = []\r\n#music = pygame.mixer.music.load('ENERGETIC CHIPTUNE Thermal - Evan King.mp3')\r\n#pygame.mixer.music.play(-1)\r\nhit = []\r\nclass Player:\r\n def __init__(self):\r\n self.x = 275\r\n self.y = 275\r\n self.image = pygame.image.load('player.jpg')\r\n self.image1 = pygame.image.load('hearts.png')\r\n self.lives = 5\r\n def draw(self):\r\n scr.blit(self.image,(self.x,self.y))\r\n def rotate(self, x, y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n sin = oppos/hypot\r\n radians = math.asin(sin)\r\n angle = radians * (180/3.14)\r\n if x > self.x: \r\n if y > self.y:\r\n angle -= angle + angle\r\n if x < self.x:\r\n angle = 180 + (angle - (angle + angle))\r\n if y > self.y:\r\n angle -= angle + angle\r\n return angle - 90\r\nclass Bullet:\r\n def __init__(self, color):\r\n self.x = 0\r\n self.y = 0\r\n self.angle = 0\r\n self.color = color\r\n def draw(self):\r\n pygame.draw.rect(scr,self.color,pygame.Rect(self.x,self.y,5,5))\r\nclass Gun:\r\n def __init__(self):\r\n self.x = 0\r\n self.y = 0\r\n self.bullets = []\r\n self.bullets2 = []\r\n def shoot1(self,x,y,angle):\r\n self.bullets.append(Bullet((0,255,255)))\r\n self.bullets[-1].x = x\r\n self.bullets[-1].y = y\r\n self.bullets[-1].angle = angle\r\n def shoot2(self,x,y,angle):\r\n self.bullets2.append(Bullet((255,255,0)))\r\n self.bullets2[-1].x = x\r\n self.bullets2[-1].y = y\r\n self.bullets2[-1].angle = angle\r\nclass Enemy:\r\n def __init__(self):\r\n self.x = 100\r\n self.y = 100\r\n self.speed = 2\r\n self.hearts = 3\r\n self.image = pygame.image.load('enemy.png')\r\n def draw(self):\r\n scr.blit(self.image,(self.x,self.y))\r\n def rotate(self, x, y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n sin = oppos/hypot\r\n radians = 
math.asin(sin)\r\n angle = radians * (180/3.14)\r\n if x > self.x: \r\n if y > self.y:\r\n angle -= angle + angle\r\n if x < self.x:\r\n angle = 180 + (angle - (angle + angle))\r\n if y > self.y:\r\n angle -= angle + angle\r\n return angle - 90\r\n def distance(self,x,y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n return hypot\r\n def spawn(self):\r\n enemies.append(Enemy())\r\n enemies[-1].x = random.randint(0,600)\r\n enemies[-1].y = random.randint(0,600)\r\ncmd = Enemy()\r\ngun = Gun() \r\nplayer = Player()\r\ncmd.spawn()\r\ncmd.spawn()\r\nlast = 0\r\nframes = 0\r\nfro = 1\r\nwhile True:\r\n frames += 1\r\n scr.fill((0,0,0))\r\n for event in pygame.event.get():\r\n key = pygame.key.get_pressed()\r\n Mpos = pygame.mouse.get_pos()\r\n if event.type == 5:\r\n gun.shoot1(player.x + 12.5,player.y + 12.5,angle)\r\n for i in range(0,player.lives):\r\n scr.blit(player.image1,(i*35,1))\r\n \r\n for i in range(len(gun.bullets)):\r\n try:\r\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians(gun.bullets[i].angle + 90))\r\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians(gun.bullets[i].angle - 90))\r\n if gun.bullets[i].x > 600:\r\n del gun.bullets[i]\r\n if gun.bullets[i].x < 0:\r\n del gun.bullets[i]\r\n if gun.bullets[i].y > 600:\r\n del gun.bullets[i]\r\n if gun.bullets[i].y < 0:\r\n del gun.bullets[i]\r\n gun.bullets[i].draw()\r\n except IndexError:\r\n pass\r\n for i in range(len(gun.bullets2)):\r\n try:\r\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.radians(gun.bullets2[i].angle + 90))\r\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.radians(gun.bullets2[i].angle - 90))\r\n if gun.bullets2[i].x > 600:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].x < 0:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].y > 600:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].y < 0:\r\n del gun.bullets2[i]\r\n gun.bullets2[i].draw()\r\n except IndexError:\r\n 
pass\r\n for i in range(len(enemies)):\r\n if enemies[i].distance(player.x,player.y) > 100:\r\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.radians(enemies[i].rotate(player.x,player.y) + 90))\r\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.radians(enemies[i].rotate(player.x,player.y) - 90))\r\n enemies[i].image = pygame.image.load(\"enemy.png\").convert()\r\n enemies[i].image = enemies[i].image.copy()\r\n enemies[i].image = pygame.transform.rotate(enemies[i].image,enemies[i].rotate(player.x,player.y))\r\n angle2 = enemies[i].rotate(player.x,player.y)\r\n if frames % 100 == 0:\r\n gun.shoot2(enemies[i].x + 12.5,enemies[i].y + 12.5,angle2)\r\n enemies[i].draw()\r\n for j in range(len(gun.bullets)):\r\n for i in range(len(gun.bullets)):\r\n try:\r\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j].x < enemies[i].x+25 and gun.bullets[j].y > enemies[i].y and gun.bullets[j].y < enemies[i].y + 25:\r\n del enemies[i]\r\n except IndexError:\r\n pass\r\n for j in range(len(gun.bullets2)):\r\n for i in range(len(gun.bullets2)):\r\n try:\r\n if gun.bullets2[j].x > player.x and gun.bullets2[j].x < player.x+25 and gun.bullets2[j].y > player.y and gun.bullets2[j].y < player.y + 25:\r\n for i in range(len(hit)-1):\r\n if not (hit[i].x > player.x or hit[i].x < player.x+25 or hit[i].y > player.y or hit[i].y < player.y):\r\n del hit[i]\r\n if hit.count(gun.bullets2[j]) == 0:\r\n hit.append(gun.bullets2[j])\r\n player.lives = 5 - len(hit)\r\n except IndexError:\r\n pass\r\n if key[pygame.K_a]:\r\n player.x -= 3\r\n if key[pygame.K_d]:\r\n player.x += 3\r\n if key[pygame.K_w]:\r\n player.y -= 3\r\n if key[pygame.K_s]:\r\n player.y += 3\r\n if frames % 150 == 0:\r\n cmd.spawn()\r\n if player.lives < 1:\r\n pygame.quit()\r\n break\r\n player.image = pygame.image.load(\"player.jpg\").convert()\r\n player.image = player.image.copy()\r\n player.image = pygame.transform.rotate(player.image,player.rotate(Mpos[0],Mpos[1]))\r\n angle = 
player.rotate(Mpos[0],Mpos[1])\r\n player.draw()\r\n pygame.display.update()\r\n time.sleep(0.005)\r\nquit()\r\n\r\n",
"step-ids": [
14,
17,
19,
20,
21
]
}
|
[
14,
17,
19,
20,
21
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lcs2(a, b):
dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]
for x in range(1, a + 1):
for y in range(1, b + 1):
if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:
dp_result[x][y] = dp_result[x - 1][y - 1] + 1
else:
dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -
1], dp_result[x][y])
return dp_result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lcs2(a, b):
dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]
for x in range(1, a + 1):
for y in range(1, b + 1):
if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:
dp_result[x][y] = dp_result[x - 1][y - 1] + 1
else:
dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -
1], dp_result[x][y])
return dp_result
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
data = data[1:]
a = data[:n]
data = data[n:]
m = data[0]
data = data[1:]
b = data[:m]
print(lcs2(a, b))
<|reserved_special_token_1|>
import sys
def lcs2(a, b):
dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]
for x in range(1, a + 1):
for y in range(1, b + 1):
if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:
dp_result[x][y] = dp_result[x - 1][y - 1] + 1
else:
dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -
1], dp_result[x][y])
return dp_result
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
data = data[1:]
a = data[:n]
data = data[n:]
m = data[0]
data = data[1:]
b = data[:m]
print(lcs2(a, b))
<|reserved_special_token_1|>
#Uses python3
import sys
def lcs2(a, b):
    """Return the length of the longest common subsequence of `a` and `b`.

    Classic O(len(a) * len(b)) dynamic programme: dp[x][y] is the LCS
    length of the prefixes a[:x] and b[:y].  Fixes the original, which
    used the lists themselves as range bounds (TypeError), compared
    against undefined names `c`/`z` (left over from a three-sequence
    variant), and returned the whole table instead of the answer.
    """
    n, m = len(a), len(b)
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for x in range(1, n + 1):
        for y in range(1, m + 1):
            if a[x - 1] == b[y - 1]:
                # Matching tail elements extend the LCS of both prefixes.
                dp[x][y] = dp[x - 1][y - 1] + 1
            else:
                # Otherwise drop one tail element, whichever is better.
                dp[x][y] = max(dp[x - 1][y], dp[x][y - 1])
    return dp[n][m]
if __name__ == '__main__':
    # Input format: n, then n ints of sequence a, then m, then m ints of b.
    tokens = list(map(int, sys.stdin.read().split()))
    n = tokens[0]
    a = tokens[1:1 + n]
    m = tokens[1 + n]
    b = tokens[2 + n:2 + n + m]
    print(lcs2(a, b))
|
flexible
|
{
"blob_id": "d20b336c6588c3cfc4393256b660d6e4ff56b84e",
"index": 1543,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lcs2(a, b):\n dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]\n for x in range(1, a + 1):\n for y in range(1, b + 1):\n if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:\n dp_result[x][y] = dp_result[x - 1][y - 1] + 1\n else:\n dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -\n 1], dp_result[x][y])\n return dp_result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef lcs2(a, b):\n dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]\n for x in range(1, a + 1):\n for y in range(1, b + 1):\n if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:\n dp_result[x][y] = dp_result[x - 1][y - 1] + 1\n else:\n dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -\n 1], dp_result[x][y])\n return dp_result\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n data = data[1:]\n a = data[:n]\n data = data[n:]\n m = data[0]\n data = data[1:]\n b = data[:m]\n print(lcs2(a, b))\n",
"step-4": "import sys\n\n\ndef lcs2(a, b):\n dp_result = [[(0) for j in range(b + 1)] for i in range(a + 1)]\n for x in range(1, a + 1):\n for y in range(1, b + 1):\n if a[x - 1] == b[y - 1] and b[y - 1] == c[z - 1]:\n dp_result[x][y] = dp_result[x - 1][y - 1] + 1\n else:\n dp_result[x][y] = max(dp_result[x - 1][y], dp_result[x][y -\n 1], dp_result[x][y])\n return dp_result\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n data = data[1:]\n a = data[:n]\n data = data[n:]\n m = data[0]\n data = data[1:]\n b = data[:m]\n print(lcs2(a, b))\n",
"step-5": "#Uses python3\n\nimport sys\n\ndef lcs2(a, b): \n dp_result = [[0 for j in range(b+1)] for i in range(a+1)]\n for x in range(1, a+1):\n for y in range(1, b+1):\n if a[x-1] == b[y-1] and b[y-1] == c[z-1]: \n dp_result[x][y] = dp_result[x-1][y-1] + 1\n else:\n dp_result[x][y] = max(dp_result[x-1][y], dp_result[x][y-1], dp_result[x][y])\n\n return dp_result\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n\n n = data[0]\n data = data[1:]\n a = data[:n]\n\n data = data[n:]\n m = data[0]\n data = data[1:]\n b = data[:m]\n\n print(lcs2(a, b))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def qs(li):
n, p = len(li), len(li) // 2 - 1
if n <= 1:
return li
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def qs(li):
n, p = len(li), len(li) // 2 - 1
if n <= 1:
return li
print(qs([11, 45, 23, 81, 28, 34]))
print(qs([11, 45, 22, 81, 23, 34, 99, 22, 17, 8]))
print(qs([1, 1, 1, 1, 1, 0, 0, 0, 0, 0]))
<|reserved_special_token_1|>
# def qs(li):
# n = len(li)
# if n <= 1:
# return li
# pivot = li[n - 1]
# left = []
# right = []
# for i in li[:n - 1]:
# if i <= pivot:
# left.append(i)
# else:
# right.append(i)
# left = qs(left)
# right = qs(right)
# return left + [pivot] + right
def qs(li):
    """Return a new list with the elements of *li* sorted ascending (quicksort).

    Uses the last element as the pivot; duplicates of the pivot go to the
    left partition. The input list is not modified.
    Note: the original body only handled ``len(li) <= 1`` and implicitly
    returned ``None`` for longer lists; this completes the recursion.
    """
    n = len(li)
    if n <= 1:
        return li
    pivot = li[n - 1]
    left = [x for x in li[:n - 1] if x <= pivot]
    right = [x for x in li[:n - 1] if x > pivot]
    return qs(left) + [pivot] + qs(right)


print(qs([11, 45, 23, 81, 28, 34]))
print(qs([11, 45, 22, 81, 23, 34, 99, 22, 17, 8]))
print(qs([1, 1, 1, 1, 1, 0, 0, 0, 0, 0]))
|
flexible
|
{
"blob_id": "605d8144d18207314981872ec57cec6cb2510601",
"index": 7457,
"step-1": "<mask token>\n",
"step-2": "def qs(li):\n n, p = len(li), len(li) // 2 - 1\n if n <= 1:\n return li\n\n\n<mask token>\n",
"step-3": "def qs(li):\n n, p = len(li), len(li) // 2 - 1\n if n <= 1:\n return li\n\n\nprint(qs([11, 45, 23, 81, 28, 34]))\nprint(qs([11, 45, 22, 81, 23, 34, 99, 22, 17, 8]))\nprint(qs([1, 1, 1, 1, 1, 0, 0, 0, 0, 0]))\n",
"step-4": "# def qs(li):\n# n = len(li)\n# if n <= 1:\n# return li\n# pivot = li[n - 1]\n# left = []\n# right = []\n# for i in li[:n - 1]:\n# if i <= pivot:\n# left.append(i)\n# else:\n# right.append(i)\n# left = qs(left)\n# right = qs(right)\n# return left + [pivot] + right\n\n\ndef qs(li):\n n, p = len(li), len(li)//2 - 1\n if n <= 1:\n return li\n\n\nprint(qs([11, 45, 23, 81, 28, 34]))\nprint(qs([11, 45, 22, 81, 23, 34, 99, 22, 17, 8]))\nprint(qs([1, 1, 1, 1, 1, 0, 0, 0, 0, 0]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(2000):
squares[i * i] = i
<|reserved_special_token_0|>
for a in range(1, 1001):
for b in range(a + 1, 1001):
if a * a + b * b not in squares:
continue
c = squares[a * a + b * b]
perims[a + b + c] += 1
for perim, v in sorted(perims.items(), key=lambda x: x[1]):
if v > 1 and perim <= 1000:
print(perim, v)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
squares = dict()
for i in range(2000):
squares[i * i] = i
perims = defaultdict(int)
for a in range(1, 1001):
for b in range(a + 1, 1001):
if a * a + b * b not in squares:
continue
c = squares[a * a + b * b]
perims[a + b + c] += 1
for perim, v in sorted(perims.items(), key=lambda x: x[1]):
if v > 1 and perim <= 1000:
print(perim, v)
<|reserved_special_token_1|>
from collections import defaultdict

# Map each perfect square to its integer root for O(1) hypotenuse lookup.
squares = {i * i: i for i in range(2000)}

# Count, for every perimeter, how many right triangles (a < b <= 1000) produce it.
perims = defaultdict(int)
for a in range(1, 1001):
    for b in range(a + 1, 1001):
        hyp_sq = a * a + b * b
        if hyp_sq in squares:
            perims[a + b + squares[hyp_sq]] += 1

# Report perimeters up to 1000 shared by more than one triangle,
# ordered by how many triangles share them (stable on ties).
for perim, count in sorted(perims.items(), key=lambda item: item[1]):
    if count > 1 and perim <= 1000:
        print(perim, count)
|
flexible
|
{
"blob_id": "a3299a2945a638c74c2d16bc28079ed692718fbd",
"index": 2703,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2000):\n squares[i * i] = i\n<mask token>\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-3": "<mask token>\nsquares = dict()\nfor i in range(2000):\n squares[i * i] = i\nperims = defaultdict(int)\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-4": "from collections import defaultdict\nsquares = dict()\nfor i in range(2000):\n squares[i * i] = i\nperims = defaultdict(int)\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from apps.sources.models.mixins.page_numbers import PageNumbersMixin
from apps.sources.models.source import Source
PIECE_TYPES = (('essay', 'Essay'),)
TYPE_MAX_LENGTH: int = 10


class Piece(Source, PageNumbersMixin):
    """A piece of writing, such as an essay."""

    # The kind of piece; only 'essay' is currently defined.
    type = models.CharField(
        verbose_name=_('piece type'),
        max_length=TYPE_MAX_LENGTH,
        choices=PIECE_TYPES,
        default=PIECE_TYPES[0][0],
    )

    def __html__(self) -> str:
        """Assemble and return the citation HTML string for this piece."""
        date_string = self.date.string if self.date else ''
        return self.components_to_html(
            [self.attributee_html, f'"{self.linked_title}"', date_string]
        )
|
normal
|
{
"blob_id": "30c24b9a4738c1952fc5d36a4bc36d8d3576ed3b",
"index": 7201,
"step-1": "<mask token>\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-2": "<mask token>\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-3": "<mask token>\nPIECE_TYPES = ('essay', 'Essay'),\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-4": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom apps.sources.models.mixins.page_numbers import PageNumbersMixin\nfrom apps.sources.models.source import Source\nPIECE_TYPES = ('essay', 'Essay'),\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-5": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.sources.models.mixins.page_numbers import PageNumbersMixin\nfrom apps.sources.models.source import Source\n\nPIECE_TYPES = (('essay', 'Essay'),)\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n\n type = models.CharField(\n verbose_name=_('piece type'),\n max_length=TYPE_MAX_LENGTH,\n choices=PIECE_TYPES,\n default=PIECE_TYPES[0][0],\n )\n\n def __html__(self) -> str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [\n self.attributee_html,\n f'\"{self.linked_title}\"',\n self.date.string if self.date else '',\n ]\n return self.components_to_html(components)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# (file name, integer) pairs.
ii = list(zip(
    ['CoolWHM.py', 'SoutRD.py', 'BrewDTO.py', 'FitzRNS2.py', 'LyelCPG3.py', 'TaylIF.py'],
    (1, 1, 2, 1, 1, 2),
))
|
normal
|
{
"blob_id": "fbba928d51ccd08dbac25fcf2098be3a0d494d34",
"index": 6659,
"step-1": "<mask token>\n",
"step-2": "ii = [('CoolWHM.py', 1), ('SoutRD.py', 1), ('BrewDTO.py', 2), (\n 'FitzRNS2.py', 1), ('LyelCPG3.py', 1), ('TaylIF.py', 2)]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import socket
from time import time, sleep
from threading import Thread
# Define drone
class dm107s():
    """Wi-Fi (UDP) controller for a DM107s-style toy quadcopter.

    Holds the four stick axes (roll/pitch/throttle/yaw, neutral = 128) plus
    command/on-off flags, and streams a fixed-layout control packet to
    192.168.100.1:19798 every 20 ms from a background daemon thread.
    """

    # Default control value
    def __init__(self):
        # 4 values for flight; 128 is stick-centered/neutral.
        self.roll = 128
        self.pitch = 128
        self.throttle = 128
        self.yaw = 128
        # 0 - normal mode, 1 - takeoff/land, 2 - emergency stop, 4 - gyroscope calibration
        self.commands = 0
        # Required for wifi control
        self.onoff = 1
        # Prevent multiple takeoff button presses
        self._takeoff_flag = False
        # Prevent multiple calibrate button presses
        self._calibrate_flag = False
        # Connectionless UDP socket used to stream control packets.
        self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        #self.sess.connect(('192.168.100.1', 19798))
        # Timestamps used by Flag_off() to auto-clear one-shot commands.
        self._takeoff_timer = 0
        self._calibrate_timer = 0
        # Flag to stop thread
        self._stopped = False

    # Start separated thread for drone control
    def start(self):
        """Launch the daemon thread that continuously sends control packets."""
        self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
        self._thread.start()
        return self

    # Get command hex for drone
    def get_hex(self):
        """Build the control packet as a hex string.

        Big-endian layout: 0x660a magic, roll, pitch, throttle, yaw,
        commands, onoff*2, 0xffff, one checksum byte, 0x99 trailer.
        """
        # XOR is for checksum
        self.command_out=((26122<<144)|self.roll<<136|self.pitch<<128|self.throttle<<120|self.yaw<<112|self.commands<<104|self.onoff*2<<96|65535<<80|(self.roll^self.pitch^self.throttle^self.yaw^self.commands^(self.onoff*2))<<8|153)
        self.command_out = hex(self.command_out)[2::]
        return self.command_out

    # Turn hex to byte package
    def _get_packet(self):
        """Return the current control packet as raw bytes."""
        self._hex_code = self.get_hex()
        self.package = bytes.fromhex(self._hex_code)
        return self.package

    # Send control to drone
    def send_ctrl(self):
        """Thread body: send the current packet every 20 ms until stopped."""
        while not self._stopped:
            self._package = self._get_packet()
            #self.sess.send(self._package)
            self.sess.sendto(self._package, ('192.168.100.1', 19798))
            self.Flag_off()
            sleep(0.02)

    # Close connection to drone
    def close_connection(self):
        """Stop the sender loop and close the UDP socket."""
        self._stopped = True
        # Daemon threads die with the process; only join a non-daemon sender.
        if self._thread.daemon == False:
            self._thread.join()
        self.sess.close()

    # Return to default
    def default(self):
        """Reset all axes and flags to their neutral startup values."""
        self.roll = 128
        self.pitch = 128
        self.throttle = 128
        self.yaw = 128
        self.commands = 0
        self.onoff = 1
        self._takeoff_flag = False

    # Increment control
    def incremt(self, rl, pt, th, yw):
        """Set axes to neutral (128) plus the given offsets, clamped to 0..255."""
        self._value_to_change = [128, 128, 128, 128]
        self._change_val = [rl, pt, th, yw]
        for x in range(len(self._value_to_change)):
            self._value_to_change[x] += self._change_val[x]
            if self._value_to_change[x] <= 0:
                self._value_to_change[x] = 0
            if self._value_to_change[x] >= 255:
                self._value_to_change[x] = 255
        # NOTE(review): clamps to 0..255 here, while the step methods below
        # clamp to 18..248 -- confirm which range the drone actually accepts.
        [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change

    # Roll right
    def roll_right(self):
        """Nudge roll right by 20, capped at 248."""
        self.roll += 20
        if self.roll > 248:
            self.roll = 248

    # Pitch forward
    def pitch_fwd(self):
        """Nudge pitch forward by 20, capped at 248."""
        self.pitch += 20
        if self.pitch > 248:
            self.pitch = 248

    # Increase throttle
    def throttle_up(self):
        """Nudge throttle up by 20, capped at 248."""
        self.throttle += 20
        if self.throttle > 248:
            self.throttle = 248

    # Yaw right
    def yaw_right(self):
        """Nudge yaw right by 20 (lower value = right), floored at 18."""
        self.yaw -= 20
        if self.yaw < 18:
            self.yaw = 18

    # Roll left
    def roll_left(self):
        """Nudge roll left by 20, floored at 18."""
        self.roll -= 20
        if self.roll < 18:
            self.roll = 18

    # Pitch backward
    def pitch_bwd(self):
        """Nudge pitch backward by 20, floored at 18."""
        self.pitch -= 20
        if self.pitch < 18:
            self.pitch = 18

    # Decrease throttle
    def throttle_dwn(self):
        """Nudge throttle down by 20, floored at 18."""
        self.throttle -= 20
        if self.throttle < 18:
            self.throttle = 18

    # Yaw left
    def yaw_left(self):
        """Nudge yaw left by 20 (higher value = left), capped at 248."""
        self.yaw += 20
        if self.yaw > 248:
            self.yaw = 248

    # Takeoff
    def takeoff(self):
        """Send the one-shot takeoff command (auto-cleared after 1 s)."""
        if self._takeoff_flag == False:
            self.commands = 1
            self._takeoff_flag = True
            self._takeoff_timer = time()

    # Landing
    def land(self):
        """Send the landing command.

        NOTE(review): identical to takeoff() -- command 1 appears to toggle
        takeoff/land on this protocol; confirm against the drone firmware.
        """
        if self._takeoff_flag == False:
            self.commands = 1
            self._takeoff_flag = True
            self._takeoff_timer = time()

    # Flip takeoff flag
    def Flag_off(self):
        """Clear one-shot commands: takeoff after 1 s, calibration after 3 s."""
        if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 1)):
            self.commands = 0
            self._takeoff_flag = False
        if (self._calibrate_flag == True and (time() - self._calibrate_timer >= 3)):
            self.commands = 0
            self.onoff = 1
            self._calibrate_flag = False

    # Stop IMMEDIATELY
    def emergency_stop(self):
        """Cut motors immediately (command 2) and re-center all axes."""
        self.roll = 128
        self.pitch = 128
        self.throttle = 128
        self.yaw = 128
        self.commands = 2
        self.onoff = 1
        self._takeoff_flag = False

    # Calibrate gyroscope
    def calib_gyro(self):
        """Start gyroscope calibration (command 4, onoff 0) for ~3 s."""
        if self._calibrate_flag == False:
            self.roll = 128
            self.pitch = 128
            self.throttle = 128
            self.yaw = 128
            self.commands = 4
            self.onoff = 0
            self._calibrate_flag = True
            self._calibrate_timer = time()
class naza():
    """UDP controller for a NAZA-style flight controller.

    Axes are 4-bit values 0..15 with neutral = 8, packed into a hex string
    and sent to the given ip/port every 50 ms from a daemon thread. Ignition
    sends a literal 'st' message, then ramps the throttle up over ~4 s.
    """

    # Default control value
    def __init__(self, ip, port):
        # 4 values for flight; 8 is stick-centered/neutral.
        self.roll = 8
        self.pitch = 8
        self.throttle = 8
        self.yaw = 8
        # Prevent multiple takeoff button presses
        self._takeoff_flag = False
        # Prevent multiple ignite button presses
        self._ignite_flag = False
        # While True, send_ctrl() transmits the 'st' ignition message instead
        # of the packed axis values.
        self._ignite_send = False
        # Connectionless UDP socket used to stream control packets.
        self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        self.ip = ip
        self.port = port
        #self.sess.connect((ip, port))
        # Timestamps used by Flag_off() to drive the ignition/takeoff ramps.
        self._ignite_timer = 0
        self._takeoff_timer = 0
        # Flag to stop thread
        self._stopped = False

    # Start separated thread for drone control
    def start(self):
        """Launch the daemon thread that continuously sends control packets."""
        self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
        self._thread.start()
        return self

    # Get command hex for drone
    def get_hex(self):
        """Pack the four 4-bit axes (throttle|yaw|pitch|roll) into a hex string."""
        self.command_out=(self.throttle<<12|self.yaw<<8|self.pitch<<4|self.roll)
        self.command_out = hex(self.command_out)[2::]
        return self.command_out

    # Send control to drone
    def send_ctrl(self):
        """Thread body: send 'st' or the packed axes every 50 ms until stopped."""
        while not self._stopped:
            if self._ignite_send == True:
                ignite_msg = 'st'
                self._package = ignite_msg.encode()
            else:
                self._package = self.get_hex().encode()
            #self.sess.send(self._package)
            self.sess.sendto(self._package, (self.ip, self.port))
            self.Flag_off()
            sleep(0.05)

    # Close connection to drone
    def close_connection(self):
        """Stop the sender loop and close the UDP socket."""
        self._stopped = True
        # Daemon threads die with the process; only join a non-daemon sender.
        if self._thread.daemon == False:
            self._thread.join()
        self.sess.close()

    # Return to default
    def default(self):
        """Reset all axes and flags to their neutral startup values."""
        self.roll = 8
        self.pitch = 8
        self.throttle = 8
        self.yaw = 8
        self._takeoff_flag = False
        self._ignite_flag = False

    # Increment control
    def incremt(self, rl, pt, th, yw):
        """Set axes to neutral (8) plus the given offsets, clamped to 0..15."""
        self._value_to_change = [8, 8, 8, 8]
        self._change_val = [rl, pt, th, yw]
        for x in range(len(self._value_to_change)):
            self._value_to_change[x] += self._change_val[x]
            if self._value_to_change[x] <= 0:
                self._value_to_change[x] = 0
            if self._value_to_change[x] >= 15:
                self._value_to_change[x] = 15
        [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change

    # Roll right
    def roll_right(self):
        """Increase roll by one step, capped at 15."""
        if self.roll < 15:
            self.roll += 1

    # Pitch forward
    def pitch_fwd(self):
        """Increase pitch by one step, capped at 15."""
        if self.pitch < 15:
            self.pitch += 1

    # Increase throttle
    def throttle_up(self):
        """Increase throttle by one step, capped at 15."""
        if self.throttle < 15:
            self.throttle += 1

    # Yaw right
    def yaw_right(self):
        """Increase yaw by one step, capped at 15."""
        if self.yaw < 15:
            self.yaw += 1

    # Roll left
    def roll_left(self):
        """Decrease roll by one step, floored at 0."""
        if self.roll > 0:
            self.roll -= 1

    # Pitch backward
    def pitch_bwd(self):
        """Decrease pitch by one step, floored at 0."""
        if self.pitch > 0:
            self.pitch -= 1

    # Decrease throttle
    def throttle_dwn(self):
        """Decrease throttle by one step, floored at 0."""
        if self.throttle > 0:
            self.throttle -= 1

    # Yaw left
    def yaw_left(self):
        """Decrease yaw by one step, floored at 0."""
        if self.yaw > 0:
            self.yaw -= 1

    # Start engine
    def ignite(self):
        """Begin the ignition sequence (send 'st', then ramp throttle)."""
        if self._ignite_flag == False:
            self._ignite_flag = True
            self._ignite_send = True
            self._ignite_timer = time()

    # Takeoff
    def takeoff(self):
        """Raise throttle to 12 for ~4 s (Flag_off() returns it to neutral)."""
        if self._takeoff_flag == False:
            self.throttle = 12
            self._takeoff_flag = True
            self._takeoff_timer = time()

    # Flip takeoff flag
    def Flag_off(self):
        """Advance the timed ignition/takeoff state machine.

        Ignition ramp (relative to ignite()): 'st' for 1 s, then throttle
        0 -> 2 -> 4 -> 6 -> 8 in 0.5 s steps, then automatic takeoff at 4 s.
        Takeoff ends 4 s after it starts, returning throttle to neutral.
        """
        if self._ignite_flag == True:
            if (time() - self._ignite_timer >= 1) and (time() - self._ignite_timer < 1.5):
                self._ignite_send = False
                self.roll = 8
                self.pitch = 8
                self.yaw = 8
                self.throttle = 0
            # Warming up engine
            if (time() - self._ignite_timer >= 1.5) and (time() - self._ignite_timer < 2):
                self.throttle = 2
            if (time() - self._ignite_timer >= 2) and (time() - self._ignite_timer < 2.5):
                self.throttle = 4
            if (time() - self._ignite_timer >= 2.5) and (time() - self._ignite_timer < 3):
                self.throttle = 6
            if (time() - self._ignite_timer >= 3) and (time() - self._ignite_timer < 4):
                self.throttle = 8
            # After starting engine, takeoff after 4s
            if (time() - self._ignite_timer >= 4):
                self._ignite_flag = False
                self.takeoff()
        if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 4)):
            self.throttle = 8
            self._takeoff_flag = False
|
normal
|
{
"blob_id": "ee8e117db0348aa37d6aa37e6c06255101f1cff4",
"index": 2752,
"step-1": "<mask token>\n\n\nclass dm107s:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n 
self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 
2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-2": "<mask token>\n\n\nclass dm107s:\n <mask token>\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n <mask token>\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n <mask token>\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = 
False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if 
self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-3": "<mask token>\n\n\nclass dm107s:\n <mask token>\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n <mask token>\n\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n <mask token>\n <mask token>\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n <mask token>\n\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = 
time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n 
self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-4": "<mask token>\n\n\nclass dm107s:\n\n def __init__(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n self._calibrate_flag = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self._takeoff_timer = 0\n self._calibrate_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (26122 << 144 | self.roll << 136 | self.pitch <<\n 128 | self.throttle << 120 | self.yaw << 112 | self.commands <<\n 104 | self.onoff * 2 << 96 | 65535 << 80 | (self.roll ^ self.\n pitch ^ self.throttle ^ self.yaw ^ self.commands ^ self.onoff *\n 2) << 8 | 153)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n\n def send_ctrl(self):\n while not self._stopped:\n self._package = self._get_packet()\n self.sess.sendto(self._package, ('192.168.100.1', 19798))\n self.Flag_off()\n sleep(0.02)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 
248:\n self.roll = 248\n\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n <mask token>\n\n def emergency_stop(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 2\n self.onoff = 1\n self._takeoff_flag = False\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if 
self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 
and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-5": "import socket\nfrom time import time, sleep\nfrom threading import Thread\n\n# Define drone\nclass dm107s():\n # Default control value\n def __init__(self):\n # 4 values for flight\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n # 0 - normal mode, 2 - emergency stop, 4 - gyroscope calibration\n self.commands = 0\n # Required for wifi control\n self.onoff = 1\n # Prevent multiple takeoff button presses\n self._takeoff_flag = False\n # Prevent multiple calibrate button presses\n self._calibrate_flag = False\n # Connect to UDP port\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n #self.sess.connect(('192.168.100.1', 19798))\n # Initialize timer value\n self._takeoff_timer = 0\n self._calibrate_timer = 0\n # Flag to stop thread\n self._stopped = False\n \n # Start separated thread for drone control\n def start(self): \n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n \n # Get command hex for drone\n def get_hex(self):\n # XOR is for checksum\n self.command_out=((26122<<144)|self.roll<<136|self.pitch<<128|self.throttle<<120|self.yaw<<112|self.commands<<104|self.onoff*2<<96|65535<<80|(self.roll^self.pitch^self.throttle^self.yaw^self.commands^(self.onoff*2))<<8|153)\n self.command_out = hex(self.command_out)[2::]\n return self.command_out\n \n # Turn hex to byte package\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n \n # Send control to drone\n def send_ctrl(self):\n while not self._stopped:\n self._package = self._get_packet()\n #self.sess.send(self._package)\n self.sess.sendto(self._package, ('192.168.100.1', 19798))\n self.Flag_off()\n sleep(0.02)\n \n # Close connection to drone\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n \n # Return to default\n def default(self):\n self.roll = 128\n 
self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n \n # Increment control\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change\n \n # Roll right\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n \n # Pitch forward\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n \n # Increase throttle\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n \n # Yaw right\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n \n # Roll left\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n \n # Pitch backward\n def pitch_bwd(self):\n self.pitch -= 20\n if self.pitch < 18:\n self.pitch = 18\n \n # Decrease throttle\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n \n # Yaw left\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n \n # Takeoff\n def takeoff(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n \n # Landing\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n \n # Flip takeoff flag\n def Flag_off(self):\n if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 1)):\n self.commands = 0\n self._takeoff_flag = False\n if (self._calibrate_flag == True and (time() - self._calibrate_timer >= 3)):\n self.commands = 0\n self.onoff = 1\n 
self._calibrate_flag = False\n\n # Stop IMMEDIATELY\n def emergency_stop(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 2\n self.onoff = 1\n self._takeoff_flag = False\n \n # Calibrate gyroscope\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\nclass naza():\n # Default control value\n def __init__(self, ip, port):\n # 4 values for flight\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n # Prevent multiple takeoff button presses\n self._takeoff_flag = False\n # Prevent multiple ignite button presses\n self._ignite_flag = False\n self._ignite_send = False\n # Connect to UDP port\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n #self.sess.connect((ip, port))\n # Initialize timer value\n self._ignite_timer = 0\n self._takeoff_timer = 0\n # Flag to stop thread\n self._stopped = False\n \n # Start separated thread for drone control\n def start(self): \n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n \n # Get command hex for drone\n def get_hex(self):\n # XOR is for checksum\n self.command_out=(self.throttle<<12|self.yaw<<8|self.pitch<<4|self.roll)\n self.command_out = hex(self.command_out)[2::]\n return self.command_out\n \n # Send control to drone\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n #self.sess.send(self._package)\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n \n # Close connection to drone\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n 
self.sess.close()\n \n # Return to default\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n \n # Increment control\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change\n \n # Roll right\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n \n # Pitch forward\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n \n # Increase throttle\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n \n # Yaw right\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n \n # Roll left\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n \n # Pitch backward\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n \n # Decrease throttle\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n \n # Yaw left\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n \n # Start engine\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n \n # Takeoff\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n \n # Flip takeoff flag\n def Flag_off(self):\n if self._ignite_flag == True:\n if (time() - self._ignite_timer >= 1) and (time() - self._ignite_timer < 1.5):\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n # Warming up engine\n if (time() - self._ignite_timer >= 1.5) and (time() - self._ignite_timer < 2):\n self.throttle = 2\n if (time() 
- self._ignite_timer >= 2) and (time() - self._ignite_timer < 2.5):\n self.throttle = 4\n if (time() - self._ignite_timer >= 2.5) and (time() - self._ignite_timer < 3):\n self.throttle = 6\n if (time() - self._ignite_timer >= 3) and (time() - self._ignite_timer < 4):\n self.throttle = 8\n # After starting engine, takeoff after 4s\n if (time() - self._ignite_timer >= 4):\n self._ignite_flag = False\n self.takeoff()\n if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 4)):\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-ids": [
23,
29,
33,
39,
43
]
}
|
[
23,
29,
33,
39,
43
] |
<|reserved_special_token_0|>
class SequenceList(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def delete_old_and_unattached(cur, hours):
result = []
select_sql = (
"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'"
.format(hours))
cur.execute(select_sql, [])
for row in cur.fetchall():
seq_id = row[0]
cur.execute('delete from sequence_list_item where seq_id = %s',
[seq_id])
cur.execute('delete from sequence_list where id = %s', [seq_id])
return result
class SequenceListItems(object):
"""
Record per sequence name in SequenceList.
Used to lookup sequence for results.
"""
def __init__(self, data):
raise_on_too_big_uploaded_data(data)
self.data = SequenceListItems.make_fasta(data.strip())
self.items = SequenceListItems.find_sequence_items(self.data)
@staticmethod
def make_fasta(data):
"""
Convert string to FASTA if necessary.
:param data: str: input value either FASTA or newline separated sequences
:return: str: FASTA data
"""
result = data
if not data.startswith('>'):
result = ''
cnt = 1
for line in data.split('\n'):
if line:
result += '>seq{}\n'.format(cnt)
result += line
result += '\n'
cnt += 1
return result.strip()
@staticmethod
def find_sequence_items(data):
"""
Parse FASTA data and return a list of {idx, name, sequence}.
:param data: str: FASTA data to parse
:return: [dict]: sequences in the FASTA data
"""
results = []
cnt = 1
seqs = SeqIO.parse(StringIO(data), 'fasta')
for seq in seqs:
results.append({'idx': cnt, 'name': seq.name, 'sequence': str(
seq.seq)})
cnt += 1
SequenceListItems.verify_unique_names(results)
return results
@staticmethod
def verify_unique_names(items):
"""
Make sure that we don't have any duplicate names in the list.
Raises UserFacingException if the names are duplicated.
:param items: [{}]: list of dictionaries with name property to check
"""
unique_names = set([item['name'] for item in items])
if len(unique_names) != len(items):
raise ClientException('Error: Duplicate sequence names found.',
ErrorType.INVALID_SEQUENCE_DATA)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SequenceList(object):
<|reserved_special_token_0|>
def __init__(self, seq_uuid):
"""
Setup sequence list with primary key seq_uuid.
:param seq_uuid: str: uuid that uniquely represents this list.
"""
if not seq_uuid:
raise ValueError("SequenceList uuid must have a value yours:'{}'."
.format(seq_uuid))
self.seq_uuid = seq_uuid
self.content = None
self.created = None
self.title = None
def insert(self, db):
"""
Save self.contents to the database under self.seq_uuid.
:param db: database connection
"""
if not self.content:
raise ValueError(
'SequenceList content property must be filled in before calling save.'
)
if not self.title:
raise ValueError(
'SequenceList title property must be filled in before calling save.'
)
seq_item_list = SequenceListItems(self.content)
cur = db.cursor()
self._insert_data(cur, seq_item_list, self.title)
cur.close()
db.commit()
def _insert_data(self, cur, item_list, title):
cur.execute(
'insert into sequence_list(id, data, title) values(%s, %s, %s)',
[self.seq_uuid, item_list.data, title])
for item in item_list.items:
cur.execute(
'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'
, [self.seq_uuid, item['idx'], item['name'], item['sequence']])
def load(self, db):
"""
Load self.contents from the database based on self.seq_uuid.
:param db: database connection
"""
rows = read_database(db,
'select data, created, title from sequence_list where id = %s',
[self.seq_uuid])
if not rows:
raise KeyError('Unable to find sequence for {}'.format(self.
seq_uuid))
first_row = rows[0]
self.content = first_row[0]
self.created = first_row[1]
self.title = first_row[2]
@staticmethod
def create_with_content_and_title(db, content, title):
"""
Saves content into the database under a new uuid.
:param db: database connection
:param content: str: FASTA file data to save in the database
:return: str: new uuid created for this content
"""
sequence_list = SequenceList(str(uuid.uuid1()))
sequence_list.content = content
sequence_list.title = title
sequence_list.insert(db)
return sequence_list.seq_uuid
@staticmethod
def read_list(db, seq_uuid):
"""
Lookup the content from the database via the seq_uuid provided.
:param db: database connection
:param seq_uuid: str: uuid to lookup
:return: str: FASTA file data associated with the seq_uuid
"""
sequence_list = SequenceList(seq_uuid)
sequence_list.load(db)
return sequence_list
@staticmethod
def delete_old_and_unattached(cur, hours):
result = []
select_sql = (
"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'"
.format(hours))
cur.execute(select_sql, [])
for row in cur.fetchall():
seq_id = row[0]
cur.execute('delete from sequence_list_item where seq_id = %s',
[seq_id])
cur.execute('delete from sequence_list where id = %s', [seq_id])
return result
class SequenceListItems(object):
"""
Record per sequence name in SequenceList.
Used to lookup sequence for results.
"""
def __init__(self, data):
raise_on_too_big_uploaded_data(data)
self.data = SequenceListItems.make_fasta(data.strip())
self.items = SequenceListItems.find_sequence_items(self.data)
@staticmethod
def make_fasta(data):
"""
Convert string to FASTA if necessary.
:param data: str: input value either FASTA or newline separated sequences
:return: str: FASTA data
"""
result = data
if not data.startswith('>'):
result = ''
cnt = 1
for line in data.split('\n'):
if line:
result += '>seq{}\n'.format(cnt)
result += line
result += '\n'
cnt += 1
return result.strip()
@staticmethod
def find_sequence_items(data):
"""
Parse FASTA data and return a list of {idx, name, sequence}.
:param data: str: FASTA data to parse
:return: [dict]: sequences in the FASTA data
"""
results = []
cnt = 1
seqs = SeqIO.parse(StringIO(data), 'fasta')
for seq in seqs:
results.append({'idx': cnt, 'name': seq.name, 'sequence': str(
seq.seq)})
cnt += 1
SequenceListItems.verify_unique_names(results)
return results
@staticmethod
def verify_unique_names(items):
"""
Make sure that we don't have any duplicate names in the list.
Raises UserFacingException if the names are duplicated.
:param items: [{}]: list of dictionaries with name property to check
"""
unique_names = set([item['name'] for item in items])
if len(unique_names) != len(items):
raise ClientException('Error: Duplicate sequence names found.',
ErrorType.INVALID_SEQUENCE_DATA)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SequenceList(object):
"""
CRUD for managing FASTA file contents in the database.
"""
def __init__(self, seq_uuid):
"""
Setup sequence list with primary key seq_uuid.
:param seq_uuid: str: uuid that uniquely represents this list.
"""
if not seq_uuid:
raise ValueError("SequenceList uuid must have a value yours:'{}'."
.format(seq_uuid))
self.seq_uuid = seq_uuid
self.content = None
self.created = None
self.title = None
def insert(self, db):
"""
Save self.contents to the database under self.seq_uuid.
:param db: database connection
"""
if not self.content:
raise ValueError(
'SequenceList content property must be filled in before calling save.'
)
if not self.title:
raise ValueError(
'SequenceList title property must be filled in before calling save.'
)
seq_item_list = SequenceListItems(self.content)
cur = db.cursor()
self._insert_data(cur, seq_item_list, self.title)
cur.close()
db.commit()
def _insert_data(self, cur, item_list, title):
cur.execute(
'insert into sequence_list(id, data, title) values(%s, %s, %s)',
[self.seq_uuid, item_list.data, title])
for item in item_list.items:
cur.execute(
'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'
, [self.seq_uuid, item['idx'], item['name'], item['sequence']])
def load(self, db):
"""
Load self.contents from the database based on self.seq_uuid.
:param db: database connection
"""
rows = read_database(db,
'select data, created, title from sequence_list where id = %s',
[self.seq_uuid])
if not rows:
raise KeyError('Unable to find sequence for {}'.format(self.
seq_uuid))
first_row = rows[0]
self.content = first_row[0]
self.created = first_row[1]
self.title = first_row[2]
@staticmethod
def create_with_content_and_title(db, content, title):
"""
Saves content into the database under a new uuid.
:param db: database connection
:param content: str: FASTA file data to save in the database
:return: str: new uuid created for this content
"""
sequence_list = SequenceList(str(uuid.uuid1()))
sequence_list.content = content
sequence_list.title = title
sequence_list.insert(db)
return sequence_list.seq_uuid
@staticmethod
def read_list(db, seq_uuid):
"""
Lookup the content from the database via the seq_uuid provided.
:param db: database connection
:param seq_uuid: str: uuid to lookup
:return: str: FASTA file data associated with the seq_uuid
"""
sequence_list = SequenceList(seq_uuid)
sequence_list.load(db)
return sequence_list
@staticmethod
def delete_old_and_unattached(cur, hours):
result = []
select_sql = (
"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'"
.format(hours))
cur.execute(select_sql, [])
for row in cur.fetchall():
seq_id = row[0]
cur.execute('delete from sequence_list_item where seq_id = %s',
[seq_id])
cur.execute('delete from sequence_list where id = %s', [seq_id])
return result
class SequenceListItems(object):
"""
Record per sequence name in SequenceList.
Used to lookup sequence for results.
"""
def __init__(self, data):
raise_on_too_big_uploaded_data(data)
self.data = SequenceListItems.make_fasta(data.strip())
self.items = SequenceListItems.find_sequence_items(self.data)
@staticmethod
def make_fasta(data):
"""
Convert string to FASTA if necessary.
:param data: str: input value either FASTA or newline separated sequences
:return: str: FASTA data
"""
result = data
if not data.startswith('>'):
result = ''
cnt = 1
for line in data.split('\n'):
if line:
result += '>seq{}\n'.format(cnt)
result += line
result += '\n'
cnt += 1
return result.strip()
@staticmethod
def find_sequence_items(data):
"""
Parse FASTA data and return a list of {idx, name, sequence}.
:param data: str: FASTA data to parse
:return: [dict]: sequences in the FASTA data
"""
results = []
cnt = 1
seqs = SeqIO.parse(StringIO(data), 'fasta')
for seq in seqs:
results.append({'idx': cnt, 'name': seq.name, 'sequence': str(
seq.seq)})
cnt += 1
SequenceListItems.verify_unique_names(results)
return results
@staticmethod
def verify_unique_names(items):
"""
Make sure that we don't have any duplicate names in the list.
Raises UserFacingException if the names are duplicated.
:param items: [{}]: list of dictionaries with name property to check
"""
unique_names = set([item['name'] for item in items])
if len(unique_names) != len(items):
raise ClientException('Error: Duplicate sequence names found.',
ErrorType.INVALID_SEQUENCE_DATA)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import uuid
from pred.webserver.errors import ClientException, ErrorType, raise_on_too_big_uploaded_data
from pred.queries.dbutil import update_database, read_database
from Bio import SeqIO
from io import StringIO
class SequenceList(object):
"""
CRUD for managing FASTA file contents in the database.
"""
def __init__(self, seq_uuid):
"""
Setup sequence list with primary key seq_uuid.
:param seq_uuid: str: uuid that uniquely represents this list.
"""
if not seq_uuid:
raise ValueError("SequenceList uuid must have a value yours:'{}'."
.format(seq_uuid))
self.seq_uuid = seq_uuid
self.content = None
self.created = None
self.title = None
def insert(self, db):
"""
Save self.contents to the database under self.seq_uuid.
:param db: database connection
"""
if not self.content:
raise ValueError(
'SequenceList content property must be filled in before calling save.'
)
if not self.title:
raise ValueError(
'SequenceList title property must be filled in before calling save.'
)
seq_item_list = SequenceListItems(self.content)
cur = db.cursor()
self._insert_data(cur, seq_item_list, self.title)
cur.close()
db.commit()
def _insert_data(self, cur, item_list, title):
cur.execute(
'insert into sequence_list(id, data, title) values(%s, %s, %s)',
[self.seq_uuid, item_list.data, title])
for item in item_list.items:
cur.execute(
'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'
, [self.seq_uuid, item['idx'], item['name'], item['sequence']])
def load(self, db):
"""
Load self.contents from the database based on self.seq_uuid.
:param db: database connection
"""
rows = read_database(db,
'select data, created, title from sequence_list where id = %s',
[self.seq_uuid])
if not rows:
raise KeyError('Unable to find sequence for {}'.format(self.
seq_uuid))
first_row = rows[0]
self.content = first_row[0]
self.created = first_row[1]
self.title = first_row[2]
@staticmethod
def create_with_content_and_title(db, content, title):
"""
Saves content into the database under a new uuid.
:param db: database connection
:param content: str: FASTA file data to save in the database
:return: str: new uuid created for this content
"""
sequence_list = SequenceList(str(uuid.uuid1()))
sequence_list.content = content
sequence_list.title = title
sequence_list.insert(db)
return sequence_list.seq_uuid
@staticmethod
def read_list(db, seq_uuid):
"""
Lookup the content from the database via the seq_uuid provided.
:param db: database connection
:param seq_uuid: str: uuid to lookup
:return: str: FASTA file data associated with the seq_uuid
"""
sequence_list = SequenceList(seq_uuid)
sequence_list.load(db)
return sequence_list
@staticmethod
def delete_old_and_unattached(cur, hours):
result = []
select_sql = (
"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'"
.format(hours))
cur.execute(select_sql, [])
for row in cur.fetchall():
seq_id = row[0]
cur.execute('delete from sequence_list_item where seq_id = %s',
[seq_id])
cur.execute('delete from sequence_list where id = %s', [seq_id])
return result
class SequenceListItems(object):
"""
Record per sequence name in SequenceList.
Used to lookup sequence for results.
"""
def __init__(self, data):
raise_on_too_big_uploaded_data(data)
self.data = SequenceListItems.make_fasta(data.strip())
self.items = SequenceListItems.find_sequence_items(self.data)
@staticmethod
def make_fasta(data):
"""
Convert string to FASTA if necessary.
:param data: str: input value either FASTA or newline separated sequences
:return: str: FASTA data
"""
result = data
if not data.startswith('>'):
result = ''
cnt = 1
for line in data.split('\n'):
if line:
result += '>seq{}\n'.format(cnt)
result += line
result += '\n'
cnt += 1
return result.strip()
@staticmethod
def find_sequence_items(data):
"""
Parse FASTA data and return a list of {idx, name, sequence}.
:param data: str: FASTA data to parse
:return: [dict]: sequences in the FASTA data
"""
results = []
cnt = 1
seqs = SeqIO.parse(StringIO(data), 'fasta')
for seq in seqs:
results.append({'idx': cnt, 'name': seq.name, 'sequence': str(
seq.seq)})
cnt += 1
SequenceListItems.verify_unique_names(results)
return results
@staticmethod
def verify_unique_names(items):
"""
Make sure that we don't have any duplicate names in the list.
Raises UserFacingException if the names are duplicated.
:param items: [{}]: list of dictionaries with name property to check
"""
unique_names = set([item['name'] for item in items])
if len(unique_names) != len(items):
raise ClientException('Error: Duplicate sequence names found.',
ErrorType.INVALID_SEQUENCE_DATA)
<|reserved_special_token_1|>
"""
Stores custom FASTA sequences under a uuid in the database.
Part of the tables used for custom jobs.
"""
import uuid
from pred.webserver.errors import ClientException, ErrorType, raise_on_too_big_uploaded_data
from pred.queries.dbutil import update_database, read_database
from Bio import SeqIO
from io import StringIO
class SequenceList(object):
    """
    CRUD for managing FASTA file contents in the database.
    """
    def __init__(self, seq_uuid):
        """
        Setup sequence list with primary key seq_uuid.
        Raises ValueError when seq_uuid is empty.
        :param seq_uuid: str: uuid that uniquely represents this list.
        """
        if not seq_uuid:
            raise ValueError("SequenceList uuid must have a value yours:'{}'.".format(seq_uuid))
        self.seq_uuid = seq_uuid
        # content/created/title are filled in by load(), or by the caller before insert()
        self.content = None
        self.created = None
        self.title = None

    def insert(self, db):
        """
        Save self.content and self.title to the database under self.seq_uuid.
        Also parses content into per-sequence rows (sequence_list_item) for later lookup.
        Raises ValueError when content or title have not been filled in.
        :param db: database connection
        """
        if not self.content:
            raise ValueError("SequenceList content property must be filled in before calling save.")
        if not self.title:
            raise ValueError("SequenceList title property must be filled in before calling save.")
        seq_item_list = SequenceListItems(self.content)
        cur = db.cursor()
        try:
            self._insert_data(cur, seq_item_list, self.title)
        finally:
            # close the cursor even if an insert fails, so we don't leak it
            cur.close()
        db.commit()

    def _insert_data(self, cur, item_list, title):
        """
        Insert one sequence_list row plus one sequence_list_item row per sequence.
        :param cur: database cursor
        :param item_list: SequenceListItems: parsed FASTA content
        :param title: str: title to store with the list
        """
        cur.execute("insert into sequence_list(id, data, title) values(%s, %s, %s)",
                    [self.seq_uuid, item_list.data, title])
        for item in item_list.items:
            cur.execute("insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)",
                        [self.seq_uuid, item['idx'], item['name'], item['sequence']])

    def load(self, db):
        """
        Load self.content/self.created/self.title from the database based on self.seq_uuid.
        Raises KeyError when self.seq_uuid is not in the database.
        :param db: database connection
        """
        rows = read_database(db, "select data, created, title from sequence_list where id = %s", [self.seq_uuid])
        if not rows:
            raise KeyError("Unable to find sequence for {}".format(self.seq_uuid))
        first_row = rows[0]
        self.content = first_row[0]
        self.created = first_row[1]
        self.title = first_row[2]

    @staticmethod
    def create_with_content_and_title(db, content, title):
        """
        Saves content into the database under a new uuid.
        :param db: database connection
        :param content: str: FASTA file data to save in the database
        :param title: str: title describing the content
        :return: str: new uuid created for this content
        """
        sequence_list = SequenceList(str(uuid.uuid1()))
        sequence_list.content = content
        sequence_list.title = title
        sequence_list.insert(db)
        return sequence_list.seq_uuid

    @staticmethod
    def read_list(db, seq_uuid):
        """
        Lookup the content from the database via the seq_uuid provided.
        :param db: database connection
        :param seq_uuid: str: uuid to lookup
        :return: SequenceList: list (content/created/title) associated with the seq_uuid
        """
        sequence_list = SequenceList(seq_uuid)
        sequence_list.load(db)
        return sequence_list

    @staticmethod
    def delete_old_and_unattached(cur, hours):
        """
        Delete sequence lists (and their items) older than `hours` that no job references.
        The caller is responsible for committing the transaction.
        :param cur: database cursor
        :param hours: int: age threshold in hours
        :return: [str]: ids of the sequence lists that were deleted
        """
        result = []
        # Bind `hours` as a query parameter instead of str.format-ing it into the SQL.
        select_sql = "select sequence_list.id from sequence_list " \
                     " left outer join job on sequence_list.id = job.seq_id " \
                     " where job.id is null " \
                     " and CURRENT_TIMESTAMP - sequence_list.created > %s * interval '1 hour'"
        cur.execute(select_sql, [hours])
        for row in cur.fetchall():
            seq_id = row[0]
            cur.execute("delete from sequence_list_item where seq_id = %s", [seq_id])
            cur.execute("delete from sequence_list where id = %s", [seq_id])
            # previously a dead accumulator: record what was actually deleted
            result.append(seq_id)
        return result
class SequenceListItems(object):
    """
    Record per sequence name in SequenceList.
    Used to lookup sequence for results.
    """

    def __init__(self, data):
        # Reject oversized payloads before doing any parsing work.
        raise_on_too_big_uploaded_data(data)
        self.data = SequenceListItems.make_fasta(data.strip())
        self.items = SequenceListItems.find_sequence_items(self.data)

    @staticmethod
    def make_fasta(data):
        """
        Convert string to FASTA if necessary.

        :param data: str: input value either FASTA or newline separated sequences
        :return: str: FASTA data
        """
        if data.startswith(">"):
            # Already FASTA formatted; just trim surrounding whitespace.
            return data.strip()
        # Plain newline-separated sequences: synthesize ">seqN" headers,
        # numbering only the non-empty lines.
        pieces = []
        non_empty = (line for line in data.split('\n') if line)
        for seq_number, raw_line in enumerate(non_empty, start=1):
            pieces.append(">seq{}\n".format(seq_number))
            pieces.append(raw_line)
            pieces.append("\n")
        return "".join(pieces).strip()

    @staticmethod
    def find_sequence_items(data):
        """
        Parse FASTA data and return a list of {idx, name, sequence}.

        :param data: str: FASTA data to parse
        :return: [dict]: sequences in the FASTA data
        """
        parsed = SeqIO.parse(StringIO(data), 'fasta')
        results = [
            {'idx': position, 'name': record.name, 'sequence': str(record.seq)}
            for position, record in enumerate(parsed, start=1)
        ]
        SequenceListItems.verify_unique_names(results)
        return results

    @staticmethod
    def verify_unique_names(items):
        """
        Make sure that we don't have any duplicate names in the list.
        Raises ClientException if the names are duplicated.

        :param items: [{}]: list of dictionaries with name property to check
        """
        seen = set()
        for item in items:
            if item['name'] in seen:
                raise ClientException("Error: Duplicate sequence names found.",
                                      ErrorType.INVALID_SEQUENCE_DATA)
            seen.add(item['name'])
|
flexible
|
{
"blob_id": "2e744c0cbddf64a9c538c9f33fa19ff78c515012",
"index": 6797,
"step-1": "<mask token>\n\n\nclass SequenceList(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = (\n \"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\"\n .format(hours))\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute('delete from sequence_list_item where seq_id = %s',\n [seq_id])\n cur.execute('delete from sequence_list where id = %s', [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith('>'):\n result = ''\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += '>seq{}\\n'.format(cnt)\n result += line\n result += '\\n'\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({'idx': cnt, 'name': seq.name, 'sequence': str(\n seq.seq)})\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that 
we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException('Error: Duplicate sequence names found.',\n ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-2": "<mask token>\n\n\nclass SequenceList(object):\n <mask token>\n\n def __init__(self, seq_uuid):\n \"\"\"\n Setup sequence list with primary key seq_uuid.\n :param seq_uuid: str: uuid that uniquely represents this list.\n \"\"\"\n if not seq_uuid:\n raise ValueError(\"SequenceList uuid must have a value yours:'{}'.\"\n .format(seq_uuid))\n self.seq_uuid = seq_uuid\n self.content = None\n self.created = None\n self.title = None\n\n def insert(self, db):\n \"\"\"\n Save self.contents to the database under self.seq_uuid.\n :param db: database connection\n \"\"\"\n if not self.content:\n raise ValueError(\n 'SequenceList content property must be filled in before calling save.'\n )\n if not self.title:\n raise ValueError(\n 'SequenceList title property must be filled in before calling save.'\n )\n seq_item_list = SequenceListItems(self.content)\n cur = db.cursor()\n self._insert_data(cur, seq_item_list, self.title)\n cur.close()\n db.commit()\n\n def _insert_data(self, cur, item_list, title):\n cur.execute(\n 'insert into sequence_list(id, data, title) values(%s, %s, %s)',\n [self.seq_uuid, item_list.data, title])\n for item in item_list.items:\n cur.execute(\n 'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'\n , [self.seq_uuid, item['idx'], item['name'], item['sequence']])\n\n def load(self, db):\n \"\"\"\n Load self.contents from the database based on self.seq_uuid.\n :param db: database connection\n \"\"\"\n rows = read_database(db,\n 'select data, created, title from sequence_list where id = %s',\n [self.seq_uuid])\n if not rows:\n raise KeyError('Unable to find sequence for {}'.format(self.\n seq_uuid))\n first_row = rows[0]\n self.content = first_row[0]\n self.created = first_row[1]\n self.title = first_row[2]\n\n @staticmethod\n def create_with_content_and_title(db, content, title):\n \"\"\"\n Saves content into the database under a new uuid.\n :param db: database connection\n :param content: str: FASTA file data to 
save in the database\n :return: str: new uuid created for this content\n \"\"\"\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid\n\n @staticmethod\n def read_list(db, seq_uuid):\n \"\"\"\n Lookup the content from the database via the seq_uuid provided.\n :param db: database connection\n :param seq_uuid: str: uuid to lookup\n :return: str: FASTA file data associated with the seq_uuid\n \"\"\"\n sequence_list = SequenceList(seq_uuid)\n sequence_list.load(db)\n return sequence_list\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = (\n \"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\"\n .format(hours))\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute('delete from sequence_list_item where seq_id = %s',\n [seq_id])\n cur.execute('delete from sequence_list where id = %s', [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith('>'):\n result = ''\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += '>seq{}\\n'.format(cnt)\n result += line\n result += '\\n'\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list 
of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({'idx': cnt, 'name': seq.name, 'sequence': str(\n seq.seq)})\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException('Error: Duplicate sequence names found.',\n ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-3": "<mask token>\n\n\nclass SequenceList(object):\n \"\"\"\n CRUD for managing FASTA file contents in the database.\n \"\"\"\n\n def __init__(self, seq_uuid):\n \"\"\"\n Setup sequence list with primary key seq_uuid.\n :param seq_uuid: str: uuid that uniquely represents this list.\n \"\"\"\n if not seq_uuid:\n raise ValueError(\"SequenceList uuid must have a value yours:'{}'.\"\n .format(seq_uuid))\n self.seq_uuid = seq_uuid\n self.content = None\n self.created = None\n self.title = None\n\n def insert(self, db):\n \"\"\"\n Save self.contents to the database under self.seq_uuid.\n :param db: database connection\n \"\"\"\n if not self.content:\n raise ValueError(\n 'SequenceList content property must be filled in before calling save.'\n )\n if not self.title:\n raise ValueError(\n 'SequenceList title property must be filled in before calling save.'\n )\n seq_item_list = SequenceListItems(self.content)\n cur = db.cursor()\n self._insert_data(cur, seq_item_list, self.title)\n cur.close()\n db.commit()\n\n def _insert_data(self, cur, item_list, title):\n cur.execute(\n 'insert into sequence_list(id, data, title) values(%s, %s, %s)',\n [self.seq_uuid, item_list.data, title])\n for item in item_list.items:\n cur.execute(\n 'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'\n , [self.seq_uuid, item['idx'], item['name'], item['sequence']])\n\n def load(self, db):\n \"\"\"\n Load self.contents from the database based on self.seq_uuid.\n :param db: database connection\n \"\"\"\n rows = read_database(db,\n 'select data, created, title from sequence_list where id = %s',\n [self.seq_uuid])\n if not rows:\n raise KeyError('Unable to find sequence for {}'.format(self.\n seq_uuid))\n first_row = rows[0]\n self.content = first_row[0]\n self.created = first_row[1]\n self.title = first_row[2]\n\n @staticmethod\n def create_with_content_and_title(db, content, title):\n \"\"\"\n Saves content into the database under a new uuid.\n :param db: 
database connection\n :param content: str: FASTA file data to save in the database\n :return: str: new uuid created for this content\n \"\"\"\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid\n\n @staticmethod\n def read_list(db, seq_uuid):\n \"\"\"\n Lookup the content from the database via the seq_uuid provided.\n :param db: database connection\n :param seq_uuid: str: uuid to lookup\n :return: str: FASTA file data associated with the seq_uuid\n \"\"\"\n sequence_list = SequenceList(seq_uuid)\n sequence_list.load(db)\n return sequence_list\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = (\n \"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\"\n .format(hours))\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute('delete from sequence_list_item where seq_id = %s',\n [seq_id])\n cur.execute('delete from sequence_list where id = %s', [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith('>'):\n result = ''\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += '>seq{}\\n'.format(cnt)\n result += line\n result += '\\n'\n cnt += 1\n return result.strip()\n\n @staticmethod\n def 
find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({'idx': cnt, 'name': seq.name, 'sequence': str(\n seq.seq)})\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException('Error: Duplicate sequence names found.',\n ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-4": "<mask token>\nimport uuid\nfrom pred.webserver.errors import ClientException, ErrorType, raise_on_too_big_uploaded_data\nfrom pred.queries.dbutil import update_database, read_database\nfrom Bio import SeqIO\nfrom io import StringIO\n\n\nclass SequenceList(object):\n \"\"\"\n CRUD for managing FASTA file contents in the database.\n \"\"\"\n\n def __init__(self, seq_uuid):\n \"\"\"\n Setup sequence list with primary key seq_uuid.\n :param seq_uuid: str: uuid that uniquely represents this list.\n \"\"\"\n if not seq_uuid:\n raise ValueError(\"SequenceList uuid must have a value yours:'{}'.\"\n .format(seq_uuid))\n self.seq_uuid = seq_uuid\n self.content = None\n self.created = None\n self.title = None\n\n def insert(self, db):\n \"\"\"\n Save self.contents to the database under self.seq_uuid.\n :param db: database connection\n \"\"\"\n if not self.content:\n raise ValueError(\n 'SequenceList content property must be filled in before calling save.'\n )\n if not self.title:\n raise ValueError(\n 'SequenceList title property must be filled in before calling save.'\n )\n seq_item_list = SequenceListItems(self.content)\n cur = db.cursor()\n self._insert_data(cur, seq_item_list, self.title)\n cur.close()\n db.commit()\n\n def _insert_data(self, cur, item_list, title):\n cur.execute(\n 'insert into sequence_list(id, data, title) values(%s, %s, %s)',\n [self.seq_uuid, item_list.data, title])\n for item in item_list.items:\n cur.execute(\n 'insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)'\n , [self.seq_uuid, item['idx'], item['name'], item['sequence']])\n\n def load(self, db):\n \"\"\"\n Load self.contents from the database based on self.seq_uuid.\n :param db: database connection\n \"\"\"\n rows = read_database(db,\n 'select data, created, title from sequence_list where id = %s',\n [self.seq_uuid])\n if not rows:\n raise KeyError('Unable to find sequence for {}'.format(self.\n seq_uuid))\n first_row = rows[0]\n self.content = 
first_row[0]\n self.created = first_row[1]\n self.title = first_row[2]\n\n @staticmethod\n def create_with_content_and_title(db, content, title):\n \"\"\"\n Saves content into the database under a new uuid.\n :param db: database connection\n :param content: str: FASTA file data to save in the database\n :return: str: new uuid created for this content\n \"\"\"\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid\n\n @staticmethod\n def read_list(db, seq_uuid):\n \"\"\"\n Lookup the content from the database via the seq_uuid provided.\n :param db: database connection\n :param seq_uuid: str: uuid to lookup\n :return: str: FASTA file data associated with the seq_uuid\n \"\"\"\n sequence_list = SequenceList(seq_uuid)\n sequence_list.load(db)\n return sequence_list\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = (\n \"select sequence_list.id from sequence_list left outer join job on sequence_list.id = job.seq_id where job.id is null and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\"\n .format(hours))\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute('delete from sequence_list_item where seq_id = %s',\n [seq_id])\n cur.execute('delete from sequence_list where id = %s', [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not 
data.startswith('>'):\n result = ''\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += '>seq{}\\n'.format(cnt)\n result += line\n result += '\\n'\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({'idx': cnt, 'name': seq.name, 'sequence': str(\n seq.seq)})\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException('Error: Duplicate sequence names found.',\n ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-5": "\"\"\"\nStores custom FASTA sequences under a uuid in the database.\nPart of the tables used for custom jobs.\n\"\"\"\nimport uuid\nfrom pred.webserver.errors import ClientException, ErrorType, raise_on_too_big_uploaded_data\nfrom pred.queries.dbutil import update_database, read_database\nfrom Bio import SeqIO\nfrom io import StringIO\n\nclass SequenceList(object):\n \"\"\"\n CRUD for managing FASTA file contents in the database.\n \"\"\"\n def __init__(self, seq_uuid):\n \"\"\"\n Setup sequence list with primary key seq_uuid.\n :param seq_uuid: str: uuid that uniquely represents this list.\n \"\"\"\n if not seq_uuid:\n raise ValueError(\"SequenceList uuid must have a value yours:'{}'.\".format(seq_uuid))\n self.seq_uuid = seq_uuid\n self.content = None\n self.created = None\n self.title = None\n\n def insert(self, db):\n \"\"\"\n Save self.contents to the database under self.seq_uuid.\n :param db: database connection\n \"\"\"\n if not self.content:\n raise ValueError(\"SequenceList content property must be filled in before calling save.\")\n if not self.title:\n raise ValueError(\"SequenceList title property must be filled in before calling save.\")\n seq_item_list = SequenceListItems(self.content)\n cur = db.cursor()\n self._insert_data(cur, seq_item_list, self.title)\n cur.close()\n db.commit()\n\n def _insert_data(self, cur, item_list, title):\n cur.execute(\"insert into sequence_list(id, data, title) values(%s, %s, %s)\",\n [self.seq_uuid, item_list.data, title])\n for item in item_list.items:\n cur.execute(\"insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)\",\n [self.seq_uuid, item['idx'], item['name'], item['sequence']])\n\n def load(self, db):\n \"\"\"\n Load self.contents from the database based on self.seq_uuid.\n :param db: database connection\n \"\"\"\n rows = read_database(db, \"select data, created, title from sequence_list where id = %s\", [self.seq_uuid])\n if not rows:\n raise KeyError(\"Unable to find 
sequence for {}\".format(self.seq_uuid))\n first_row = rows[0]\n self.content = first_row[0]\n self.created = first_row[1]\n self.title = first_row[2]\n\n @staticmethod\n def create_with_content_and_title(db, content, title):\n \"\"\"\n Saves content into the database under a new uuid.\n :param db: database connection\n :param content: str: FASTA file data to save in the database\n :return: str: new uuid created for this content\n \"\"\"\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid\n\n @staticmethod\n def read_list(db, seq_uuid):\n \"\"\"\n Lookup the content from the database via the seq_uuid provided.\n :param db: database connection\n :param seq_uuid: str: uuid to lookup\n :return: str: FASTA file data associated with the seq_uuid\n \"\"\"\n sequence_list = SequenceList(seq_uuid)\n sequence_list.load(db)\n return sequence_list\n\n @staticmethod\n def delete_old_and_unattached(cur, hours):\n result = []\n select_sql = \"select sequence_list.id from sequence_list \" \\\n \" left outer join job on sequence_list.id = job.seq_id \" \\\n \" where job.id is null \" \\\n \" and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'\".format(hours)\n cur.execute(select_sql, [])\n for row in cur.fetchall():\n seq_id = row[0]\n cur.execute(\"delete from sequence_list_item where seq_id = %s\", [seq_id])\n cur.execute(\"delete from sequence_list where id = %s\", [seq_id])\n return result\n\n\nclass SequenceListItems(object):\n \"\"\"\n Record per sequence name in SequenceList.\n Used to lookup sequence for results.\n \"\"\"\n def __init__(self, data):\n raise_on_too_big_uploaded_data(data)\n self.data = SequenceListItems.make_fasta(data.strip())\n self.items = SequenceListItems.find_sequence_items(self.data)\n\n @staticmethod\n def make_fasta(data):\n \"\"\"\n Convert string to FASTA if necessary.\n :param data: str: input value either 
FASTA or newline separated sequences\n :return: str: FASTA data\n \"\"\"\n result = data\n if not data.startswith(\">\"):\n result = \"\"\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += \">seq{}\\n\".format(cnt)\n result += line\n result += \"\\n\"\n cnt += 1\n return result.strip()\n\n @staticmethod\n def find_sequence_items(data):\n \"\"\"\n Parse FASTA data and return a list of {idx, name, sequence}.\n :param data: str: FASTA data to parse\n :return: [dict]: sequences in the FASTA data\n \"\"\"\n results = []\n cnt = 1\n seqs = SeqIO.parse(StringIO(data), 'fasta')\n for seq in seqs:\n results.append({\n 'idx': cnt,\n 'name': seq.name,\n 'sequence': str(seq.seq)\n })\n cnt += 1\n SequenceListItems.verify_unique_names(results)\n return results\n\n @staticmethod\n def verify_unique_names(items):\n \"\"\"\n Make sure that we don't have any duplicate names in the list.\n Raises UserFacingException if the names are duplicated.\n :param items: [{}]: list of dictionaries with name property to check\n \"\"\"\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException(\"Error: Duplicate sequence names found.\", ErrorType.INVALID_SEQUENCE_DATA)\n",
"step-ids": [
8,
14,
15,
16,
17
]
}
|
[
8,
14,
15,
16,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('circumference is ', circumference)
print('diameter is: ', diameter)
print('area is ', area)
<|reserved_special_token_1|>
radius = int(input('enter the value for the radius of the cycle: '))
circumference = 2 * 3.14159 * radius
diameter = 2 * radius
area = 3.14159 * radius ** 2
print('circumference is ', circumference)
print('diameter is: ', diameter)
print('area is ', area)
<|reserved_special_token_1|>
# Simple circle calculator: reads a radius and prints derived measurements.
# NOTE(review): the prompt says "cycle" — likely a typo for "circle".
radius = int(input("enter the value for the radius of the cycle: "))
# Hard-coded 5-digit approximation of pi is used throughout.
circumference = 2 * 3.14159 * radius
diameter = 2 * radius
area = 3.14159 * radius ** 2

print('circumference is ', circumference)
print('diameter is: ', diameter)
print('area is ', area)
|
flexible
|
{
"blob_id": "ab5412a3d22bd53a592c93bad4870b06fd9f0720",
"index": 4080,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('circumference is ', circumference)\nprint('diameter is: ', diameter)\nprint('area is ', area)\n",
"step-3": "radius = int(input('enter the value for the radius of the cycle: '))\ncircumference = 2 * 3.14159 * radius\ndiameter = 2 * radius\narea = 3.14159 * radius ** 2\nprint('circumference is ', circumference)\nprint('diameter is: ', diameter)\nprint('area is ', area)\n",
"step-4": "radius = int(input(\"enter the value for the radius of the cycle: \"))\ncircumference = 2 * 3.14159 * radius\ndiameter = 2 * radius\narea = 3.14159 * radius ** 2\n\nprint('circumference is ', circumference)\nprint('diameter is: ', diameter)\nprint('area is ', area)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def _find_warnings(filename, lines, ast_list, static_is_optional):
def print_warning(node, name):
print("{}:{}: static data '{}'".format(filename, lines.
get_line_number(node.start), name))
def find_static(function_node):
tokens = []
static_found = False
for node in function_node.body:
if node.name == 'static':
static_found = True
if static_found:
tokens.append(node)
if node.name == ';':
body = list(ast.ASTBuilder(iter(tokens), filename).
generate())
_find_warnings(filename, lines, body, False)
tokens = []
static_found = False
for node in ast_list:
if isinstance(node, ast.VariableDeclaration):
is_static = 'static' in node.type.modifiers
is_not_const = 'const' not in node.type.modifiers
if is_not_const and (static_is_optional or is_static):
print_warning(node, node.name)
elif isinstance(node, ast.Function) and node.body:
find_static(node)
elif isinstance(node, ast.Class) and node.body:
_find_warnings(filename, lines, node.body, False)
def _find_unused_static_warnings(filename, lines, ast_list):
"""Warn about unused static variables."""
static_declarations = {node.name: node for node in ast_list if
isinstance(node, ast.VariableDeclaration) and 'static' in node.type
.modifiers}
def find_variables_use(body):
for child in body:
if child.name in static_declarations:
static_use_counts[child.name] += 1
static_use_counts = collections.Counter()
for node in ast_list:
if isinstance(node, ast.Function) and node.body:
find_variables_use(node.body)
elif isinstance(node, ast.Class) and node.body:
for child in node.body:
if isinstance(child, ast.Function) and child.body:
find_variables_use(child.body)
for name in sorted(static_declarations):
if not static_use_counts[name]:
print("{}:{}: unused variable '{}'".format(filename, lines.
get_line_number(static_declarations[name].start), name))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _find_warnings(filename, lines, ast_list, static_is_optional):
def print_warning(node, name):
print("{}:{}: static data '{}'".format(filename, lines.
get_line_number(node.start), name))
def find_static(function_node):
tokens = []
static_found = False
for node in function_node.body:
if node.name == 'static':
static_found = True
if static_found:
tokens.append(node)
if node.name == ';':
body = list(ast.ASTBuilder(iter(tokens), filename).
generate())
_find_warnings(filename, lines, body, False)
tokens = []
static_found = False
for node in ast_list:
if isinstance(node, ast.VariableDeclaration):
is_static = 'static' in node.type.modifiers
is_not_const = 'const' not in node.type.modifiers
if is_not_const and (static_is_optional or is_static):
print_warning(node, node.name)
elif isinstance(node, ast.Function) and node.body:
find_static(node)
elif isinstance(node, ast.Class) and node.body:
_find_warnings(filename, lines, node.body, False)
def _find_unused_static_warnings(filename, lines, ast_list):
"""Warn about unused static variables."""
static_declarations = {node.name: node for node in ast_list if
isinstance(node, ast.VariableDeclaration) and 'static' in node.type
.modifiers}
def find_variables_use(body):
for child in body:
if child.name in static_declarations:
static_use_counts[child.name] += 1
static_use_counts = collections.Counter()
for node in ast_list:
if isinstance(node, ast.Function) and node.body:
find_variables_use(node.body)
elif isinstance(node, ast.Class) and node.body:
for child in node.body:
if isinstance(child, ast.Function) and child.body:
find_variables_use(child.body)
for name in sorted(static_declarations):
if not static_use_counts[name]:
print("{}:{}: unused variable '{}'".format(filename, lines.
get_line_number(static_declarations[name].start), name))
def run(filename, source, entire_ast, include_paths, quiet):
lines = metrics.Metrics(source)
_find_warnings(filename, lines, entire_ast, True)
_find_unused_static_warnings(filename, lines, entire_ast)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
def _find_warnings(filename, lines, ast_list, static_is_optional):
    # Report mutable static data declarations found in ast_list.
    # When static_is_optional is True (the top-level call from run()), any
    # non-const declaration is flagged even without the 'static' keyword;
    # recursive calls into classes and function bodies pass False, so there
    # the 'static' modifier is required.

    def print_warning(node, name):
        # Emit a "file:line: message" diagnostic for a single declaration.
        print("{}:{}: static data '{}'".format(filename, lines.
            get_line_number(node.start), name))

    def find_static(function_node):
        # Function bodies are raw token streams rather than parsed
        # declarations: scan for a 'static' token, buffer everything up to
        # the terminating ';', parse that slice into AST nodes, and recurse.
        tokens = []
        static_found = False
        for node in function_node.body:
            if node.name == 'static':
                static_found = True
            if static_found:
                tokens.append(node)
            if node.name == ';':
                body = list(ast.ASTBuilder(iter(tokens), filename).
                    generate())
                _find_warnings(filename, lines, body, False)
                # Reset the buffer for the next statement in the body.
                tokens = []
                static_found = False

    for node in ast_list:
        if isinstance(node, ast.VariableDeclaration):
            is_static = 'static' in node.type.modifiers
            is_not_const = 'const' not in node.type.modifiers
            # const statics are immutable, so only non-const ones are flagged.
            if is_not_const and (static_is_optional or is_static):
                print_warning(node, node.name)
        elif isinstance(node, ast.Function) and node.body:
            find_static(node)
        elif isinstance(node, ast.Class) and node.body:
            # Class members must carry an explicit 'static' to be reported.
            _find_warnings(filename, lines, node.body, False)
def _find_unused_static_warnings(filename, lines, ast_list):
    """Warn about unused static variables."""
    # Collect every top-level declaration marked 'static', keyed by name.
    static_declarations = {}
    for node in ast_list:
        if (isinstance(node, ast.VariableDeclaration) and
                'static' in node.type.modifiers):
            static_declarations[node.name] = node

    use_counts = collections.Counter()

    def tally_uses(body):
        # Function bodies are token streams; any token matching a declared
        # static name counts as a use.
        for token in body:
            if token.name in static_declarations:
                use_counts[token.name] += 1

    # Scan free functions plus the methods of top-level classes.
    for node in ast_list:
        if isinstance(node, ast.Function) and node.body:
            tally_uses(node.body)
        elif isinstance(node, ast.Class) and node.body:
            for member in node.body:
                if isinstance(member, ast.Function) and member.body:
                    tally_uses(member.body)

    # Report the never-referenced statics in deterministic (sorted) order.
    for name in sorted(static_declarations):
        if not use_counts[name]:
            print("{}:{}: unused variable '{}'".format(
                filename,
                lines.get_line_number(static_declarations[name].start),
                name))
def run(filename, source, entire_ast, include_paths, quiet):
    """Plugin entry point: emit static-data and unused-static warnings.

    *include_paths* and *quiet* are part of the common check interface
    and are unused by this particular check.
    """
    lines = metrics.Metrics(source)
    _find_warnings(filename, lines, entire_ast, True)
    _find_unused_static_warnings(filename, lines, entire_ast)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import print_function
from __future__ import unicode_literals
import collections
from . import ast
from . import metrics
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
def _find_warnings(filename, lines, ast_list, static_is_optional):
    """Report mutable static data declared anywhere in *ast_list*.

    *filename* and *lines* are used only to format diagnostics.  When
    *static_is_optional* is True (module scope) plain non-static globals
    are flagged as well; nested scopes pass False so only explicit
    'static' declarations are reported there.
    """

    def print_warning(node, name):
        # One diagnostic per offending declaration: "file:line: static data 'x'".
        print("{}:{}: static data '{}'".format(filename, lines.
            get_line_number(node.start), name))

    def find_static(function_node):
        # Function bodies are raw token streams: collect tokens from a
        # 'static' keyword up to the terminating ';', re-parse that slice
        # with ast.ASTBuilder, and recurse into the resulting mini-AST.
        tokens = []
        static_found = False
        for node in function_node.body:
            if node.name == 'static':
                static_found = True
            if static_found:
                tokens.append(node)
                if node.name == ';':
                    body = list(ast.ASTBuilder(iter(tokens), filename).
                        generate())
                    _find_warnings(filename, lines, body, False)
                    tokens = []
                    static_found = False
    for node in ast_list:
        if isinstance(node, ast.VariableDeclaration):
            # const data is immutable and therefore not reported.
            is_static = 'static' in node.type.modifiers
            is_not_const = 'const' not in node.type.modifiers
            if is_not_const and (static_is_optional or is_static):
                print_warning(node, node.name)
        elif isinstance(node, ast.Function) and node.body:
            find_static(node)
        elif isinstance(node, ast.Class) and node.body:
            # Inside a class only explicit 'static' members are data.
            _find_warnings(filename, lines, node.body, False)
def _find_unused_static_warnings(filename, lines, ast_list):
    """Warn about unused static variables."""
    # name -> declaration node for every module-level 'static' variable.
    static_declarations = {node.name: node for node in ast_list if
        isinstance(node, ast.VariableDeclaration) and 'static' in node.type
        .modifiers}

    def find_variables_use(body):
        # Tally every token in *body* that names one of the declarations.
        for child in body:
            if child.name in static_declarations:
                static_use_counts[child.name] += 1
    static_use_counts = collections.Counter()
    for node in ast_list:
        if isinstance(node, ast.Function) and node.body:
            find_variables_use(node.body)
        elif isinstance(node, ast.Class) and node.body:
            # Look one level down so uses inside member functions count too.
            for child in node.body:
                if isinstance(child, ast.Function) and child.body:
                    find_variables_use(child.body)
    for name in sorted(static_declarations):
        if not static_use_counts[name]:
            print("{}:{}: unused variable '{}'".format(filename, lines.
                get_line_number(static_declarations[name].start), name))
def run(filename, source, entire_ast, include_paths, quiet):
    """Plugin entry point: emit static-data and unused-static warnings.

    *include_paths* and *quiet* are part of the common check interface
    and are unused by this particular check.
    """
    lines = metrics.Metrics(source)
    _find_warnings(filename, lines, entire_ast, True)
    _find_unused_static_warnings(filename, lines, entire_ast)
<|reserved_special_token_1|>
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes, functions and modules which contain static data."""
from __future__ import print_function
from __future__ import unicode_literals
import collections
from . import ast
from . import metrics
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
def _find_warnings(filename, lines, ast_list, static_is_optional):
    """Report mutable static data declared anywhere in *ast_list*.

    *filename* and *lines* are used only to format diagnostics.  When
    *static_is_optional* is True (module scope) plain non-static globals
    are flagged as well; nested scopes pass False so only explicit
    'static' declarations are reported there.
    """
    def print_warning(node, name):
        # One diagnostic per offending declaration: "file:line: static data 'x'".
        print("{}:{}: static data '{}'".format(
            filename,
            lines.get_line_number(node.start),
            name))

    def find_static(function_node):
        # Function bodies are raw token streams: collect tokens from a
        # 'static' keyword up to the terminating ';', re-parse that slice
        # with ast.ASTBuilder, and recurse into the resulting mini-AST.
        tokens = []
        static_found = False
        for node in function_node.body:
            if node.name == 'static':
                static_found = True

            if static_found:
                tokens.append(node)
                if node.name == ';':
                    body = list(
                        ast.ASTBuilder(iter(tokens), filename).generate())
                    _find_warnings(filename, lines, body, False)
                    tokens = []
                    static_found = False

    for node in ast_list:
        if isinstance(node, ast.VariableDeclaration):
            # Ignore 'static' at module scope so we can find globals too.
            # const data is immutable and therefore not reported.
            is_static = 'static' in node.type.modifiers
            is_not_const = 'const' not in node.type.modifiers

            if is_not_const and (static_is_optional or is_static):
                print_warning(node, node.name)
        elif isinstance(node, ast.Function) and node.body:
            find_static(node)
        elif isinstance(node, ast.Class) and node.body:
            # Inside a class only explicit 'static' members are data.
            _find_warnings(filename, lines, node.body, False)
def _find_unused_static_warnings(filename, lines, ast_list):
    """Warn about unused static variables."""
    # name -> declaration node for every module-level 'static' variable.
    static_declarations = {
        node.name: node
        for node in ast_list
        if (isinstance(node, ast.VariableDeclaration) and
            'static' in node.type.modifiers)
    }

    def find_variables_use(body):
        # Tally every token in *body* that names one of the declarations.
        for child in body:
            if child.name in static_declarations:
                static_use_counts[child.name] += 1

    static_use_counts = collections.Counter()
    for node in ast_list:
        if isinstance(node, ast.Function) and node.body:
            find_variables_use(node.body)
        elif isinstance(node, ast.Class) and node.body:
            # Look one level down so uses inside member functions count too.
            for child in node.body:
                if isinstance(child, ast.Function) and child.body:
                    find_variables_use(child.body)

    for name in sorted(static_declarations):
        if not static_use_counts[name]:
            print("{}:{}: unused variable '{}'".format(
                filename,
                lines.get_line_number(static_declarations[name].start),
                name))
def run(filename, source, entire_ast, include_paths, quiet):
    """Plugin entry point: emit static-data and unused-static warnings.

    *include_paths* and *quiet* are part of the common check interface
    and are unused by this particular check.
    """
    lines = metrics.Metrics(source)

    _find_warnings(filename, lines, entire_ast, True)
    _find_unused_static_warnings(filename, lines, entire_ast)
|
flexible
|
{
"blob_id": "57d1fb805fce2ba75ea2962598e809ba35fd7eb6",
"index": 3490,
"step-1": "<mask token>\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(filename, lines.\n get_line_number(node.start), name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(ast.ASTBuilder(iter(tokens), filename).\n generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {node.name: node for node in ast_list if \n isinstance(node, ast.VariableDeclaration) and 'static' in node.type\n .modifiers}\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(filename, lines.\n get_line_number(static_declarations[name].start), name))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(filename, lines.\n get_line_number(node.start), name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(ast.ASTBuilder(iter(tokens), filename).\n generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {node.name: node for node in ast_list if \n isinstance(node, ast.VariableDeclaration) and 'static' in node.type\n .modifiers}\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(filename, lines.\n get_line_number(static_declarations[name].start), name))\n\n\ndef run(filename, source, entire_ast, 
include_paths, quiet):\n lines = metrics.Metrics(source)\n _find_warnings(filename, lines, entire_ast, True)\n _find_unused_static_warnings(filename, lines, entire_ast)\n",
"step-3": "<mask token>\n__author__ = 'nnorwitz@google.com (Neal Norwitz)'\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(filename, lines.\n get_line_number(node.start), name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(ast.ASTBuilder(iter(tokens), filename).\n generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {node.name: node for node in ast_list if \n isinstance(node, ast.VariableDeclaration) and 'static' in node.type\n .modifiers}\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(filename, lines.\n get_line_number(static_declarations[name].start), 
name))\n\n\ndef run(filename, source, entire_ast, include_paths, quiet):\n lines = metrics.Metrics(source)\n _find_warnings(filename, lines, entire_ast, True)\n _find_unused_static_warnings(filename, lines, entire_ast)\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport collections\nfrom . import ast\nfrom . import metrics\n__author__ = 'nnorwitz@google.com (Neal Norwitz)'\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(filename, lines.\n get_line_number(node.start), name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(ast.ASTBuilder(iter(tokens), filename).\n generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {node.name: node for node in ast_list if \n isinstance(node, ast.VariableDeclaration) and 'static' in node.type\n .modifiers}\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n for name in sorted(static_declarations):\n if not 
static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(filename, lines.\n get_line_number(static_declarations[name].start), name))\n\n\ndef run(filename, source, entire_ast, include_paths, quiet):\n lines = metrics.Metrics(source)\n _find_warnings(filename, lines, entire_ast, True)\n _find_unused_static_warnings(filename, lines, entire_ast)\n",
"step-5": "# Copyright 2008 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Print classes, functions and modules which contain static data.\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport collections\n\nfrom . import ast\nfrom . import metrics\n\n\n__author__ = 'nnorwitz@google.com (Neal Norwitz)'\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(\n filename,\n lines.get_line_number(node.start),\n name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(\n ast.ASTBuilder(iter(tokens), filename).generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n # Ignore 'static' at module scope so we can find globals too.\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef 
_find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {\n node.name: node\n for node in ast_list\n if (isinstance(node, ast.VariableDeclaration) and\n 'static' in node.type.modifiers)\n }\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(\n filename,\n lines.get_line_number(static_declarations[name].start),\n name))\n\n\ndef run(filename, source, entire_ast, include_paths, quiet):\n lines = metrics.Metrics(source)\n\n _find_warnings(filename, lines, entire_ast, True)\n _find_unused_static_warnings(filename, lines, entire_ast)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
@authentication.route('/register', methods=['GET', 'POST'])
def register():
    """Show the sign-up form; create and persist a new user on valid POST."""
    form = Register()
    if form.validate_on_submit():
        # Build the account straight from the submitted form fields.
        new_user = User(
            first_name=request.form.get('first_name'),
            last_name=request.form.get('last_name'),
            email=request.form.get('email'),
            password=request.form.get('password'),
        )
        # Swap the plain-text password for its hash before it is stored.
        new_user.hash_pass(new_user.password)
        db.session.add(new_user)
        db.session.commit()
        flash('You have succesfully registered!', 'primary')
        return redirect(url_for('authentication.login'))
    return render_template('register.html', form=form)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@authentication.route('/register', methods=['GET', 'POST'])
def register():
form = Register()
if form.validate_on_submit():
data = {'first_name': request.form.get('first_name'), 'last_name':
request.form.get('last_name'), 'email': request.form.get(
'email'), 'password': request.form.get('password')}
u = User(first_name=data['first_name'], last_name=data['last_name'],
email=data['email'], password=data['password'])
u.hash_pass(u.password)
db.session.add(u)
db.session.commit()
flash('You have succesfully registered!', 'primary')
return redirect(url_for('authentication.login'))
content = {'form': form}
return render_template('register.html', **content)
<|reserved_special_token_0|>
@authentication.route('/logout')
def logout():
    """End the current session and bounce the user back to the login page."""
    logout_user()
    farewell = 'You have successfully logged out!'
    flash(farewell, 'info')
    return redirect(url_for('authentication.login'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@authentication.route('/register', methods=['GET', 'POST'])
def register():
    """Show the sign-up form; create and persist a new user on valid POST."""
    form = Register()
    if form.validate_on_submit():
        # Build the account straight from the submitted form fields.
        new_user = User(
            first_name=request.form.get('first_name'),
            last_name=request.form.get('last_name'),
            email=request.form.get('email'),
            password=request.form.get('password'),
        )
        # Swap the plain-text password for its hash before it is stored.
        new_user.hash_pass(new_user.password)
        db.session.add(new_user)
        db.session.commit()
        flash('You have succesfully registered!', 'primary')
        return redirect(url_for('authentication.login'))
    return render_template('register.html', form=form)
@authentication.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user.

    GET renders the login form; POST validates it, compares the submitted
    credentials against the stored hash, and logs the user in on success.
    """
    form = Login()
    if form.validate_on_submit():
        # Query only after the form validates: the original looked the user
        # up before validation, hitting the database on every plain GET and
        # on every invalid submission for no reason.
        user = User.query.filter_by(email=request.form.get('email')).first()
        if user is None or not user.check_password(request.form.get('password')
            ):
            flash('You have entered incorrect details, please try again',
                'danger')
            return redirect(url_for('authentication.login'))
        login_user(user)
        flash('You have successfully logged in!', 'success')
        return redirect(url_for('main.index'))
    content = {'form': form}
    return render_template('login.html', **content)
@authentication.route('/logout')
def logout():
    """End the current session and bounce the user back to the login page."""
    logout_user()
    farewell = 'You have successfully logged out!'
    flash(farewell, 'info')
    return redirect(url_for('authentication.login'))
<|reserved_special_token_1|>
from . import bp as authentication
from app import db
from flask import current_app as app, render_template, request, redirect, url_for, flash, session
from flask_login import login_user, logout_user, current_user, login_required
from .forms import Register, Login, Settings
from .models import User
@authentication.route('/register', methods=['GET', 'POST'])
def register():
    """Show the sign-up form; create and persist a new user on valid POST."""
    form = Register()
    if form.validate_on_submit():
        # Build the account straight from the submitted form fields.
        new_user = User(
            first_name=request.form.get('first_name'),
            last_name=request.form.get('last_name'),
            email=request.form.get('email'),
            password=request.form.get('password'),
        )
        # Swap the plain-text password for its hash before it is stored.
        new_user.hash_pass(new_user.password)
        db.session.add(new_user)
        db.session.commit()
        flash('You have succesfully registered!', 'primary')
        return redirect(url_for('authentication.login'))
    return render_template('register.html', form=form)
@authentication.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user.

    GET renders the login form; POST validates it, compares the submitted
    credentials against the stored hash, and logs the user in on success.
    """
    form = Login()
    if form.validate_on_submit():
        # Query only after the form validates: the original looked the user
        # up before validation, hitting the database on every plain GET and
        # on every invalid submission for no reason.
        user = User.query.filter_by(email=request.form.get('email')).first()
        if user is None or not user.check_password(request.form.get('password')
            ):
            flash('You have entered incorrect details, please try again',
                'danger')
            return redirect(url_for('authentication.login'))
        login_user(user)
        flash('You have successfully logged in!', 'success')
        return redirect(url_for('main.index'))
    content = {'form': form}
    return render_template('login.html', **content)
@authentication.route('/logout')
def logout():
    """End the current session and bounce the user back to the login page."""
    logout_user()
    farewell = 'You have successfully logged out!'
    flash(farewell, 'info')
    return redirect(url_for('authentication.login'))
<|reserved_special_token_1|>
from .import bp as authentication
from app import db
from flask import current_app as app, render_template, request, redirect, url_for, flash, session
from flask_login import login_user, logout_user, current_user, login_required
from .forms import Register, Login, Settings
from .models import User
# route for register using a WTForm
@authentication.route('/register', methods=['GET', 'POST'])
def register():
    """Show the sign-up form; create and persist a new user on valid POST."""
    form = Register()
    if form.validate_on_submit():
        # Build the account straight from the submitted form fields.
        new_user = User(
            first_name=request.form.get('first_name'),
            last_name=request.form.get('last_name'),
            email=request.form.get('email'),
            password=request.form.get('password'),
        )
        # Swap the plain-text password for its hash before it is stored.
        new_user.hash_pass(new_user.password)
        db.session.add(new_user)
        db.session.commit()
        flash("You have succesfully registered!", 'primary')
        return redirect(url_for("authentication.login"))
    return render_template('register.html', form=form)
# route for login using a WTform
@authentication.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user.

    GET renders the login form; POST validates it, compares the submitted
    credentials against the stored hash, and logs the user in on success.
    """
    form = Login()
    if form.validate_on_submit():
        # Query only after the form validates: the original looked the user
        # up before validation, hitting the database on every plain GET and
        # on every invalid submission for no reason.
        user = User.query.filter_by(email=request.form.get('email')).first()
        # check if the info is correct
        if user is None or not user.check_password(request.form.get('password')):
            flash("You have entered incorrect details, please try again", 'danger')
            return redirect(url_for('authentication.login'))
        login_user(user)
        flash("You have successfully logged in!", 'success')
        return redirect(url_for('main.index'))
    # sending the form model to the HTML page for rendering
    content = {
        'form' : form
    }
    return render_template('login.html', **content)
# logout route, pretty simple
@authentication.route('/logout')
def logout():
    """End the current session and bounce the user back to the login page."""
    logout_user()
    farewell = "You have successfully logged out!"
    flash(farewell, 'info')
    return redirect(url_for("authentication.login"))
|
flexible
|
{
"blob_id": "74faeb1c09fe136ec4d9578173aeebe54b451e33",
"index": 2406,
"step-1": "<mask token>\n\n\n@authentication.route('/register', methods=['GET', 'POST'])\ndef register():\n form = Register()\n if form.validate_on_submit():\n data = {'first_name': request.form.get('first_name'), 'last_name':\n request.form.get('last_name'), 'email': request.form.get(\n 'email'), 'password': request.form.get('password')}\n u = User(first_name=data['first_name'], last_name=data['last_name'],\n email=data['email'], password=data['password'])\n u.hash_pass(u.password)\n db.session.add(u)\n db.session.commit()\n flash('You have succesfully registered!', 'primary')\n return redirect(url_for('authentication.login'))\n content = {'form': form}\n return render_template('register.html', **content)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@authentication.route('/register', methods=['GET', 'POST'])\ndef register():\n form = Register()\n if form.validate_on_submit():\n data = {'first_name': request.form.get('first_name'), 'last_name':\n request.form.get('last_name'), 'email': request.form.get(\n 'email'), 'password': request.form.get('password')}\n u = User(first_name=data['first_name'], last_name=data['last_name'],\n email=data['email'], password=data['password'])\n u.hash_pass(u.password)\n db.session.add(u)\n db.session.commit()\n flash('You have succesfully registered!', 'primary')\n return redirect(url_for('authentication.login'))\n content = {'form': form}\n return render_template('register.html', **content)\n\n\n<mask token>\n\n\n@authentication.route('/logout')\ndef logout():\n logout_user()\n flash('You have successfully logged out!', 'info')\n return redirect(url_for('authentication.login'))\n",
"step-3": "<mask token>\n\n\n@authentication.route('/register', methods=['GET', 'POST'])\ndef register():\n form = Register()\n if form.validate_on_submit():\n data = {'first_name': request.form.get('first_name'), 'last_name':\n request.form.get('last_name'), 'email': request.form.get(\n 'email'), 'password': request.form.get('password')}\n u = User(first_name=data['first_name'], last_name=data['last_name'],\n email=data['email'], password=data['password'])\n u.hash_pass(u.password)\n db.session.add(u)\n db.session.commit()\n flash('You have succesfully registered!', 'primary')\n return redirect(url_for('authentication.login'))\n content = {'form': form}\n return render_template('register.html', **content)\n\n\n@authentication.route('/login', methods=['GET', 'POST'])\ndef login():\n form = Login()\n user = User.query.filter_by(email=request.form.get('email')).first()\n if form.validate_on_submit():\n if user is None or not user.check_password(request.form.get('password')\n ):\n flash('You have entered incorrect details, please try again',\n 'danger')\n return redirect(url_for('authentication.login'))\n login_user(user)\n flash('You have successfully logged in!', 'success')\n return redirect(url_for('main.index'))\n content = {'form': form}\n return render_template('login.html', **content)\n\n\n@authentication.route('/logout')\ndef logout():\n logout_user()\n flash('You have successfully logged out!', 'info')\n return redirect(url_for('authentication.login'))\n",
"step-4": "from . import bp as authentication\nfrom app import db\nfrom flask import current_app as app, render_template, request, redirect, url_for, flash, session\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom .forms import Register, Login, Settings\nfrom .models import User\n\n\n@authentication.route('/register', methods=['GET', 'POST'])\ndef register():\n form = Register()\n if form.validate_on_submit():\n data = {'first_name': request.form.get('first_name'), 'last_name':\n request.form.get('last_name'), 'email': request.form.get(\n 'email'), 'password': request.form.get('password')}\n u = User(first_name=data['first_name'], last_name=data['last_name'],\n email=data['email'], password=data['password'])\n u.hash_pass(u.password)\n db.session.add(u)\n db.session.commit()\n flash('You have succesfully registered!', 'primary')\n return redirect(url_for('authentication.login'))\n content = {'form': form}\n return render_template('register.html', **content)\n\n\n@authentication.route('/login', methods=['GET', 'POST'])\ndef login():\n form = Login()\n user = User.query.filter_by(email=request.form.get('email')).first()\n if form.validate_on_submit():\n if user is None or not user.check_password(request.form.get('password')\n ):\n flash('You have entered incorrect details, please try again',\n 'danger')\n return redirect(url_for('authentication.login'))\n login_user(user)\n flash('You have successfully logged in!', 'success')\n return redirect(url_for('main.index'))\n content = {'form': form}\n return render_template('login.html', **content)\n\n\n@authentication.route('/logout')\ndef logout():\n logout_user()\n flash('You have successfully logged out!', 'info')\n return redirect(url_for('authentication.login'))\n",
"step-5": "from .import bp as authentication\nfrom app import db\nfrom flask import current_app as app, render_template, request, redirect, url_for, flash, session\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom .forms import Register, Login, Settings\nfrom .models import User\n\n# route for register using a WTForm\n@authentication.route('/register', methods=['GET', 'POST'])\ndef register():\n # set an instance of the form\n form = Register()\n \n if form.validate_on_submit():\n # collect the data from the form into a dictionary\n data = {\n 'first_name' : request.form.get('first_name'),\n 'last_name' : request.form.get('last_name'),\n 'email' : request.form.get('email'),\n 'password' : request.form.get('password')\n }\n # create an instance of the User class using the data dictionary\n u = User(first_name=data['first_name'], last_name=data['last_name'], email=data['email'], password=data['password'])\n\n # securing the password\n u.hash_pass(u.password)\n\n # adding the user to the database\n db.session.add(u)\n db.session.commit()\n\n # confirmations\n flash(\"You have succesfully registered!\", 'primary')\n\n # send them to the login page\n return redirect(url_for(\"authentication.login\"))\n\n # sending the form model to the HTML page for rendering\n content = {\n 'form': form\n }\n return render_template('register.html', **content)\n\n# route for login using a WTform\n@authentication.route('/login', methods=['GET', 'POST'])\ndef login():\n # set an instance of the form\n form = Login()\n\n user = User.query.filter_by(email=request.form.get('email')).first()\n if form.validate_on_submit():\n\n # check if the info is correct\n if user is None or not user.check_password(request.form.get('password')):\n flash(\"You have entered incorrect details, please try again\", 'danger')\n return redirect(url_for('authentication.login'))\n login_user(user)\n flash(\"You have successfully logged in!\", 'success')\n return 
redirect(url_for('main.index'))\n\n # sending the form model to the HTML page for rendering\n content = {\n 'form' : form\n }\n return render_template('login.html', **content)\n\n# logout route, pretty simple\n@authentication.route('/logout')\ndef logout():\n logout_user()\n flash(\"You have successfully logged out!\", 'info')\n return redirect(url_for(\"authentication.login\"))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@register.simple_tag()
def multiplication(value, arg, *args, **kwargs):
    """Template tag: return the product of ``value`` and ``arg``."""
    product = value * arg
    return product
@register.filter
def in_category(things, category):
    """Template filter: narrow a queryset to rows whose category matches."""
    matching = things.filter(category=category)
    return matching
@register.simple_tag()
def division(value, arg, *args, **kwargs):
    """Template tag: return ``value`` divided by ``arg`` (true division)."""
    quotient = value / arg
    return quotient
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register.simple_tag()
def multiplication(value, arg, *args, **kwargs):
return value * arg
@register.filter
def in_category(things, category):
return things.filter(category=category)
@register.simple_tag()
def division(value, arg, *args, **kwargs):
return value / arg
@register.simple_tag()
def add(value, arg, *args, **kwargs):
return value + arg
<|reserved_special_token_1|>
<|reserved_special_token_0|>
register = template.Library()
@register.simple_tag()
def multiplication(value, arg, *args, **kwargs):
return value * arg
@register.filter
def in_category(things, category):
return things.filter(category=category)
@register.simple_tag()
def division(value, arg, *args, **kwargs):
return value / arg
@register.simple_tag()
def add(value, arg, *args, **kwargs):
return value + arg
<|reserved_special_token_1|>
from django import template
import ast
register = template.Library()
@register.simple_tag()
def multiplication(value, arg, *args, **kwargs):
return value * arg
@register.filter
def in_category(things, category):
return things.filter(category=category)
@register.simple_tag()
def division(value, arg, *args, **kwargs):
return value / arg
@register.simple_tag()
def add(value, arg, *args, **kwargs):
return value + arg
|
flexible
|
{
"blob_id": "9339d3bc0c3005880b1c8d1c9914d6e28d39dbbd",
"index": 7285,
"step-1": "<mask token>\n\n\n@register.simple_tag()\ndef multiplication(value, arg, *args, **kwargs):\n return value * arg\n\n\n@register.filter\ndef in_category(things, category):\n return things.filter(category=category)\n\n\n@register.simple_tag()\ndef division(value, arg, *args, **kwargs):\n return value / arg\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@register.simple_tag()\ndef multiplication(value, arg, *args, **kwargs):\n return value * arg\n\n\n@register.filter\ndef in_category(things, category):\n return things.filter(category=category)\n\n\n@register.simple_tag()\ndef division(value, arg, *args, **kwargs):\n return value / arg\n\n\n@register.simple_tag()\ndef add(value, arg, *args, **kwargs):\n return value + arg\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\n@register.simple_tag()\ndef multiplication(value, arg, *args, **kwargs):\n return value * arg\n\n\n@register.filter\ndef in_category(things, category):\n return things.filter(category=category)\n\n\n@register.simple_tag()\ndef division(value, arg, *args, **kwargs):\n return value / arg\n\n\n@register.simple_tag()\ndef add(value, arg, *args, **kwargs):\n return value + arg\n",
"step-4": "from django import template\nimport ast\nregister = template.Library()\n\n\n@register.simple_tag()\ndef multiplication(value, arg, *args, **kwargs):\n return value * arg\n\n\n@register.filter\ndef in_category(things, category):\n return things.filter(category=category)\n\n\n@register.simple_tag()\ndef division(value, arg, *args, **kwargs):\n return value / arg\n\n\n@register.simple_tag()\ndef add(value, arg, *args, **kwargs):\n return value + arg\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Command(BaseCommand):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Command(BaseCommand):
def handle(self, *args, **options):
print('Loading article settings')
ArticleCoverSetting.objects.all().delete()
for j in Journal.objects.all():
s = ArticleCoverSetting()
s.journal = j
s.title_x = 10
s.title_y = 100
s.number_x = 50
s.number_y = 70
s.category_x = 140
s.category_y = 80
s.save()
print('saving settings %s' % s.journal)
<|reserved_special_token_1|>
from django.core.management.base import BaseCommand
from journal.models import Journal
from article.models import ArticleCoverSetting
from django.conf import settings
import os
class Command(BaseCommand):
def handle(self, *args, **options):
print('Loading article settings')
ArticleCoverSetting.objects.all().delete()
for j in Journal.objects.all():
s = ArticleCoverSetting()
s.journal = j
s.title_x = 10
s.title_y = 100
s.number_x = 50
s.number_y = 70
s.category_x = 140
s.category_y = 80
s.save()
print('saving settings %s' % s.journal)
|
flexible
|
{
"blob_id": "a3d27561488c38e1256eb33abad108ad42081eb6",
"index": 9253,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n print('Loading article settings')\n ArticleCoverSetting.objects.all().delete()\n for j in Journal.objects.all():\n s = ArticleCoverSetting()\n s.journal = j\n s.title_x = 10\n s.title_y = 100\n s.number_x = 50\n s.number_y = 70\n s.category_x = 140\n s.category_y = 80\n s.save()\n print('saving settings %s' % s.journal)\n",
"step-4": "from django.core.management.base import BaseCommand\nfrom journal.models import Journal\nfrom article.models import ArticleCoverSetting\nfrom django.conf import settings\nimport os\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n print('Loading article settings')\n ArticleCoverSetting.objects.all().delete()\n for j in Journal.objects.all():\n s = ArticleCoverSetting()\n s.journal = j\n s.title_x = 10\n s.title_y = 100\n s.number_x = 50\n s.number_y = 70\n s.category_x = 140\n s.category_y = 80\n s.save()\n print('saving settings %s' % s.journal)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def reader():
with open('possibilities.txt', 'r') as file1:
file_lines = [x.strip() for x in file1.readlines()]
for e in file_lines:
n = e.replace('Python', 'C++')
print(n)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def reader():
with open('possibilities.txt', 'r') as file1:
file_lines = [x.strip() for x in file1.readlines()]
for e in file_lines:
n = e.replace('Python', 'C++')
print(n)
if __name__ == '__main__':
reader()
<|reserved_special_token_1|>
'''
Функція replace() може використовуватися для заміни будь-якого слова у рядку іншим словом.
Прочитайте кожен рядок зі створеного у попередньому завданні файлу learning_python.txt і замініть слово Python назвою іншої мови,
наприклад C при виведенні на екран. Це завдання написати в окремій функції.
'''
def reader():
with open('possibilities.txt', 'r') as file1:
file_lines = [x.strip() for x in file1.readlines()]
for e in file_lines:
n = e.replace('Python', 'C++')
print(n)
if __name__ == '__main__':
reader()
|
flexible
|
{
"blob_id": "6d80a89a47b68fd8d81739787897355671ca94e9",
"index": 5815,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef reader():\n with open('possibilities.txt', 'r') as file1:\n file_lines = [x.strip() for x in file1.readlines()]\n for e in file_lines:\n n = e.replace('Python', 'C++')\n print(n)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef reader():\n with open('possibilities.txt', 'r') as file1:\n file_lines = [x.strip() for x in file1.readlines()]\n for e in file_lines:\n n = e.replace('Python', 'C++')\n print(n)\n\n\nif __name__ == '__main__':\n reader()\n",
"step-4": "'''\nФункція replace() може використовуватися для заміни будь-якого слова у рядку іншим словом.\nПрочитайте кожен рядок зі створеного у попередньому завданні файлу learning_python.txt і замініть слово Python назвою іншої мови,\nнаприклад C при виведенні на екран. Це завдання написати в окремій функції.\n'''\n\n\ndef reader():\n with open('possibilities.txt', 'r') as file1:\n file_lines = [x.strip() for x in file1.readlines()]\n for e in file_lines:\n n = e.replace('Python', 'C++')\n print(n)\n\n\nif __name__ == '__main__':\n reader()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Encontrar el valor mas alto el mas rapido, el mas lento
para eso son los algoritmos de optimizacion
Para eso debemos pensar en una funcion que queramos maximizar o minimizar
Se aplican mas que todo para empresas como despegar, en donde se pueden generar buenas empresas
Empresas a la optimizacion
#############################################33
Traveling Sales Man
Cual es la ruta mas eficiente para recorrer todas las ciudades
Resolver el algoritmo de sales man
Turing Prize
'''
|
normal
|
{
"blob_id": "7163be250ae3a22931de037cb6896c2e6d5f00a8",
"index": 584,
"step-1": "<mask token>\n",
"step-2": "'''\n Encontrar el valor mas alto el mas rapido, el mas lento\n para eso son los algoritmos de optimizacion\n Para eso debemos pensar en una funcion que queramos maximizar o minimizar\n Se aplican mas que todo para empresas como despegar, en donde se pueden generar buenas empresas\n Empresas a la optimizacion \n #############################################33\n Traveling Sales Man\n Cual es la ruta mas eficiente para recorrer todas las ciudades\n Resolver el algoritmo de sales man \n Turing Prize\n'''\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def colorful(A):
sA = str(A)
len_sA = len(sA)
if len_sA == 1:
return 1
dig_list = []
for i in range(len_sA):
for j in range(i, len_sA):
dig_list.append(int(sA[i:j + 1]))
mul = {}
for val in dig_list:
m = 1
for v in str(val):
m *= int(v)
if m in mul:
return 0
else:
mul[m] = 1
return 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def colorful(A):
sA = str(A)
len_sA = len(sA)
if len_sA == 1:
return 1
dig_list = []
for i in range(len_sA):
for j in range(i, len_sA):
dig_list.append(int(sA[i:j + 1]))
mul = {}
for val in dig_list:
m = 1
for v in str(val):
m *= int(v)
if m in mul:
return 0
else:
mul[m] = 1
return 1
print(colorful(0))
print(colorful(111))
print(colorful(3245))
<|reserved_special_token_1|>
"""A number can be broken into different contiguous sub-subsequence parts.
Suppose, a number 3245 can be broken into parts like 3 2 4 5 32 24 45 324 245.
And this number is a COLORFUL number, since product of every digit of a contiguous subsequence is different
"""
def colorful(A):
sA = str(A)
len_sA = len(sA)
if len_sA == 1:
return (1)
dig_list = []
for i in range(len_sA):
for j in range(i, len_sA):
dig_list.append(int(sA[i:j + 1]))
mul = {}
for val in dig_list:
m = 1
for v in str(val):
m *= int(v)
if m in mul:
return (0)
else:
mul[m] = 1
return (1)
print (colorful(0))
print (colorful(111))
print (colorful(3245))
|
flexible
|
{
"blob_id": "41013469e65e45f6c909d66c2a54eaf11dfd474c",
"index": 3077,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef colorful(A):\n sA = str(A)\n len_sA = len(sA)\n if len_sA == 1:\n return 1\n dig_list = []\n for i in range(len_sA):\n for j in range(i, len_sA):\n dig_list.append(int(sA[i:j + 1]))\n mul = {}\n for val in dig_list:\n m = 1\n for v in str(val):\n m *= int(v)\n if m in mul:\n return 0\n else:\n mul[m] = 1\n return 1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef colorful(A):\n sA = str(A)\n len_sA = len(sA)\n if len_sA == 1:\n return 1\n dig_list = []\n for i in range(len_sA):\n for j in range(i, len_sA):\n dig_list.append(int(sA[i:j + 1]))\n mul = {}\n for val in dig_list:\n m = 1\n for v in str(val):\n m *= int(v)\n if m in mul:\n return 0\n else:\n mul[m] = 1\n return 1\n\n\nprint(colorful(0))\nprint(colorful(111))\nprint(colorful(3245))\n",
"step-4": "\"\"\"A number can be broken into different contiguous sub-subsequence parts. \nSuppose, a number 3245 can be broken into parts like 3 2 4 5 32 24 45 324 245. \nAnd this number is a COLORFUL number, since product of every digit of a contiguous subsequence is different\n\"\"\"\n\ndef colorful(A):\n sA = str(A)\n len_sA = len(sA)\n if len_sA == 1:\n return (1)\n dig_list = []\n for i in range(len_sA):\n for j in range(i, len_sA):\n dig_list.append(int(sA[i:j + 1]))\n mul = {}\n for val in dig_list:\n m = 1\n for v in str(val):\n m *= int(v)\n if m in mul:\n return (0)\n else:\n mul[m] = 1\n return (1)\n\nprint (colorful(0))\nprint (colorful(111))\nprint (colorful(3245))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.1.5 on 2019-08-03 23:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crm', '0003_auto_20190802_2211'),
]
operations = [
migrations.AlterModelOptions(
name='customerinfo',
options={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'},
),
]
|
normal
|
{
"blob_id": "b90fb1e657d4c7e186a7b889eee586527bec4413",
"index": 2040,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('crm', '0003_auto_20190802_2211')]\n operations = [migrations.AlterModelOptions(name='customerinfo', options\n ={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'})]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('crm', '0003_auto_20190802_2211')]\n operations = [migrations.AlterModelOptions(name='customerinfo', options\n ={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'})]\n",
"step-5": "# Generated by Django 2.1.5 on 2019-08-03 23:15\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('crm', '0003_auto_20190802_2211'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='customerinfo',\n options={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Merkle: Implementation of Merkle Trees over Blake2
"""
from typing import List, Any
from hashlib import blake2b
class Merkle:
"""
We consider the merkle tree as a commitment protocol implementing
the interface:
* commit_() : commits to a list by computing the merkle tree.
* open_() : opens the commitment by computing the authentification path.
* verify_() : verify that a value is commited by checking that its a leaf.
"""
H = blake2b
def commit_(leafs):
assert len(leafs) & (len(leafs) - 1) == 0, "List must be of a power two length"
if len(leafs) == 1:
return leafs[0]
return Merkle.H(
Merkle.commit_(leafs[: (len(leafs) // 2)])
+ Merkle.commit_(leafs[(len(leafs) // 2) :])
).digest()
def open_(index, leafs):
assert len(leafs) & (len(leafs) - 1) == 0, "List must be of a power two length"
assert 0 <= index and index < len(leafs)
if len(leafs) == 2:
return [leafs[1 - index]]
elif index < (len(leafs) / 2):
return Merkle.open_(index, leafs[: (len(leafs) // 2)]) + [
Merkle.commit_(leafs[(len(leafs) // 2) :])
]
else:
return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) // 2 :]) + [
Merkle.commit_(leafs[: len(leafs) // 2])
]
def verify_(root, index, path, leaf):
assert 0 <= index and index < (1 << len(path)), "cannot verify invalid index"
if len(path) == 1:
if index == 0:
return root == Merkle.H(leaf + path[0]).digest()
else:
return root == Merkle.H(path[0] + leaf).digest()
else:
if index % 2 == 0:
return Merkle.verify_(
root, index >> 1, path[1:], Merkle.H(leaf + path[0]).digest()
)
else:
return Merkle.verify_(
root, index >> 1, path[1:], Merkle.H(path[0] + leaf).digest()
)
# The following functions expose the API and compute hashes of leafs before
# calling the underlying code.
def commit(leafs: List[Any]):
return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in leafs])
def open(index: int, leafs: List[Any]):
return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in leafs])
def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]):
return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest())
|
normal
|
{
"blob_id": "547926904f9a4b88a988e3b59c49b94fe0e30de4",
"index": 1955,
"step-1": "<mask token>\n\n\nclass Merkle:\n <mask token>\n <mask token>\n\n def commit_(leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(Merkle.commit_(leafs[:len(leafs) // 2]) + Merkle.\n commit_(leafs[len(leafs) // 2:])).digest()\n\n def open_(index, leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < len(leafs) / 2:\n return Merkle.open_(index, leafs[:len(leafs) // 2]) + [Merkle.\n commit_(leafs[len(leafs) // 2:])]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) //\n 2:]) + [Merkle.commit_(leafs[:len(leafs) // 2])]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < 1 << len(path\n ), 'cannot verify invalid index'\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n elif index % 2 == 0:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(leaf +\n path[0]).digest())\n else:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(path\n [0] + leaf).digest())\n\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n <mask token>\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]\n ):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest()\n )\n",
"step-2": "<mask token>\n\n\nclass Merkle:\n <mask token>\n <mask token>\n\n def commit_(leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(Merkle.commit_(leafs[:len(leafs) // 2]) + Merkle.\n commit_(leafs[len(leafs) // 2:])).digest()\n\n def open_(index, leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < len(leafs) / 2:\n return Merkle.open_(index, leafs[:len(leafs) // 2]) + [Merkle.\n commit_(leafs[len(leafs) // 2:])]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) //\n 2:]) + [Merkle.commit_(leafs[:len(leafs) // 2])]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < 1 << len(path\n ), 'cannot verify invalid index'\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n elif index % 2 == 0:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(leaf +\n path[0]).digest())\n else:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(path\n [0] + leaf).digest())\n\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def open(index: int, leafs: List[Any]):\n return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]\n ):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest()\n )\n",
"step-3": "<mask token>\n\n\nclass Merkle:\n \"\"\"\n We consider the merkle tree as a commitment protocol implementing\n the interface:\n * commit_() : commits to a list by computing the merkle tree.\n * open_() : opens the commitment by computing the authentification path.\n * verify_() : verify that a value is commited by checking that its a leaf.\n \"\"\"\n H = blake2b\n\n def commit_(leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(Merkle.commit_(leafs[:len(leafs) // 2]) + Merkle.\n commit_(leafs[len(leafs) // 2:])).digest()\n\n def open_(index, leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < len(leafs) / 2:\n return Merkle.open_(index, leafs[:len(leafs) // 2]) + [Merkle.\n commit_(leafs[len(leafs) // 2:])]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) //\n 2:]) + [Merkle.commit_(leafs[:len(leafs) // 2])]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < 1 << len(path\n ), 'cannot verify invalid index'\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n elif index % 2 == 0:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(leaf +\n path[0]).digest())\n else:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(path\n [0] + leaf).digest())\n\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def open(index: int, leafs: List[Any]):\n return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]\n ):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest()\n )\n",
"step-4": "<mask token>\nfrom typing import List, Any\nfrom hashlib import blake2b\n\n\nclass Merkle:\n \"\"\"\n We consider the merkle tree as a commitment protocol implementing\n the interface:\n * commit_() : commits to a list by computing the merkle tree.\n * open_() : opens the commitment by computing the authentification path.\n * verify_() : verify that a value is commited by checking that its a leaf.\n \"\"\"\n H = blake2b\n\n def commit_(leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(Merkle.commit_(leafs[:len(leafs) // 2]) + Merkle.\n commit_(leafs[len(leafs) // 2:])).digest()\n\n def open_(index, leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < len(leafs) / 2:\n return Merkle.open_(index, leafs[:len(leafs) // 2]) + [Merkle.\n commit_(leafs[len(leafs) // 2:])]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) //\n 2:]) + [Merkle.commit_(leafs[:len(leafs) // 2])]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < 1 << len(path\n ), 'cannot verify invalid index'\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n elif index % 2 == 0:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(leaf +\n path[0]).digest())\n else:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(path\n [0] + leaf).digest())\n\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def open(index: int, leafs: List[Any]):\n return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]\n ):\n return Merkle.verify_(root, index, 
path, Merkle.H(bytes(leaf)).digest()\n )\n",
"step-5": "\"\"\"\nMerkle: Implementation of Merkle Trees over Blake2\n\"\"\"\nfrom typing import List, Any\nfrom hashlib import blake2b\n\n\nclass Merkle:\n \"\"\"\n We consider the merkle tree as a commitment protocol implementing\n the interface:\n * commit_() : commits to a list by computing the merkle tree.\n * open_() : opens the commitment by computing the authentification path.\n * verify_() : verify that a value is commited by checking that its a leaf.\n \"\"\"\n\n H = blake2b\n\n def commit_(leafs):\n assert len(leafs) & (len(leafs) - 1) == 0, \"List must be of a power two length\"\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(\n Merkle.commit_(leafs[: (len(leafs) // 2)])\n + Merkle.commit_(leafs[(len(leafs) // 2) :])\n ).digest()\n\n def open_(index, leafs):\n assert len(leafs) & (len(leafs) - 1) == 0, \"List must be of a power two length\"\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < (len(leafs) / 2):\n return Merkle.open_(index, leafs[: (len(leafs) // 2)]) + [\n Merkle.commit_(leafs[(len(leafs) // 2) :])\n ]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) // 2 :]) + [\n Merkle.commit_(leafs[: len(leafs) // 2])\n ]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < (1 << len(path)), \"cannot verify invalid index\"\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n else:\n if index % 2 == 0:\n return Merkle.verify_(\n root, index >> 1, path[1:], Merkle.H(leaf + path[0]).digest()\n )\n else:\n return Merkle.verify_(\n root, index >> 1, path[1:], Merkle.H(path[0] + leaf).digest()\n )\n\n # The following functions expose the API and compute hashes of leafs before\n # calling the underlying code.\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in leafs])\n\n def open(index: int, leafs: 
List[Any]):\n return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in leafs])\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest())\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
from flask import (Flask, g, render_template, flash, redirect, url_for)
from flask_login import (LoginManager, login_user, logout_user,
login_required, current_user)
import forms
import models
import sqlite3
DEBUG = True
app = Flask(__name__)
app.secret_key = 'auoesh.bouoastuh.43,uoausoehuoshuosth3ououea.auoub!'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
try:
return models.user.get(models.User.id == userid)
except models.DoesNotExist:
return None
def initialize():
models.DATABASE.connect()
models.DATABASE.create_tables([models.User], safe=True)
models.DATABASE.closer()
@app.before_request
def before_request():
""""Connect to the database before each request."""
g.db = models.DATABASE
g.db.connect()
g.user = current_user
@app.after_request
def after_request(response):
"""""Close the database connection after request. """
g.db.close()
return response
@app.route('/register', methods=('GET', 'POST'))
def register():
form = forms.RegistrationForm()
if form.validate_on_submit():
flash("Yay, you registered", "sucess")
models.User.create_user(
username=form.username.data,
email=form.email.data,
password=form.password.data,
confrimpassword=form.password.data
)
return redirect(url_for('index'))
return render_template('register.html', form=form)
def check_password_hash(password, data):
pass
@app.route('/login', methods=('GET', 'POST'))
def login():
form = forms.LoginForm()
if form.validate_on_submit():
try:
user = models.User.get(models.User.emails == form.email.data)
except models.DoesNOtExit:
flash("Your email or password doesn't match !", "error")
else:
if check_password_hash(user.password, form.password.data):
login_user(user)
flash("You've been logged in:", "Sucess")
return redirect(url_for('index'))
else:
flash("Your email or password doesn't match!", "error")
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
flash("You.ve been logged out! Come back soon!", "sucess")
return redirect(url_for('index'))
@app.route('/new_post', methods=('GET', 'POST'))
@login_required #makes sures the user is logged in before been able to post
def post():
form = forms.PostForm()
if form.validate_on_submit():
models.Post.create(user=g.user._get_current_object(),
content=form.content.data.strip())
flash("Message Posted! Thanks!", "sucess")
return redirect(url_for('index'))
return render_template('post.html', form=form)
@app.route('/')
def index():
return 'Hey!'
"""
models.initialize()
try:
models.User.create_user(
username='Steve',
email='stephenashom40@gmail.com',
password='passsword',
admin=True
)
except ValueError:
pass
"""
if __name__ == '__main__':
app.run(debug=DEBUG)
|
normal
|
{
"blob_id": "849c468e4890c19806c678089ec8668576538b12",
"index": 2717,
"step-1": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\n@app.before_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\n<mask token>\n\n\n@app.route('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash('Yay, you registered', 'sucess')\n models.User.create_user(username=form.username.data, email=form.\n email.data, password=form.password.data, confrimpassword=form.\n password.data)\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\n<mask token>\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You.ve been logged out! Come back soon!', 'sucess')\n return redirect(url_for('index'))\n\n\n@app.route('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(), content=form.\n content.data.strip())\n flash('Message Posted! Thanks!', 'sucess')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n\n@app.route('/')\ndef index():\n return 'Hey!'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\n@app.before_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\n<mask token>\n\n\n@app.route('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash('Yay, you registered', 'sucess')\n models.User.create_user(username=form.username.data, email=form.\n email.data, password=form.password.data, confrimpassword=form.\n password.data)\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\n@app.route('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.emails == form.email.data)\n except models.DoesNOtExit:\n flash(\"Your email or password doesn't match !\", 'error')\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in:\", 'Sucess')\n return redirect(url_for('index'))\n else:\n flash(\"Your email or password doesn't match!\", 'error')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You.ve been logged out! Come back soon!', 'sucess')\n return redirect(url_for('index'))\n\n\n@app.route('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(), content=form.\n content.data.strip())\n flash('Message Posted! 
Thanks!', 'sucess')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n\n@app.route('/')\ndef index():\n return 'Hey!'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\n@app.before_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\n@app.after_request\ndef after_request(response):\n \"\"\"\"\"Close the database connection after request. \"\"\"\n g.db.close()\n return response\n\n\n@app.route('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash('Yay, you registered', 'sucess')\n models.User.create_user(username=form.username.data, email=form.\n email.data, password=form.password.data, confrimpassword=form.\n password.data)\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\n@app.route('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.emails == form.email.data)\n except models.DoesNOtExit:\n flash(\"Your email or password doesn't match !\", 'error')\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in:\", 'Sucess')\n return redirect(url_for('index'))\n else:\n flash(\"Your email or password doesn't match!\", 'error')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You.ve been logged out! 
Come back soon!', 'sucess')\n return redirect(url_for('index'))\n\n\n@app.route('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(), content=form.\n content.data.strip())\n flash('Message Posted! Thanks!', 'sucess')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n\n@app.route('/')\ndef index():\n return 'Hey!'\n\n\n<mask token>\n",
"step-4": "from flask import Flask, g, render_template, flash, redirect, url_for\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nimport forms\nimport models\nimport sqlite3\nDEBUG = True\napp = Flask(__name__)\napp.secret_key = 'auoesh.bouoastuh.43,uoausoehuoshuosth3ououea.auoub!'\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\n@app.before_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\n@app.after_request\ndef after_request(response):\n \"\"\"\"\"Close the database connection after request. 
\"\"\"\n g.db.close()\n return response\n\n\n@app.route('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash('Yay, you registered', 'sucess')\n models.User.create_user(username=form.username.data, email=form.\n email.data, password=form.password.data, confrimpassword=form.\n password.data)\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\n@app.route('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.emails == form.email.data)\n except models.DoesNOtExit:\n flash(\"Your email or password doesn't match !\", 'error')\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in:\", 'Sucess')\n return redirect(url_for('index'))\n else:\n flash(\"Your email or password doesn't match!\", 'error')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You.ve been logged out! Come back soon!', 'sucess')\n return redirect(url_for('index'))\n\n\n@app.route('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(), content=form.\n content.data.strip())\n flash('Message Posted! Thanks!', 'sucess')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n\n@app.route('/')\ndef index():\n return 'Hey!'\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=DEBUG)\n",
"step-5": "from flask import (Flask, g, render_template, flash, redirect, url_for)\nfrom flask_login import (LoginManager, login_user, logout_user,\n login_required, current_user)\n\nimport forms\nimport models\nimport sqlite3\n\nDEBUG = True\n\napp = Flask(__name__)\napp.secret_key = 'auoesh.bouoastuh.43,uoausoehuoshuosth3ououea.auoub!'\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.user.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\n\ndef initialize():\n models.DATABASE.connect()\n models.DATABASE.create_tables([models.User], safe=True)\n models.DATABASE.closer()\n\n\n@app.before_request\ndef before_request():\n \"\"\"\"Connect to the database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n@app.after_request\ndef after_request(response):\n \"\"\"\"\"Close the database connection after request. 
\"\"\"\n g.db.close()\n return response\n\n@app.route('/register', methods=('GET', 'POST'))\ndef register():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n flash(\"Yay, you registered\", \"sucess\")\n models.User.create_user(\n username=form.username.data,\n email=form.email.data,\n password=form.password.data,\n confrimpassword=form.password.data\n )\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\ndef check_password_hash(password, data):\n pass\n\n\n@app.route('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.emails == form.email.data)\n except models.DoesNOtExit:\n flash(\"Your email or password doesn't match !\", \"error\")\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in:\", \"Sucess\")\n return redirect(url_for('index'))\n else:\n flash(\"Your email or password doesn't match!\", \"error\")\n return render_template('login.html', form=form)\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash(\"You.ve been logged out! Come back soon!\", \"sucess\")\n return redirect(url_for('index'))\n\n@app.route('/new_post', methods=('GET', 'POST'))\n@login_required #makes sures the user is logged in before been able to post\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(),\n content=form.content.data.strip())\n flash(\"Message Posted! Thanks!\", \"sucess\")\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n\n@app.route('/')\ndef index():\n return 'Hey!'\n\n\"\"\"\nmodels.initialize()\ntry:\n models.User.create_user(\n username='Steve',\n email='stephenashom40@gmail.com',\n password='passsword',\n admin=True\n )\n except ValueError:\n pass\n\"\"\" \nif __name__ == '__main__':\n app.run(debug=DEBUG)\n",
"step-ids": [
8,
9,
10,
13,
14
]
}
|
[
8,
9,
10,
13,
14
] |
n = int(input())
a = oct(n)
b = hex(n)
print(a[2:],b[2:].upper())
#.upper : 소문자 -> 대문자
|
normal
|
{
"blob_id": "d6cea40e907a0424b2b1b8162f19aa8203443e55",
"index": 4360,
"step-1": "n = int(input())\n\na = oct(n)\nb = hex(n)\n\nprint(a[2:],b[2:].upper())\n\n#.upper : 소문자 -> 대문자\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,
seed=None):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
train = df.iloc[:train_end]
validate = df.iloc[train_end:]
return train, validate
<|reserved_special_token_0|>
def get(df):
col = df[['review_body']]
print(col.head())
aspect = df[['Aspects']]
opinions = df[['Sentiments']]
print(df.shape[0])
now = ''
for o in range(0, df.shape[0]):
d = col.iloc[o:o + 1]
sd = d.to_string(index=False, header=None)
sd = sd[1:]
l = sent_tokenize(sd)
a = aspect.iloc[o:o + 1]
sa = a.to_string(index=False, header=None)
asp = sa.split(';')
a = opinions.iloc[o:o + 1]
sa = a.to_string(index=False, header=None)
senti = sa.split(';')
if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti
):
continue
it = 0
for i in l:
chks = [x.strip() for x in senti[it].split(',')]
chka = [x.strip() for x in asp[it].split(',')]
g = []
itr = 0
if len(chks) != len(chka):
continue
for k in chka:
f = k.split(' ')
num = chks[itr]
if len(f) > 1:
h = 0
for x in f:
x = x.strip(' ')
x = x.strip('"')
g += [x]
if h < len(f) - 1:
chks.insert(itr, '1')
h += 1
else:
g += f
itr += 1
chka = g
now += i
now += '####'
j = i.split(' ')
itr = 0
for word in j:
if itr < len(chka) and word == chka[itr]:
if chks[itr] == '1':
s = word + '=T-POS'
elif chks[itr] == '0':
s = word + '=T-NEU'
else:
s = word + '=T-NEG'
itr += 1
else:
s = word + '=O'
now += s + ' '
now += '\n'
it += 1
return now
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pd.set_option('display.max_colwidth', None)
<|reserved_special_token_0|>
def train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,
seed=None):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
train = df.iloc[:train_end]
validate = df.iloc[train_end:]
return train, validate
<|reserved_special_token_0|>
def get(df):
col = df[['review_body']]
print(col.head())
aspect = df[['Aspects']]
opinions = df[['Sentiments']]
print(df.shape[0])
now = ''
for o in range(0, df.shape[0]):
d = col.iloc[o:o + 1]
sd = d.to_string(index=False, header=None)
sd = sd[1:]
l = sent_tokenize(sd)
a = aspect.iloc[o:o + 1]
sa = a.to_string(index=False, header=None)
asp = sa.split(';')
a = opinions.iloc[o:o + 1]
sa = a.to_string(index=False, header=None)
senti = sa.split(';')
if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti
):
continue
it = 0
for i in l:
chks = [x.strip() for x in senti[it].split(',')]
chka = [x.strip() for x in asp[it].split(',')]
g = []
itr = 0
if len(chks) != len(chka):
continue
for k in chka:
f = k.split(' ')
num = chks[itr]
if len(f) > 1:
h = 0
for x in f:
x = x.strip(' ')
x = x.strip('"')
g += [x]
if h < len(f) - 1:
chks.insert(itr, '1')
h += 1
else:
g += f
itr += 1
chka = g
now += i
now += '####'
j = i.split(' ')
itr = 0
for word in j:
if itr < len(chka) and word == chka[itr]:
if chks[itr] == '1':
s = word + '=T-POS'
elif chks[itr] == '0':
s = word + '=T-NEU'
else:
s = word + '=T-NEG'
itr += 1
else:
s = word + '=O'
now += s + ' '
now += '\n'
it += 1
return now
<|reserved_special_token_0|>
text_file.close()
<|reserved_special_token_0|>
text_file.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
csv_file = open('/home/debajit15/train+dev.csv')
pd.set_option('display.max_colwidth', None)
df = pd.read_csv(csv_file, sep=',')
df = df[pd.notnull(df['Aspects'])]
def train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,
seed=None):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
train = df.iloc[:train_end]
validate = df.iloc[train_end:]
return train, validate
trainl, vall = train_validate_test_split(df)
def get(df):
col = df[['review_body']]
print(col.head())
aspect = df[['Aspects']]
opinions = df[['Sentiments']]
print(df.shape[0])
now = ''
for o in range(0, df.shape[0]):
d = col.iloc[o:o + 1]
sd = d.to_string(index=False, header=None)
sd = sd[1:]
l = sent_tokenize(sd)
a = aspect.iloc[o:o + 1]
sa = a.to_string(index=False, header=None)
asp = sa.split(';')
a = opinions.iloc[o:o + 1]
sa = a.to_string(index=False, header=None)
senti = sa.split(';')
if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti
):
continue
it = 0
for i in l:
chks = [x.strip() for x in senti[it].split(',')]
chka = [x.strip() for x in asp[it].split(',')]
g = []
itr = 0
if len(chks) != len(chka):
continue
for k in chka:
f = k.split(' ')
num = chks[itr]
if len(f) > 1:
h = 0
for x in f:
x = x.strip(' ')
x = x.strip('"')
g += [x]
if h < len(f) - 1:
chks.insert(itr, '1')
h += 1
else:
g += f
itr += 1
chka = g
now += i
now += '####'
j = i.split(' ')
itr = 0
for word in j:
if itr < len(chka) and word == chka[itr]:
if chks[itr] == '1':
s = word + '=T-POS'
elif chks[itr] == '0':
s = word + '=T-NEU'
else:
s = word + '=T-NEG'
itr += 1
else:
s = word + '=O'
now += s + ' '
now += '\n'
it += 1
return now
train = get(trainl)
val = get(vall)
text_file = open('/home/debajit15/train.txt', 'w')
n = text_file.write(train)
text_file.close()
text_file = open('/home/debajit15/dev.txt', 'w')
n = text_file.write(val)
text_file.close()
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import csv
from nltk.tokenize import sent_tokenize
csv_file = open('/home/debajit15/train+dev.csv')
pd.set_option('display.max_colwidth', None)
df = pd.read_csv(csv_file, sep=',')
df = df[pd.notnull(df['Aspects'])]
def train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,
seed=None):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
train = df.iloc[:train_end]
validate = df.iloc[train_end:]
return train, validate
trainl, vall = train_validate_test_split(df)
def get(df):
col = df[['review_body']]
print(col.head())
aspect = df[['Aspects']]
opinions = df[['Sentiments']]
print(df.shape[0])
now = ''
for o in range(0, df.shape[0]):
d = col.iloc[o:o + 1]
sd = d.to_string(index=False, header=None)
sd = sd[1:]
l = sent_tokenize(sd)
a = aspect.iloc[o:o + 1]
sa = a.to_string(index=False, header=None)
asp = sa.split(';')
a = opinions.iloc[o:o + 1]
sa = a.to_string(index=False, header=None)
senti = sa.split(';')
if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti
):
continue
it = 0
for i in l:
chks = [x.strip() for x in senti[it].split(',')]
chka = [x.strip() for x in asp[it].split(',')]
g = []
itr = 0
if len(chks) != len(chka):
continue
for k in chka:
f = k.split(' ')
num = chks[itr]
if len(f) > 1:
h = 0
for x in f:
x = x.strip(' ')
x = x.strip('"')
g += [x]
if h < len(f) - 1:
chks.insert(itr, '1')
h += 1
else:
g += f
itr += 1
chka = g
now += i
now += '####'
j = i.split(' ')
itr = 0
for word in j:
if itr < len(chka) and word == chka[itr]:
if chks[itr] == '1':
s = word + '=T-POS'
elif chks[itr] == '0':
s = word + '=T-NEU'
else:
s = word + '=T-NEG'
itr += 1
else:
s = word + '=O'
now += s + ' '
now += '\n'
it += 1
return now
train = get(trainl)
val = get(vall)
text_file = open('/home/debajit15/train.txt', 'w')
n = text_file.write(train)
text_file.close()
text_file = open('/home/debajit15/dev.txt', 'w')
n = text_file.write(val)
text_file.close()
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import csv
#import nltk
#nltk.download('punkt')
from nltk.tokenize import sent_tokenize
csv_file=open("/home/debajit15/train+dev.csv")
pd.set_option('display.max_colwidth', None)
df=pd.read_csv(csv_file,sep=',');
df = df[pd.notnull(df['Aspects'])]
#print(df['Opinion_Words'].iloc[0:1])
def train_validate_test_split(df, train_percent=.8, validate_percent=.2, seed=None):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
train = df.iloc[:train_end]
validate = df.iloc[train_end:]
return train, validate
trainl,vall=train_validate_test_split(df)
def get(df):
col=df[['review_body']]
print(col.head())
aspect=df[['Aspects']]
opinions=df[['Sentiments']]
print(df.shape[0])
now=""
for o in range(0,df.shape[0]):
d=col.iloc[o:o+1]
sd=d.to_string(index=False,header=None)
sd=sd[1:]
l=sent_tokenize(sd)
a=aspect.iloc[o:o+1]
sa=a.to_string(index=False,header=None)
asp=sa.split(";")
a=opinions.iloc[o:o+1]
sa=a.to_string(index=False,header=None)
senti=sa.split(";")
if(len(asp)!=len(senti) or len(l)!=len(asp) or len(l)!=len(senti)):
continue
it=0
for i in l:
chks=[x.strip() for x in senti[it].split(",")]
chka=[x.strip() for x in asp[it].split(",")]
g=[]
itr=0
if(len(chks)!=len(chka)):
continue
for k in chka:
f=k.split(" ")
num=chks[itr]
if(len(f)>1):
h=0
for x in f:
x=x.strip(' ')
x=x.strip('"')
g+=[x]
if(h<len(f)-1):
chks.insert(itr,'1')
h+=1
else:
g+=f
itr+=1
chka=g
now+=i
now+="####"
j=i.split(" ")
itr=0
for word in j:
if itr<len(chka) and word==chka[itr] :
if chks[itr]=='1':
s=word+"=T-POS"
elif chks[itr]=='0':
s=word+"=T-NEU"
else:
s=word+"=T-NEG"
itr+=1
else:
s=word+"=O"
now+=s+" "
now+="\n"
it+=1
return now
train=get(trainl)
val=get(vall)
text_file = open("/home/debajit15/train.txt", "w")
n = text_file.write(train)
text_file.close()
text_file = open("/home/debajit15/dev.txt", "w")
n = text_file.write(val)
text_file.close()
# #print(df[['review_body']])
|
flexible
|
{
"blob_id": "c18c407476375fb1647fefaedb5d7ea0e0aabe3a",
"index": 929,
"step-1": "<mask token>\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\n<mask token>\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\n<mask token>\n",
"step-2": "<mask token>\npd.set_option('display.max_colwidth', None)\n<mask token>\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\n<mask token>\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\n<mask token>\ntext_file.close()\n<mask token>\ntext_file.close()\n",
"step-3": "<mask token>\ncsv_file = open('/home/debajit15/train+dev.csv')\npd.set_option('display.max_colwidth', None)\ndf = pd.read_csv(csv_file, sep=',')\ndf = df[pd.notnull(df['Aspects'])]\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\ntrainl, vall = train_validate_test_split(df)\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\ntrain = get(trainl)\nval = get(vall)\ntext_file = open('/home/debajit15/train.txt', 'w')\nn = 
text_file.write(train)\ntext_file.close()\ntext_file = open('/home/debajit15/dev.txt', 'w')\nn = text_file.write(val)\ntext_file.close()\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport csv\nfrom nltk.tokenize import sent_tokenize\ncsv_file = open('/home/debajit15/train+dev.csv')\npd.set_option('display.max_colwidth', None)\ndf = pd.read_csv(csv_file, sep=',')\ndf = df[pd.notnull(df['Aspects'])]\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\ntrainl, vall = train_validate_test_split(df)\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\ntrain = get(trainl)\nval = 
get(vall)\ntext_file = open('/home/debajit15/train.txt', 'w')\nn = text_file.write(train)\ntext_file.close()\ntext_file = open('/home/debajit15/dev.txt', 'w')\nn = text_file.write(val)\ntext_file.close()\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport csv\n#import nltk\n#nltk.download('punkt')\nfrom nltk.tokenize import sent_tokenize\ncsv_file=open(\"/home/debajit15/train+dev.csv\")\npd.set_option('display.max_colwidth', None)\ndf=pd.read_csv(csv_file,sep=',');\ndf = df[pd.notnull(df['Aspects'])]\n#print(df['Opinion_Words'].iloc[0:1])\n\ndef train_validate_test_split(df, train_percent=.8, validate_percent=.2, seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\ntrainl,vall=train_validate_test_split(df)\n\ndef get(df):\n\tcol=df[['review_body']]\n\tprint(col.head())\n\taspect=df[['Aspects']]\n\topinions=df[['Sentiments']]\n\tprint(df.shape[0])\n\tnow=\"\"\n\tfor o in range(0,df.shape[0]):\n\t\td=col.iloc[o:o+1]\n\t\tsd=d.to_string(index=False,header=None)\n\t\tsd=sd[1:]\n\t\tl=sent_tokenize(sd)\n\n\t\ta=aspect.iloc[o:o+1]\n\t\tsa=a.to_string(index=False,header=None)\n\t\tasp=sa.split(\";\")\n\n\t\ta=opinions.iloc[o:o+1]\n\t\tsa=a.to_string(index=False,header=None)\n\t\tsenti=sa.split(\";\")\n\n\t\tif(len(asp)!=len(senti) or len(l)!=len(asp) or len(l)!=len(senti)):\n\t\t\tcontinue\n\t\tit=0\n\t\tfor i in l:\n\t\t\tchks=[x.strip() for x in senti[it].split(\",\")]\n\t\t\tchka=[x.strip() for x in asp[it].split(\",\")]\n\n\t\t\tg=[]\n\t\t\titr=0\n\t\t\tif(len(chks)!=len(chka)):\n\t\t\t\tcontinue\n\t\t\tfor k in chka:\n\t\t\t\tf=k.split(\" \")\n\t\t\t\tnum=chks[itr]\n\t\t\t\tif(len(f)>1):\n\t\t\t\t\th=0\n\t\t\t\t\tfor x in f:\n\t\t\t\t\t\tx=x.strip(' ')\n\t\t\t\t\t\tx=x.strip('\"')\n\t\t\t\t\t\tg+=[x]\n\t\t\t\t\t\tif(h<len(f)-1):\n\t\t\t\t\t\t\tchks.insert(itr,'1')\n\t\t\t\t\t\th+=1\n\t\t\t\telse:\n\t\t\t\t\tg+=f\n\t\t\t\titr+=1\n\t\t\tchka=g\n\t\t\tnow+=i\n\t\t\tnow+=\"####\"\n\t\t\tj=i.split(\" \")\n\t\t\titr=0\n\t\t\tfor word in j:\n\t\t\t\tif itr<len(chka) and word==chka[itr] 
:\n\t\t\t\t\tif chks[itr]=='1':\n\t\t\t\t\t\ts=word+\"=T-POS\"\n\t\t\t\t\telif chks[itr]=='0':\n\t\t\t\t\t\ts=word+\"=T-NEU\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ts=word+\"=T-NEG\"\n\t\t\t\t\titr+=1\n\t\t\t\telse:\n\t\t\t\t\ts=word+\"=O\"\n\t\t\t\tnow+=s+\" \"\n\t\t\tnow+=\"\\n\"\n\t\t\tit+=1\n\treturn now\n\n\ntrain=get(trainl)\nval=get(vall)\n\ntext_file = open(\"/home/debajit15/train.txt\", \"w\")\nn = text_file.write(train)\ntext_file.close()\ntext_file = open(\"/home/debajit15/dev.txt\", \"w\")\nn = text_file.write(val)\ntext_file.close()\n\n\n# #print(df[['review_body']])\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
Visualize the predictions of a GQCNN on a dataset Visualizes TP, TN, FP, FN..
Author: Vishal Satish
"""
import copy
import logging
import numpy as np
import os
import sys
from random import shuffle
import autolab_core.utils as utils
from autolab_core import YamlConfig, Point
from perception import BinaryImage, ColorImage, DepthImage, GdImage, GrayscaleImage, RgbdImage, RenderMode
from gqcnn import Grasp2D, GQCNN, ClassificationResult, InputDataMode, ImageMode, ImageFileTemplates
from gqcnn import Visualizer as vis2d
import IPython
class GQCNNPredictionVisualizer(object):
    """ Class to visualize predictions of a GQCNN on a specified dataset.

    Depending on the configured datapoint type, visualizes true positives,
    true negatives, false positives or false negatives, up to
    ``samples_per_object`` datapoints per tensor file.
    """

    def __init__(self, config):
        """
        Parameters
        ----------
        config : dict
            dictionary of configuration parameters
        """
        # setup config
        self.cfg = config

        # setup for visualization (reads config, indexes data files, loads GQ-CNN)
        self._setup()

    def visualize(self):
        """ Visualize predictions of the GQ-CNN on the dataset.

        Iterates through the shuffled tensor files, scores every datapoint
        with the network, and displays datapoints of the configured type
        (TP / TN / FP / FN) with the predicted grasp overlaid.
        """
        logging.info('Visualizing ' + self.datapoint_type)

        # iterate through shuffled file indices
        for i in self.indices:
            im_filename = self.im_filenames[i]
            pose_filename = self.pose_filenames[i]
            label_filename = self.label_filenames[i]

            logging.info('Loading Image File: ' + im_filename + ' Pose File: ' + pose_filename + ' Label File: ' + label_filename)

            # load tensors from files; binarize the grasp metric into 0/1 labels
            metric_tensor = np.load(os.path.join(self.data_dir, label_filename))['arr_0']
            label_tensor = 1 * (metric_tensor > self.metric_thresh)
            image_tensor = np.load(os.path.join(self.data_dir, im_filename))['arr_0']
            hand_poses_tensor = np.load(os.path.join(self.data_dir, pose_filename))['arr_0']

            pose_tensor = self._read_pose_data(hand_poses_tensor, self.input_data_mode)

            # score with neural network
            pred_p_success_tensor = self._gqcnn.predict(image_tensor, pose_tensor)

            # compute results against the thresholded labels
            classification_result = ClassificationResult([pred_p_success_tensor],
                                                         [label_tensor])

            logging.info('Error rate on files: %.3f' % (classification_result.error_rate))
            logging.info('Precision on files: %.3f' % (classification_result.precision))
            logging.info('Recall on files: %.3f' % (classification_result.recall))
            mispred_ind = classification_result.mispredicted_indices()
            correct_ind = classification_result.correct_indices()

            # correct predictions hold the TP/TN candidates; mispredictions hold FP/FN
            if self.datapoint_type == 'true_positive' or self.datapoint_type == 'true_negative':
                vis_ind = correct_ind
            else:
                vis_ind = mispred_ind
            num_visualized = 0
            # visualize
            for ind in vis_ind:
                # limit the number of sampled datapoints displayed per object
                if num_visualized >= self.samples_per_object:
                    break

                # skip datapoints that do not match the requested type
                if self.datapoint_type == 'true_positive':
                    if classification_result.labels[ind] == 0:
                        continue
                elif self.datapoint_type == 'true_negative':
                    if classification_result.labels[ind] == 1:
                        continue
                elif self.datapoint_type == 'false_positive':
                    if classification_result.labels[ind] == 0:
                        continue
                elif self.datapoint_type == 'false_negative':
                    if classification_result.labels[ind] == 1:
                        continue

                # count only datapoints that are actually displayed
                # (BUGFIX: previously incremented before the type filters above,
                # so skipped datapoints silently consumed the display quota)
                num_visualized += 1

                logging.info('Datapoint %d of files for %s' % (ind, im_filename))
                logging.info('Depth: %.3f' % (hand_poses_tensor[ind, 2]))

                data = image_tensor[ind, ...]
                if self.display_image_type == RenderMode.SEGMASK:
                    image = BinaryImage(data)
                elif self.display_image_type == RenderMode.GRAYSCALE:
                    image = GrayscaleImage(data)
                elif self.display_image_type == RenderMode.COLOR:
                    image = ColorImage(data)
                elif self.display_image_type == RenderMode.DEPTH:
                    image = DepthImage(data)
                elif self.display_image_type == RenderMode.RGBD:
                    image = RgbdImage(data)
                elif self.display_image_type == RenderMode.GD:
                    image = GdImage(data)

                vis2d.figure()

                # composite modes (RGBD, GD) show both channels side by side;
                # every mode overlays the grasp at the image center with the
                # stored grasp depth and the configured gripper width
                if self.display_image_type == RenderMode.RGBD:
                    vis2d.subplot(1, 2, 1)
                    vis2d.imshow(image.color)
                    grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m)
                    grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)
                    vis2d.grasp(grasp)
                    vis2d.subplot(1, 2, 2)
                    vis2d.imshow(image.depth)
                    vis2d.grasp(grasp)
                elif self.display_image_type == RenderMode.GD:
                    vis2d.subplot(1, 2, 1)
                    vis2d.imshow(image.gray)
                    grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m)
                    grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)
                    vis2d.grasp(grasp)
                    vis2d.subplot(1, 2, 2)
                    vis2d.imshow(image.depth)
                    vis2d.grasp(grasp)
                else:
                    vis2d.imshow(image)
                    grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m)
                    grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)
                    vis2d.grasp(grasp)
                vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' % (ind,
                                                                     classification_result.pred_probs[ind, 1],
                                                                     classification_result.labels[ind]))
                vis2d.show()

        # cleanup
        self._cleanup()

    def _cleanup(self):
        """ Close GQCNN TF session"""
        self._gqcnn.close_session()

    def _setup(self):
        """ Setup for visualization: logger, config parameters, data file
        index, and the GQ-CNN model with an open TF session. """
        # setup logger
        logging.getLogger().setLevel(logging.INFO)
        logging.info('Setting up for visualization.')

        #### read config params ###

        # dataset directory
        self.data_dir = self.cfg['dataset_dir']

        # visualization params
        self.display_image_type = self.cfg['display_image_type']
        self.font_size = self.cfg['font_size']
        self.samples_per_object = self.cfg['samples_per_object']

        # analysis params
        self.datapoint_type = self.cfg['datapoint_type']
        self.image_mode = self.cfg['image_mode']
        self.input_data_mode = self.cfg['data_format']
        self.target_metric_name = self.cfg['metric_name']
        self.metric_thresh = self.cfg['metric_thresh']
        self.gripper_width_m = self.cfg['gripper_width_m']

        # setup data filenames
        self._setup_data_filenames()

        # setup shuffled file indices
        self._compute_indices()

        # load gqcnn
        logging.info('Loading GQ-CNN')
        self.model_dir = self.cfg['model_dir']
        self._gqcnn = GQCNN.load(self.model_dir)
        self._gqcnn.open_session()

    def _setup_data_filenames(self):
        """ Setup image and pose data filenames, check validity of filenames/image mode.

        Raises
        ------
        ValueError
            if the image mode is unsupported or any of the three required
            file categories (images, poses, labels) is missing.
        """
        # read in filenames of training data (poses, images, labels)
        logging.info('Reading filenames')
        all_filenames = os.listdir(self.data_dir)
        if self.image_mode == ImageMode.BINARY:
            self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tensor_template) > -1]
        elif self.image_mode == ImageMode.DEPTH:
            self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tensor_template) > -1]
        elif self.image_mode == ImageMode.BINARY_TF:
            self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tf_tensor_template) > -1]
        elif self.image_mode == ImageMode.COLOR_TF:
            self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.color_im_tf_tensor_template) > -1]
        elif self.image_mode == ImageMode.GRAY_TF:
            self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.gray_im_tf_tensor_template) > -1]
        elif self.image_mode == ImageMode.DEPTH_TF:
            self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_tensor_template) > -1]
        elif self.image_mode == ImageMode.DEPTH_TF_TABLE:
            self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_table_tensor_template) > -1]
        else:
            raise ValueError('Image mode %s not supported.' % (self.image_mode))

        self.pose_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.hand_poses_template) > -1]
        self.label_filenames = [f for f in all_filenames if f.find(self.target_metric_name) > -1]

        # sort by the numeric file index embedded in the filename suffix
        self.im_filenames.sort(key=lambda x: int(x[-9:-4]))
        self.pose_filenames.sort(key=lambda x: int(x[-9:-4]))
        self.label_filenames.sort(key=lambda x: int(x[-9:-4]))

        # check that all file categories were found
        # (BUGFIX: previously tested label_filenames twice and never pose_filenames)
        if len(self.im_filenames) == 0 or len(self.pose_filenames) == 0 or len(self.label_filenames) == 0:
            raise ValueError('1 or more required training files could not be found')

    def _compute_indices(self):
        """ Generate random file index so visualization starts from a
        different random file everytime """
        self.indices = np.arange(len(self.im_filenames))
        np.random.shuffle(self.indices)

    def _read_pose_data(self, pose_arr, input_data_mode):
        """ Read the pose data and slice it according to the specified input_data_mode

        Parameters
        ----------
        pose_arr: :obj:`ndArray`
            full pose data array read in from file
        input_data_mode: :obj:`InputDataMode`
            enum for input data mode, see optimizer_constants.py for all
            possible input data modes

        Returns
        -------
        :obj:`ndArray`
            sliced pose_data corresponding to input data mode

        Raises
        ------
        ValueError
            if the input data mode is unsupported
        """
        if input_data_mode == InputDataMode.TF_IMAGE:
            return pose_arr[:, 2:3]
        elif input_data_mode == InputDataMode.TF_IMAGE_PERSPECTIVE:
            return np.c_[pose_arr[:, 2:3], pose_arr[:, 4:6]]
        elif input_data_mode == InputDataMode.RAW_IMAGE:
            return pose_arr[:, :4]
        elif input_data_mode == InputDataMode.RAW_IMAGE_PERSPECTIVE:
            return pose_arr[:, :6]
        elif input_data_mode == InputDataMode.REGRASPING:
            # depth, approach angle, and delta angle for reorientation
            return np.c_[pose_arr[:, 2:3], pose_arr[:, 4:5], pose_arr[:, 6:7]]
        else:
            raise ValueError('Input data mode %s not supported' % (input_data_mode))
|
normal
|
{
"blob_id": "806bdb75eed91d1429d8473a50c136b58a736147",
"index": 8852,
"step-1": "\"\"\"\nVisualize the predictions of a GQCNN on a dataset Visualizes TP, TN, FP, FN..\nAuthor: Vishal Satish \n\"\"\"\nimport copy\nimport logging\nimport numpy as np\nimport os\nimport sys\nfrom random import shuffle\n\nimport autolab_core.utils as utils\nfrom autolab_core import YamlConfig, Point\nfrom perception import BinaryImage, ColorImage, DepthImage, GdImage, GrayscaleImage, RgbdImage, RenderMode\n\nfrom gqcnn import Grasp2D, GQCNN, ClassificationResult, InputDataMode, ImageMode, ImageFileTemplates\nfrom gqcnn import Visualizer as vis2d\n\nimport IPython\n\nclass GQCNNPredictionVisualizer(object):\n \"\"\" Class to visualize predictions of GQCNN on a specified dataset. Visualizes TP, TN, FP, FN. \"\"\"\n\n def __init__(self, config):\n \"\"\"\n Parameters\n ----------\n config : dict\n dictionary of configuration parameters\n \"\"\"\n # setup config\n \tself.cfg = config\n\n \t# setup for visualization\n \tself._setup()\n\n def visualize(self):\n \"\"\" Visualize predictions \"\"\"\n\n logging.info('Visualizing ' + self.datapoint_type)\n\n # iterate through shuffled file indices\n for i in self.indices:\n im_filename = self.im_filenames[i]\n pose_filename = self.pose_filenames[i]\n label_filename = self.label_filenames[i]\n\n logging.info('Loading Image File: ' + im_filename + ' Pose File: ' + pose_filename + ' Label File: ' + label_filename)\n\n # load tensors from files\n metric_tensor = np.load(os.path.join(self.data_dir, label_filename))['arr_0']\n label_tensor = 1 * (metric_tensor > self.metric_thresh)\n image_tensor = np.load(os.path.join(self.data_dir, im_filename))['arr_0']\n hand_poses_tensor = np.load(os.path.join(self.data_dir, pose_filename))['arr_0']\n\n pose_tensor = self._read_pose_data(hand_poses_tensor, self.input_data_mode)\n\n # score with neural network\n pred_p_success_tensor = self._gqcnn.predict(image_tensor, pose_tensor)\n\n # compute results\n classification_result = ClassificationResult([pred_p_success_tensor],\n 
[label_tensor])\n\n logging.info('Error rate on files: %.3f' %(classification_result.error_rate))\n logging.info('Precision on files: %.3f' %(classification_result.precision))\n logging.info('Recall on files: %.3f' %(classification_result.recall))\n mispred_ind = classification_result.mispredicted_indices()\n correct_ind = classification_result.correct_indices()\n # IPython.embed()\n\n if self.datapoint_type == 'true_positive' or self.datapoint_type == 'true_negative':\n vis_ind = correct_ind\n else:\n vis_ind = mispred_ind\n num_visualized = 0\n # visualize\n for ind in vis_ind:\n # limit the number of sampled datapoints displayed per object\n if num_visualized >= self.samples_per_object:\n break\n num_visualized += 1\n\n # don't visualize the datapoints that we don't want\n if self.datapoint_type == 'true_positive':\n if classification_result.labels[ind] == 0:\n continue\n elif self.datapoint_type == 'true_negative':\n if classification_result.labels[ind] == 1:\n continue\n elif self.datapoint_type == 'false_positive':\n if classification_result.labels[ind] == 0:\n continue\n elif self.datapoint_type == 'false_negative':\n if classification_result.labels[ind] == 1:\n continue\n\n logging.info('Datapoint %d of files for %s' %(ind, im_filename))\n logging.info('Depth: %.3f' %(hand_poses_tensor[ind, 2]))\n\n data = image_tensor[ind,...]\n if self.display_image_type == RenderMode.SEGMASK:\n image = BinaryImage(data)\n elif self.display_image_type == RenderMode.GRAYSCALE:\n image = GrayscaleImage(data)\n elif self.display_image_type == RenderMode.COLOR:\n image = ColorImage(data)\n elif self.display_image_type == RenderMode.DEPTH:\n image = DepthImage(data)\n elif self.display_image_type == RenderMode.RGBD:\n image = RgbdImage(data)\n elif self.display_image_type == RenderMode.GD:\n image = GdImage(data)\n\n vis2d.figure()\n\n if self.display_image_type == RenderMode.RGBD:\n vis2d.subplot(1,2,1)\n vis2d.imshow(image.color)\n grasp = Grasp2D(Point(image.center, 'img'), 
0, hand_poses_tensor[ind, 2], self.gripper_width_m)\n grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)\n vis2d.grasp(grasp)\n vis2d.subplot(1,2,2)\n vis2d.imshow(image.depth)\n vis2d.grasp(grasp)\n elif self.display_image_type == RenderMode.GD:\n vis2d.subplot(1,2,1)\n vis2d.imshow(image.gray)\n grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m)\n grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)\n vis2d.grasp(grasp)\n vis2d.subplot(1,2,2)\n vis2d.imshow(image.depth)\n vis2d.grasp(grasp)\n else:\n vis2d.imshow(image)\n grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m)\n grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)\n vis2d.grasp(grasp)\n vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(ind,\n classification_result.pred_probs[ind,1],\n classification_result.labels[ind]))\n vis2d.show()\n\n # cleanup\n self._cleanup()\n\n def _cleanup(self):\n \"\"\" Close GQCNN TF session\"\"\"\n \tself._gqcnn.close_session()\n\n def _setup(self):\n \"\"\" Setup for visualization \"\"\"\n \t# setup logger\n \tlogging.getLogger().setLevel(logging.INFO)\t\n logging.info('Setting up for visualization.')\n\n \t#### read config params ###\n\n \t# dataset directory\n \tself.data_dir = self.cfg['dataset_dir']\n\n \t# visualization params\n self.display_image_type = self.cfg['display_image_type']\n self.font_size = self.cfg['font_size']\n self.samples_per_object = self.cfg['samples_per_object']\n\n # analysis params\n self.datapoint_type = self.cfg['datapoint_type']\n self.image_mode = self.cfg['image_mode']\n self.input_data_mode = self.cfg['data_format']\n self.target_metric_name = self.cfg['metric_name']\n self.metric_thresh = self.cfg['metric_thresh']\n self.gripper_width_m = self.cfg['gripper_width_m']\n\n # setup data filenames\n self._setup_data_filenames()\n\n # setup shuffled file indices\n self._compute_indices()\n\n # load gqcnn\n logging.info('Loading GQ-CNN')\n 
self.model_dir = self.cfg['model_dir']\n self._gqcnn = GQCNN.load(self.model_dir)\n self._gqcnn.open_session()\n\n def _setup_data_filenames(self):\n \"\"\" Setup image and pose data filenames, subsample files, check validity of filenames/image mode \"\"\"\n\n # read in filenames of training data(poses, images, labels)\n logging.info('Reading filenames')\n all_filenames = os.listdir(self.data_dir)\n if self.image_mode== ImageMode.BINARY:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tensor_template) > -1]\n elif self.image_mode== ImageMode.BINARY_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.COLOR_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.color_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.GRAY_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.gray_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH_TF_TABLE:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_table_tensor_template) > -1]\n else:\n raise ValueError('Image mode %s not supported.' 
%(self.image_mode))\n\n self.pose_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.hand_poses_template) > -1]\n self.label_filenames = [f for f in all_filenames if f.find(self.target_metric_name) > -1]\n\n self.im_filenames.sort(key = lambda x: int(x[-9:-4]))\n self.pose_filenames.sort(key = lambda x: int(x[-9:-4]))\n self.label_filenames.sort(key = lambda x: int(x[-9:-4]))\n\n # check that all file categories were found\n if len(self.im_filenames) == 0 or len(self.label_filenames) == 0 or len(self.label_filenames) == 0:\n raise ValueError('1 or more required training files could not be found')\n\n def _compute_indices(self):\n \"\"\" Generate random file index so visualization starts from a \n different random file everytime \"\"\"\n self.indices = np.arange(len(self.im_filenames))\n np.random.shuffle(self.indices)\n\n def _read_pose_data(self, pose_arr, input_data_mode):\n \"\"\" Read the pose data and slice it according to the specified input_data_mode\n\n Parameters\n ----------\n pose_arr: :obj:`ndArray`\n full pose data array read in from file\n input_data_mode: :obj:`InputDataMode`\n enum for input data mode, see optimizer_constants.py for all\n possible input data modes \n\n Returns\n -------\n :obj:`ndArray`\n sliced pose_data corresponding to input data mode\n \"\"\"\n if input_data_mode == InputDataMode.TF_IMAGE:\n return pose_arr[:,2:3]\n elif input_data_mode == InputDataMode.TF_IMAGE_PERSPECTIVE:\n return np.c_[pose_arr[:,2:3], pose_arr[:,4:6]]\n elif input_data_mode == InputDataMode.RAW_IMAGE:\n return pose_arr[:,:4]\n elif input_data_mode == InputDataMode.RAW_IMAGE_PERSPECTIVE:\n return pose_arr[:,:6]\n elif input_data_mode == InputDataMode.REGRASPING:\n # depth, approach angle, and delta angle for reorientation\n return np.c_[pose_arr[:,2:3], pose_arr[:,4:5], pose_arr[:,6:7]]\n else:\n raise ValueError('Input data mode %s not supported' %(input_data_mode))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def scoreBarChart(names, score):
plt.bar(names, score)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def scoreBarChart(names, score):
plt.bar(names, score)
plt.show()
def multiBarChart(names, score):
plt.plot(names, score, 'ro--')
plt.plot([1, 2, 3], [70, 80, 90], 'bo:')
plt.plot([1, 1, 1], [10, 20, 30], 'r>--', [4, 4, 4], [40, 50, 60], 'y*-.')
plt.text(3, 96, '평균 : {}'.format(np.mean(score)))
plt.grid(True)
plt.xlabel('이름')
plt.ylabel('점수')
plt.title('국어 점수')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.rc('font', family='Malgun Gothic')
def scoreBarChart(names, score):
plt.bar(names, score)
plt.show()
def multiBarChart(names, score):
plt.plot(names, score, 'ro--')
plt.plot([1, 2, 3], [70, 80, 90], 'bo:')
plt.plot([1, 1, 1], [10, 20, 30], 'r>--', [4, 4, 4], [40, 50, 60], 'y*-.')
plt.text(3, 96, '평균 : {}'.format(np.mean(score)))
plt.grid(True)
plt.xlabel('이름')
plt.ylabel('점수')
plt.title('국어 점수')
plt.show()
if __name__ == '__main__':
names = ['홍길동', '이순신', '강감찬', '김유신', '임꺽정']
score = [89, 86, 97, 77, 92]
multiBarChart(names, score)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import matplotlib.pyplot as plt
import numpy as np
plt.rc('font', family='Malgun Gothic')
def scoreBarChart(names, score):
plt.bar(names, score)
plt.show()
def multiBarChart(names, score):
plt.plot(names, score, 'ro--')
plt.plot([1, 2, 3], [70, 80, 90], 'bo:')
plt.plot([1, 1, 1], [10, 20, 30], 'r>--', [4, 4, 4], [40, 50, 60], 'y*-.')
plt.text(3, 96, '평균 : {}'.format(np.mean(score)))
plt.grid(True)
plt.xlabel('이름')
plt.ylabel('점수')
plt.title('국어 점수')
plt.show()
if __name__ == '__main__':
names = ['홍길동', '이순신', '강감찬', '김유신', '임꺽정']
score = [89, 86, 97, 77, 92]
multiBarChart(names, score)
<|reserved_special_token_1|>
"""
Created on 2021. 4. 8.

@author: user
"""
import matplotlib.pyplot as plt
import numpy as np

# Register a font that can render the Hangul labels and titles.
plt.rc("font", family="Malgun Gothic")


def scoreBarChart(names, score):
    """Show a bar chart with one bar per name."""
    plt.bar(names, score)
    plt.show()


def multiBarChart(names, score):
    """Show the scores as a red dashed line, overlay two fixed demo line
    series, and annotate the chart with the score average."""
    # main series: red circle markers, dashed line
    plt.plot(names, score, "ro--")

    # fixed demonstration series drawn on the same axes
    plt.plot([1, 2, 3], [70, 80, 90], "bo:")
    plt.plot([1, 1, 1], [10, 20, 30], "r>--", [4, 4, 4], [40, 50, 60], "y*-.")

    plt.text(3, 96, "평균 : {}".format(np.mean(score)))
    plt.grid(True)

    plt.xlabel("이름")
    plt.ylabel("점수")
    plt.title("국어 점수")
    plt.show()


if __name__ == "__main__":
    names = ["홍길동", "이순신", "강감찬", "김유신", "임꺽정"]
    score = [89, 86, 97, 77, 92]

    multiBarChart(names, score)
|
flexible
|
{
"blob_id": "542602a42eb873508ce2ec39d0856f10cc1e04ff",
"index": 8426,
"step-1": "<mask token>\n\n\ndef scoreBarChart(names, score):\n plt.bar(names, score)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef scoreBarChart(names, score):\n plt.bar(names, score)\n plt.show()\n\n\ndef multiBarChart(names, score):\n plt.plot(names, score, 'ro--')\n plt.plot([1, 2, 3], [70, 80, 90], 'bo:')\n plt.plot([1, 1, 1], [10, 20, 30], 'r>--', [4, 4, 4], [40, 50, 60], 'y*-.')\n plt.text(3, 96, '평균 : {}'.format(np.mean(score)))\n plt.grid(True)\n plt.xlabel('이름')\n plt.ylabel('점수')\n plt.title('국어 점수')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\nplt.rc('font', family='Malgun Gothic')\n\n\ndef scoreBarChart(names, score):\n plt.bar(names, score)\n plt.show()\n\n\ndef multiBarChart(names, score):\n plt.plot(names, score, 'ro--')\n plt.plot([1, 2, 3], [70, 80, 90], 'bo:')\n plt.plot([1, 1, 1], [10, 20, 30], 'r>--', [4, 4, 4], [40, 50, 60], 'y*-.')\n plt.text(3, 96, '평균 : {}'.format(np.mean(score)))\n plt.grid(True)\n plt.xlabel('이름')\n plt.ylabel('점수')\n plt.title('국어 점수')\n plt.show()\n\n\nif __name__ == '__main__':\n names = ['홍길동', '이순신', '강감찬', '김유신', '임꺽정']\n score = [89, 86, 97, 77, 92]\n multiBarChart(names, score)\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.rc('font', family='Malgun Gothic')\n\n\ndef scoreBarChart(names, score):\n plt.bar(names, score)\n plt.show()\n\n\ndef multiBarChart(names, score):\n plt.plot(names, score, 'ro--')\n plt.plot([1, 2, 3], [70, 80, 90], 'bo:')\n plt.plot([1, 1, 1], [10, 20, 30], 'r>--', [4, 4, 4], [40, 50, 60], 'y*-.')\n plt.text(3, 96, '평균 : {}'.format(np.mean(score)))\n plt.grid(True)\n plt.xlabel('이름')\n plt.ylabel('점수')\n plt.title('국어 점수')\n plt.show()\n\n\nif __name__ == '__main__':\n names = ['홍길동', '이순신', '강감찬', '김유신', '임꺽정']\n score = [89, 86, 97, 77, 92]\n multiBarChart(names, score)\n",
"step-5": "'''\nCreated on 2021. 4. 8.\n\n@author: user\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rc(\"font\", family=\"Malgun Gothic\")\n\ndef scoreBarChart(names, score):\n plt.bar(names, score)\n plt.show()\n \ndef multiBarChart(names, score):\n plt.plot(names, score, \"ro--\")\n \n plt.plot([1, 2, 3], [70, 80, 90], \"bo:\")\n plt.plot([1, 1, 1], [10, 20, 30], \"r>--\", [4, 4, 4], [40, 50, 60], \"y*-.\")\n \n plt.text(3, 96, \"평균 : {}\".format(np.mean(score)))\n plt.grid(True)\n \n plt.xlabel(\"이름\")\n plt.ylabel(\"점수\")\n plt.title(\"국어 점수\")\n plt.show()\n\n\nif __name__==\"__main__\":\n names = [\"홍길동\", \"이순신\", \"강감찬\", \"김유신\", \"임꺽정\"]\n score = [89, 86, 97, 77, 92]\n \n #scoreBarChart(names, score)\n multiBarChart(names, score)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""Read N 2-D points from stdin and print the sum over unordered point pairs
of twice the pairwise Euclidean distance, divided by N."""
import itertools
import math


def average_pair_distance(points):
    """Return (sum over unordered pairs of 2 * Euclidean distance) / len(points).

    Parameters
    ----------
    points : sequence of (x, y) pairs

    Raises
    ------
    ZeroDivisionError
        if `points` is empty (matches the original script's behavior).
    """
    total = 0.0
    for (ix, iy), (jx, jy) in itertools.combinations(points, 2):
        # math.hypot is a more accurate Euclidean distance than sqrt of squares
        total += math.hypot(jx - ix, jy - iy) * 2
    return total / len(points)


def main():
    """Read the point count and coordinates, then print the result."""
    n = int(input())
    points = [list(map(int, input().split(" "))) for _ in range(n)]
    print(average_pair_distance(points))


# Guarded so importing this module does not block on stdin.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "a210a015284130f23bfec99898f2f21163a33a67",
"index": 9897,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-3": "<mask token>\nn = int(input())\na = [list(map(int, input().split(' '))) for i in range(n)]\nans = 0\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-4": "import itertools\nn = int(input())\na = [list(map(int, input().split(' '))) for i in range(n)]\nans = 0\nfor [ix, iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx - ix) ** 2 + (jy - iy) ** 2) ** 0.5 * 2\nprint(ans / n)\n",
"step-5": "import itertools\nn = int(input())\na = [list(map(int, input().split(\" \"))) for i in range(n)]\nans = 0\nfor [ix,iy], [jx, jy] in itertools.combinations(a, 2):\n ans += ((jx-ix)**2+(jy-iy)**2)**0.5*2\nprint(ans/n)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def find_answer(answer, sents):
for s_idx, sent in enumerate(sents):
if answer in sent:
return s_idx
return -1
<|reserved_special_token_0|>
def docred_refiner():
DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,
'data_processed/docred/docred_multihop_para.json')
DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
'data_raw/converted_docred_total.json')
REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,
'data_raw/refined_converted_docred_total.json')
with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'
) as reader:
raw_data = json.load(reader)
with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'
) as reader:
para_data = json.load(reader)
print('loading {} data from {}'.format(len(raw_data),
DOCRED_OUTPUT_PROCESSED_raw_file))
examples = []
answer_position = []
answer_not_found = []
no_answer_found = 0
first_one_sent = 0
title_dict = {}
tunable_count = 0
for case in tqdm(raw_data):
key = case['_id']
answer = case['answer']
context = case['context']
support_facts = case['supporting_facts']
title = context[0][0][:-2].strip()
if title not in title_dict:
title_dict[title] = 1
else:
title_dict[title] = title_dict[title] + 1
fine_tune_flag = fintuner_in_answer_context(answer=answer,
supporting_facts=support_facts, context=context)
if fine_tune_flag:
tunable_count = tunable_count + 1
ans_find_idx = find_in_answer_context(answer=answer, context=context)
if ans_find_idx >= 0:
answer_position.append(ans_find_idx)
else:
no_answer_found = no_answer_found + 1
if ans_find_idx == 0 and len(context[0][1]) > 1:
first_one_sent = first_one_sent + 1
print(len(raw_data))
print(len(answer_position))
print(sum(answer_position))
print('no answer found = {}'.format(no_answer_found))
print('first one sent = {}'.format(first_one_sent))
print('tunable count = {}'.format(tunable_count))
print('title number = {}'.format(len(title_dict)))
def docred_checker():
DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,
'data_processed/docred/docred_multihop_para.json')
DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
'data_raw/converted_docred_total.json')
with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'
) as reader:
raw_data = json.load(reader)
with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'
) as reader:
para_data = json.load(reader)
print('loading {} data from {}'.format(len(raw_data),
DOCRED_OUTPUT_PROCESSED_raw_file))
examples = []
for case in tqdm(raw_data):
key = case['_id']
for key_name, key_value in case.items():
if key_name != 'context':
print('{}: {}'.format(key_name, key_value))
else:
for ctx_idx, ctx in enumerate(key_value):
print('{}: {}'.format(ctx_idx + 1, ctx))
print('*' * 100)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_space(context_list):
space_context = []
for idx, context in enumerate(context_list):
space_sent_list = []
sent_list = context[1]
if idx == 0:
for sent_idx, sent in enumerate(sent_list):
sent = sent.replace(' .', '.')
sent = sent.replace(' ,', ',')
sent = sent.strip()
if sent_idx == 0:
space_sent_list.append(sent.strip())
else:
space_sent_list.append(' ' + sent)
else:
for sent_idx, sent in enumerate(sent_list):
sent = sent.replace(' .', '.')
sent = sent.replace(' ,', ',')
sent = sent.strip()
space_sent_list.append(' ' + sent)
space_context.append([context[0], space_sent_list])
return space_context
def find_answer(answer, sents):
for s_idx, sent in enumerate(sents):
if answer in sent:
return s_idx
return -1
<|reserved_special_token_0|>
def fintuner_in_answer_context(answer, context, supporting_facts):
ans_idx = find_answer(answer=answer, sents=context[0][1])
support_facts = set([(x[0], x[1]) for x in supporting_facts])
if ans_idx > 0 and len(support_facts) > 1:
return True
return False
def docred_refiner():
    """Scan the converted DocRED examples and print answer-location statistics.

    For every example, records which context (if any) contains the answer
    string, counts "tunable" examples (per fintuner_in_answer_context), counts
    answers found in a multi-sentence first context, and tallies distinct
    first-context titles, then prints the aggregate numbers.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,
        'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
        'data_raw/converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'
        ) as reader:
        raw_data = json.load(reader)
    # para_data is loaded but not used below; kept so a missing/corrupt
    # paragraph file still fails loudly here, as before
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'
        ) as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data),
        DOCRED_OUTPUT_PROCESSED_raw_file))
    answer_position = []  # context index (0 or 1) where each answer was found
    no_answer_found = 0   # examples whose answer appears in no context
    first_one_sent = 0    # answers found in a first context with > 1 sentence
    title_dict = {}       # first-context title -> occurrence count
    tunable_count = 0     # examples flagged by fintuner_in_answer_context
    for case in tqdm(raw_data):
        answer = case['answer']
        context = case['context']
        support_facts = case['supporting_facts']
        # drop the 2-character suffix on the stored title
        # (presumably an id marker appended upstream -- TODO confirm)
        title = context[0][0][:-2].strip()
        title_dict[title] = title_dict.get(title, 0) + 1
        if fintuner_in_answer_context(answer=answer, supporting_facts=
            support_facts, context=context):
            tunable_count = tunable_count + 1
        ans_find_idx = find_in_answer_context(answer=answer, context=context)
        if ans_find_idx >= 0:
            answer_position.append(ans_find_idx)
        else:
            no_answer_found = no_answer_found + 1
        if ans_find_idx == 0 and len(context[0][1]) > 1:
            first_one_sent = first_one_sent + 1
    print(len(raw_data))
    print(len(answer_position))
    print(sum(answer_position))
    print('no answer found = {}'.format(no_answer_found))
    print('first one sent = {}'.format(first_one_sent))
    print('tunable count = {}'.format(tunable_count))
    print('title number = {}'.format(len(title_dict)))
def docred_checker():
    """Pretty-print every converted DocRED example for manual inspection.

    Loads the paragraph and raw converted files, then dumps each raw case
    field by field; context entries are printed one per line, 1-indexed.
    """
    para_path = join(DATASET_FOLDER,
        'data_processed/docred/docred_multihop_para.json')
    raw_path = join(DATASET_FOLDER,
        'data_raw/converted_docred_total.json')
    with open(raw_path, 'r', encoding='utf-8') as reader:
        raw_data = json.load(reader)
    with open(para_path, 'r', encoding='utf-8') as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data), raw_path))
    examples = []
    for case in tqdm(raw_data):
        case_id = case['_id']  # read for parity with the raw schema (unused below)
        for field_name, field_value in case.items():
            if field_name == 'context':
                for ctx_idx, ctx in enumerate(field_value):
                    print('{}: {}'.format(ctx_idx + 1, ctx))
            else:
                print('{}: {}'.format(field_name, field_value))
        print('*' * 100)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_space(context_list):
    """Normalize sentence spacing in a list of [title, sentences] contexts.

    Tightens the space before '.' and ',', strips each sentence, and prefixes
    a single leading space to every sentence except the very first sentence
    of the very first context, so the sentences concatenate cleanly.
    """
    normalized = []
    for ctx_idx, context in enumerate(context_list):
        title, sentences = context[0], context[1]
        fixed_sents = []
        for sent_idx, sentence in enumerate(sentences):
            cleaned = sentence.replace(' .', '.').replace(' ,', ',').strip()
            # only the document-initial sentence keeps no leading space
            if ctx_idx == 0 and sent_idx == 0:
                fixed_sents.append(cleaned)
            else:
                fixed_sents.append(' ' + cleaned)
        normalized.append([title, fixed_sents])
    return normalized
def find_answer(answer, sents):
    """Return the index of the first sentence containing *answer* as a
    substring, or -1 when no sentence contains it."""
    return next((idx for idx, sentence in enumerate(sents) if answer in sentence), -1)
def find_in_answer_context(answer, context):
    """Determine which paragraph of *context* contains *answer*.

    Returns 0 when the first paragraph contains the answer, 1 when only a
    later paragraph does, and -1 when no paragraph contains it.  At most two
    paragraphs may contain the answer (asserted).
    """
    hits = [(1 if any(answer in sentence for sentence in paragraph[1]) else 0)
            for paragraph in context]
    assert sum(hits) <= 2
    if sum(hits) == 0:
        return -1
    return 0 if hits[0] == 1 else 1
def fintuner_in_answer_context(answer, context, supporting_facts):
    """Report whether a case is eligible for answer-position fine-tuning.

    True when the answer appears in the first paragraph at a sentence index
    greater than zero AND the supporting facts cover more than one distinct
    (title, sentence-index) pair; False otherwise.
    """
    first_paragraph_sents = context[0][1]
    ans_idx = next(
        (i for i, sentence in enumerate(first_paragraph_sents) if answer in sentence),
        -1,
    )
    distinct_facts = {(fact[0], fact[1]) for fact in supporting_facts}
    return ans_idx > 0 and len(distinct_facts) > 1
def docred_refiner():
    """Scan the converted DocRED dump and print answer-position statistics.

    Loads the raw converted cases and the multi-hop paragraph file, then for
    each case records which context paragraph (0 or 1) contains the answer
    string, counts fine-tunable cases (see fintuner_in_answer_context), and
    tallies first-paragraph titles.  All results are printed; nothing is
    written to disk.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,
        'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
        'data_raw/converted_docred_total.json')
    # NOTE(review): defined but never used in this function.
    REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,
        'data_raw/refined_converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'
        ) as reader:
        raw_data = json.load(reader)
    # para_data is loaded but not consumed below.
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'
        ) as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data),
        DOCRED_OUTPUT_PROCESSED_raw_file))
    examples = []
    answer_position = []  # paragraph index (0 or 1) where the answer was found
    answer_not_found = []
    no_answer_found = 0  # cases whose answer appears in no paragraph
    first_one_sent = 0  # answer in paragraph 0 while paragraph 0 has >1 sentence
    title_dict = {}  # first-paragraph title -> occurrence count
    tunable_count = 0  # cases flagged by fintuner_in_answer_context
    for case in tqdm(raw_data):
        key = case['_id']
        answer = case['answer']
        context = case['context']
        support_facts = case['supporting_facts']
        # Drop the trailing two characters of the title (presumably a suffix
        # added during conversion -- TODO confirm) before counting.
        title = context[0][0][:-2].strip()
        if title not in title_dict:
            title_dict[title] = 1
        else:
            title_dict[title] = title_dict[title] + 1
        fine_tune_flag = fintuner_in_answer_context(answer=answer,
            supporting_facts=support_facts, context=context)
        if fine_tune_flag:
            tunable_count = tunable_count + 1
        ans_find_idx = find_in_answer_context(answer=answer, context=context)
        if ans_find_idx >= 0:
            answer_position.append(ans_find_idx)
        else:
            no_answer_found = no_answer_found + 1
        if ans_find_idx == 0 and len(context[0][1]) > 1:
            first_one_sent = first_one_sent + 1
    print(len(raw_data))
    print(len(answer_position))
    print(sum(answer_position))
    print('no answer found = {}'.format(no_answer_found))
    print('first one sent = {}'.format(first_one_sent))
    print('tunable count = {}'.format(tunable_count))
    print('title number = {}'.format(len(title_dict)))
def docred_checker():
    """Pretty-print every case of the converted DocRED dump for inspection.

    Loads the raw converted cases (and the multi-hop paragraph file, which
    is read but not otherwise used here) and prints each case's fields,
    enumerating context paragraphs with 1-based indices.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,
        'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
        'data_raw/converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'
        ) as reader:
        raw_data = json.load(reader)
    # para_data is loaded but not consumed below.
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'
        ) as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data),
        DOCRED_OUTPUT_PROCESSED_raw_file))
    examples = []  # NOTE(review): never populated in this function
    for case in tqdm(raw_data):
        key = case['_id']
        for key_name, key_value in case.items():
            if key_name != 'context':
                print('{}: {}'.format(key_name, key_value))
            else:
                # 'context' holds a list of paragraphs; print 1-based index.
                for ctx_idx, ctx in enumerate(key_value):
                    print('{}: {}'.format(ctx_idx + 1, ctx))
        print('*' * 100)
<|reserved_special_token_1|>
from envs import DATASET_FOLDER
from os.path import join
import json
import collections
from tqdm import tqdm
def add_space(context_list):
    """Normalize punctuation spacing in a list of [title, sentences] contexts.

    Each sentence loses the space before '.' and ',' and is stripped; every
    sentence except the very first sentence of the first context is then
    prefixed with a single space so sentences concatenate naturally.
    Returns a new list of [title, sentences] pairs.
    """
    normalized = []
    for ctx_idx, ctx in enumerate(context_list):
        title, sentences = ctx[0], ctx[1]
        fixed_sentences = []
        for sent_idx, sentence in enumerate(sentences):
            cleaned = sentence.replace(' .', '.').replace(' ,', ',').strip()
            if ctx_idx == 0 and sent_idx == 0:
                fixed_sentences.append(cleaned)
            else:
                fixed_sentences.append(' ' + cleaned)
        normalized.append([title, fixed_sentences])
    return normalized
def find_answer(answer, sents):
    """Return the index of the first sentence containing *answer* as a
    substring, or -1 when no sentence contains it."""
    return next((idx for idx, sentence in enumerate(sents) if answer in sentence), -1)
def find_in_answer_context(answer, context):
    """Determine which paragraph of *context* contains *answer*.

    Returns 0 when the first paragraph contains the answer, 1 when only a
    later paragraph does, and -1 when no paragraph contains it.  At most two
    paragraphs may contain the answer (asserted).
    """
    hits = [(1 if any(answer in sentence for sentence in paragraph[1]) else 0)
            for paragraph in context]
    assert sum(hits) <= 2
    if sum(hits) == 0:
        return -1
    return 0 if hits[0] == 1 else 1
def fintuner_in_answer_context(answer, context, supporting_facts):
    """Report whether a case is eligible for answer-position fine-tuning.

    True when the answer appears in the first paragraph at a sentence index
    greater than zero AND the supporting facts cover more than one distinct
    (title, sentence-index) pair; False otherwise.
    """
    first_paragraph_sents = context[0][1]
    ans_idx = next(
        (i for i, sentence in enumerate(first_paragraph_sents) if answer in sentence),
        -1,
    )
    distinct_facts = {(fact[0], fact[1]) for fact in supporting_facts}
    return ans_idx > 0 and len(distinct_facts) > 1
def docred_refiner():
    """Scan the converted DocRED dump and print answer-position statistics.

    Loads the raw converted cases and the multi-hop paragraph file, then for
    each case records which context paragraph (0 or 1) contains the answer
    string, counts fine-tunable cases (see fintuner_in_answer_context), and
    tallies first-paragraph titles.  All results are printed; nothing is
    written to disk.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,
        'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
        'data_raw/converted_docred_total.json')
    # NOTE(review): defined but never used in this function.
    REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,
        'data_raw/refined_converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'
        ) as reader:
        raw_data = json.load(reader)
    # para_data is loaded but not consumed below.
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'
        ) as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data),
        DOCRED_OUTPUT_PROCESSED_raw_file))
    examples = []
    answer_position = []  # paragraph index (0 or 1) where the answer was found
    answer_not_found = []
    no_answer_found = 0  # cases whose answer appears in no paragraph
    first_one_sent = 0  # answer in paragraph 0 while paragraph 0 has >1 sentence
    title_dict = {}  # first-paragraph title -> occurrence count
    tunable_count = 0  # cases flagged by fintuner_in_answer_context
    for case in tqdm(raw_data):
        key = case['_id']
        answer = case['answer']
        context = case['context']
        support_facts = case['supporting_facts']
        # Drop the trailing two characters of the title (presumably a suffix
        # added during conversion -- TODO confirm) before counting.
        title = context[0][0][:-2].strip()
        if title not in title_dict:
            title_dict[title] = 1
        else:
            title_dict[title] = title_dict[title] + 1
        fine_tune_flag = fintuner_in_answer_context(answer=answer,
            supporting_facts=support_facts, context=context)
        if fine_tune_flag:
            tunable_count = tunable_count + 1
        ans_find_idx = find_in_answer_context(answer=answer, context=context)
        if ans_find_idx >= 0:
            answer_position.append(ans_find_idx)
        else:
            no_answer_found = no_answer_found + 1
        if ans_find_idx == 0 and len(context[0][1]) > 1:
            first_one_sent = first_one_sent + 1
    print(len(raw_data))
    print(len(answer_position))
    print(sum(answer_position))
    print('no answer found = {}'.format(no_answer_found))
    print('first one sent = {}'.format(first_one_sent))
    print('tunable count = {}'.format(tunable_count))
    print('title number = {}'.format(len(title_dict)))
def docred_checker():
    """Pretty-print every case of the converted DocRED dump for inspection.

    Loads the raw converted cases (and the multi-hop paragraph file, which
    is read but not otherwise used here) and prints each case's fields,
    enumerating context paragraphs with 1-based indices.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,
        'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
        'data_raw/converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'
        ) as reader:
        raw_data = json.load(reader)
    # para_data is loaded but not consumed below.
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'
        ) as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data),
        DOCRED_OUTPUT_PROCESSED_raw_file))
    examples = []  # NOTE(review): never populated in this function
    for case in tqdm(raw_data):
        key = case['_id']
        for key_name, key_value in case.items():
            if key_name != 'context':
                print('{}: {}'.format(key_name, key_value))
            else:
                # 'context' holds a list of paragraphs; print 1-based index.
                for ctx_idx, ctx in enumerate(key_value):
                    print('{}: {}'.format(ctx_idx + 1, ctx))
        print('*' * 100)
<|reserved_special_token_1|>
from envs import DATASET_FOLDER
from os.path import join
import json
import collections
from tqdm import tqdm
def add_space(context_list):
    """Normalize punctuation spacing in a list of [title, sentences] contexts.

    Each sentence loses the space before '.' and ',' and is stripped; every
    sentence except the very first sentence of the first context is then
    prefixed with a single space so sentences concatenate naturally.
    Returns a new list of [title, sentences] pairs.
    """
    normalized = []
    for ctx_idx, ctx in enumerate(context_list):
        title, sentences = ctx[0], ctx[1]
        fixed_sentences = []
        for sent_idx, sentence in enumerate(sentences):
            cleaned = sentence.replace(' .', '.').replace(' ,', ',').strip()
            if ctx_idx == 0 and sent_idx == 0:
                fixed_sentences.append(cleaned)
            else:
                fixed_sentences.append(' ' + cleaned)
        normalized.append([title, fixed_sentences])
    return normalized
def find_answer(answer, sents):
    """Return the index of the first sentence containing *answer* as a
    substring, or -1 when no sentence contains it."""
    return next((idx for idx, sentence in enumerate(sents) if answer in sentence), -1)
def find_in_answer_context(answer, context):
    """Determine which paragraph of *context* contains *answer*.

    Returns 0 when the first paragraph contains the answer, 1 when only a
    later paragraph does, and -1 when no paragraph contains it.  At most two
    paragraphs may contain the answer (asserted).
    """
    hits = [(1 if any(answer in sentence for sentence in paragraph[1]) else 0)
            for paragraph in context]
    assert sum(hits) <= 2
    if sum(hits) == 0:
        return -1
    return 0 if hits[0] == 1 else 1
def fintuner_in_answer_context(answer, context, supporting_facts):
    """Report whether a case is eligible for answer-position fine-tuning.

    True when the answer appears in the first paragraph at a sentence index
    greater than zero AND the supporting facts cover more than one distinct
    (title, sentence-index) pair; False otherwise.
    """
    first_paragraph_sents = context[0][1]
    ans_idx = next(
        (i for i, sentence in enumerate(first_paragraph_sents) if answer in sentence),
        -1,
    )
    distinct_facts = {(fact[0], fact[1]) for fact in supporting_facts}
    return ans_idx > 0 and len(distinct_facts) > 1
def docred_refiner():
    """Scan the converted DocRED dump and print answer-position statistics.

    Loads the raw converted cases and the multi-hop paragraph file, then for
    each case records which context paragraph (0 or 1) contains the answer,
    counts fine-tunable cases (see fintuner_in_answer_context), and tallies
    first-paragraph titles.  Results are printed; nothing is written to disk.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
        'data_raw/converted_docred_total.json') # converted_docred_total.json
    # NOTE(review): defined but never used in this function.
    REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/refined_converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:
        raw_data = json.load(reader)
    # para_data is loaded but not consumed below.
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))
    examples = []
    answer_position = []  # paragraph index (0 or 1) where the answer was found
    answer_not_found = []
    no_answer_found = 0  # cases whose answer appears in no paragraph
    first_one_sent = 0  # answer in paragraph 0 while paragraph 0 has >1 sentence
    title_dict = {}  # first-paragraph title -> occurrence count
    tunable_count = 0  # cases flagged by fintuner_in_answer_context
    for case in tqdm(raw_data):
        # print(case)
        key = case['_id']
        answer = case['answer']
        context = case['context']
        support_facts = case['supporting_facts']
        # Drop the trailing two characters of the title (presumably a suffix
        # added during conversion -- TODO confirm) before counting.
        title = context[0][0][:-2].strip()
        if title not in title_dict:
            title_dict[title] = 1
        else:
            title_dict[title] = title_dict[title] + 1

        fine_tune_flag = fintuner_in_answer_context(answer=answer, supporting_facts=support_facts, context=context)
        if fine_tune_flag:
            tunable_count = tunable_count + 1
        ans_find_idx = find_in_answer_context(answer=answer, context=context)
        if ans_find_idx >= 0:
            answer_position.append(ans_find_idx)
        else:
            no_answer_found = no_answer_found + 1

        if ans_find_idx == 0 and len(context[0][1]) > 1:
            first_one_sent = first_one_sent + 1
        # for ctx_idx, ctx in enumerate(context):
        #     is_answer_found = find_answer(answer=answer, sents=ctx[1])
        #     if is_answer_found:
        #         answer_position.append(ctx_idx)
        #         break
        #     else:
        #         continue
        # for key_name, key_value in case.items():
        #     if key_name != 'context':
        #         print('{}: {}'.format(key_name, key_value))
        #     else:
        #         for ctx_idx, ctx in enumerate(key_value):
        #             print('{}: {}'.format(ctx_idx + 1, ctx))
        # context = case['context']
        # space_context = add_space(context_list=context)
        # case['context'] = space_context
        # examples.append(case)
        # print(context)
        # print('-' * 50)
        # print(add_space(context_list=context))
        # print('*' * 100)
    print(len(raw_data))
    print(len(answer_position))
    print(sum(answer_position))
    print('no answer found = {}'.format(no_answer_found))
    print('first one sent = {}'.format(first_one_sent))
    print('tunable count = {}'.format(tunable_count))
    print('title number = {}'.format(len(title_dict)))
    # sorted_title_dict = sorted(title_dict.items(), key=lambda kv: kv[1])
    # for key, value in sorted_title_dict:
    #     print('{}: {}'.format(key, value))
def docred_checker():
    """Pretty-print every case of the converted DocRED dump for inspection.

    Loads the raw converted cases (and the multi-hop paragraph file, which
    is read but not otherwise used here) and prints each case's fields,
    enumerating context paragraphs with 1-based indices.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER, 'data_raw/converted_docred_total.json') #converted_docred_total.json
    # Saved_raw_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/space_converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:
        raw_data = json.load(reader)
    # para_data is loaded but not consumed below.
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))
    examples = []  # NOTE(review): never populated in this function
    for case in tqdm(raw_data):
        # print(case)
        key = case['_id']
        for key_name, key_value in case.items():
            if key_name != 'context':
                print('{}: {}'.format(key_name, key_value))
            else:
                # 'context' holds a list of paragraphs; print 1-based index.
                for ctx_idx, ctx in enumerate(key_value):
                    print('{}: {}'.format(ctx_idx + 1, ctx))
        # context = case['context']
        # space_context = add_space(context_list=context)
        # case['context'] = space_context
        # examples.append(case)
        # print(context)
        # print('-' * 50)
        # print(add_space(context_list=context))
        print('*' * 100)
        # print('key {}'.format(key))
        # print(para_data[key])

    # json.dump(examples, open(Saved_raw_DOCRED_OUTPUT_PROCESSED, 'w'))
|
flexible
|
{
"blob_id": "a179d3d2f04a101eaa60b5964c2b1cd77071633f",
"index": 5344,
"step-1": "<mask token>\n\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\n\n<mask token>\n\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,\n 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n key = case['_id']\n answer = case['answer']\n context = case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n fine_tune_flag = fintuner_in_answer_context(answer=answer,\n supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = 
{}'.format(len(title_dict)))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case in tqdm(raw_data):\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n print('*' * 100)\n",
"step-2": "<mask token>\n\n\ndef add_space(context_list):\n space_context = []\n for idx, context in enumerate(context_list):\n space_sent_list = []\n sent_list = context[1]\n if idx == 0:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n if sent_idx == 0:\n space_sent_list.append(sent.strip())\n else:\n space_sent_list.append(' ' + sent)\n else:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n space_sent_list.append(' ' + sent)\n space_context.append([context[0], space_sent_list])\n return space_context\n\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\n\n<mask token>\n\n\ndef fintuner_in_answer_context(answer, context, supporting_facts):\n ans_idx = find_answer(answer=answer, sents=context[0][1])\n support_facts = set([(x[0], x[1]) for x in supporting_facts])\n if ans_idx > 0 and len(support_facts) > 1:\n return True\n return False\n\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,\n 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n key = case['_id']\n answer = case['answer']\n context = 
case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n fine_tune_flag = fintuner_in_answer_context(answer=answer,\n supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = {}'.format(len(title_dict)))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case in tqdm(raw_data):\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n print('*' * 100)\n",
"step-3": "<mask token>\n\n\ndef add_space(context_list):\n space_context = []\n for idx, context in enumerate(context_list):\n space_sent_list = []\n sent_list = context[1]\n if idx == 0:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n if sent_idx == 0:\n space_sent_list.append(sent.strip())\n else:\n space_sent_list.append(' ' + sent)\n else:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n space_sent_list.append(' ' + sent)\n space_context.append([context[0], space_sent_list])\n return space_context\n\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\n\ndef find_in_answer_context(answer, context):\n founds = []\n for ctx_idx, ctx in enumerate(context):\n ans_idx = find_answer(answer=answer, sents=ctx[1])\n if ans_idx >= 0:\n founds.append(1)\n else:\n founds.append(0)\n ans_found_idx = -1\n assert sum(founds) <= 2\n if sum(founds) > 0:\n if founds[0] == 1:\n ans_found_idx = 0\n else:\n ans_found_idx = 1\n return ans_found_idx\n\n\ndef fintuner_in_answer_context(answer, context, supporting_facts):\n ans_idx = find_answer(answer=answer, sents=context[0][1])\n support_facts = set([(x[0], x[1]) for x in supporting_facts])\n if ans_idx > 0 and len(support_facts) > 1:\n return True\n return False\n\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,\n 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as 
reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n key = case['_id']\n answer = case['answer']\n context = case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n fine_tune_flag = fintuner_in_answer_context(answer=answer,\n supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = {}'.format(len(title_dict)))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case in tqdm(raw_data):\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 
'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n print('*' * 100)\n",
"step-4": "from envs import DATASET_FOLDER\nfrom os.path import join\nimport json\nimport collections\nfrom tqdm import tqdm\n\n\ndef add_space(context_list):\n space_context = []\n for idx, context in enumerate(context_list):\n space_sent_list = []\n sent_list = context[1]\n if idx == 0:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n if sent_idx == 0:\n space_sent_list.append(sent.strip())\n else:\n space_sent_list.append(' ' + sent)\n else:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n space_sent_list.append(' ' + sent)\n space_context.append([context[0], space_sent_list])\n return space_context\n\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\n\ndef find_in_answer_context(answer, context):\n founds = []\n for ctx_idx, ctx in enumerate(context):\n ans_idx = find_answer(answer=answer, sents=ctx[1])\n if ans_idx >= 0:\n founds.append(1)\n else:\n founds.append(0)\n ans_found_idx = -1\n assert sum(founds) <= 2\n if sum(founds) > 0:\n if founds[0] == 1:\n ans_found_idx = 0\n else:\n ans_found_idx = 1\n return ans_found_idx\n\n\ndef fintuner_in_answer_context(answer, context, supporting_facts):\n ans_idx = find_answer(answer=answer, sents=context[0][1])\n support_facts = set([(x[0], x[1]) for x in supporting_facts])\n if ans_idx > 0 and len(support_facts) > 1:\n return True\n return False\n\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,\n 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n 
raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n key = case['_id']\n answer = case['answer']\n context = case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n fine_tune_flag = fintuner_in_answer_context(answer=answer,\n supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = {}'.format(len(title_dict)))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case 
in tqdm(raw_data):\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n print('*' * 100)\n",
"step-5": "from envs import DATASET_FOLDER\nfrom os.path import join\nimport json\nimport collections\nfrom tqdm import tqdm\n\ndef add_space(context_list):\n space_context = []\n for idx, context in enumerate(context_list):\n space_sent_list = []\n sent_list = context[1]\n if idx == 0:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n if sent_idx == 0:\n space_sent_list.append(sent.strip())\n else:\n space_sent_list.append(' ' + sent)\n else:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n space_sent_list.append(' ' + sent)\n space_context.append([context[0], space_sent_list])\n return space_context\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\ndef find_in_answer_context(answer, context):\n founds = []\n for ctx_idx, ctx in enumerate(context):\n ans_idx = find_answer(answer=answer, sents=ctx[1])\n if ans_idx >= 0:\n founds.append(1)\n # if ctx_idx == 0:\n # print('{} : {}: {}'.format(ctx_idx, ans_idx, len(ctx[1])))\n else:\n founds.append(0)\n ans_found_idx = -1\n assert sum(founds) <= 2\n if sum(founds) > 0:\n if founds[0] == 1:\n ans_found_idx = 0\n else:\n ans_found_idx = 1\n return ans_found_idx\n\ndef fintuner_in_answer_context(answer, context, supporting_facts):\n ans_idx = find_answer(answer=answer, sents=context[0][1])\n support_facts = set([(x[0], x[1]) for x in supporting_facts])\n if ans_idx > 0 and len(support_facts) > 1:\n # if (context[0][0], ans_idx) not in support_facts:\n # print(ans_idx, len(context[0][1]))\n # print(supporting_facts)\n return True\n return False\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json') # 
converted_docred_total.json\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n # print(case)\n key = case['_id']\n answer = case['answer']\n context = case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n\n fine_tune_flag = fintuner_in_answer_context(answer=answer, supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n # for ctx_idx, ctx in enumerate(context):\n # is_answer_found = find_answer(answer=answer, sents=ctx[1])\n # if is_answer_found:\n # answer_position.append(ctx_idx)\n # break\n # else:\n # continue\n # for key_name, key_value in case.items():\n # if key_name != 'context':\n # print('{}: {}'.format(key_name, key_value))\n # else:\n # for ctx_idx, ctx in enumerate(key_value):\n # print('{}: {}'.format(ctx_idx + 1, ctx))\n # context = case['context']\n # space_context = add_space(context_list=context)\n # case['context'] = space_context\n # examples.append(case)\n # print(context)\n # print('-' * 50)\n # print(add_space(context_list=context))\n # 
print('*' * 100)\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = {}'.format(len(title_dict)))\n # sorted_title_dict = sorted(title_dict.items(), key=lambda kv: kv[1])\n # for key, value in sorted_title_dict:\n # print('{}: {}'.format(key, value))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER, 'data_raw/converted_docred_total.json') #converted_docred_total.json\n # Saved_raw_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/space_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case in tqdm(raw_data):\n # print(case)\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n # context = case['context']\n # space_context = add_space(context_list=context)\n # case['context'] = space_context\n # examples.append(case)\n # print(context)\n # print('-' * 50)\n # print(add_space(context_list=context))\n print('*' * 100)\n # print('key {}'.format(key))\n # print(para_data[key])\n\n # json.dump(examples, open(Saved_raw_DOCRED_OUTPUT_PROCESSED, 'w'))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from collections import defaultdict
# The order of the steps doesn't matter, so only the *net* displacement
# counts.  The original shortcut `n + max(ne, nw)` overcounts whenever the
# net north/south component opposes the diagonals (e.g. two "s" plus one
# "ne" and one "nw" is 1 step away, not 3), so we use the standard
# cube-coordinate hex distance instead.
def dist(counts):
    """Return the hex-grid distance of the net displacement in *counts*.

    counts maps direction names ("n", "ne", "se", "s", "sw", "nw") to step
    counts; missing directions default to 0, so plain dicts work too.
    """
    g = counts.get
    # Cube coordinates (x + y + z == 0) for the six directions:
    #   n=(0,1,-1)  s=(0,-1,1)  ne=(1,0,-1)  sw=(-1,0,1)  nw=(-1,1,0)  se=(1,-1,0)
    x = g("ne", 0) - g("sw", 0) + g("se", 0) - g("nw", 0)
    y = g("n", 0) - g("s", 0) + g("nw", 0) - g("se", 0)
    z = -x - y
    # Hex distance in cube coordinates is the largest absolute component.
    return max(abs(x), abs(y), abs(z))
if __name__ == "__main__":
    # Part 1 and part 2 share a single pass over the input: the original
    # read and re-parsed the file twice.  Keep running direction counts and
    # check the distance after every step, so the furthest point ever
    # reached (part 2) falls out of the same loop.
    counts = defaultdict(int)
    with open("day11.input.txt") as f:
        dir_list = f.read().strip().split(",")
    max_d = -1
    for step in dir_list:
        counts[step] += 1
        max_d = max(max_d, dist(counts))
    # Same output order as before: final distance (part 1), then the
    # running maximum (part 2).
    print(dist(counts))
    print("max=", max_d)
|
normal
|
{
"blob_id": "ac2e9145e3345e5448683d684b69d2356e3214ce",
"index": 9999,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef dist(counts):\n n = abs(counts['n'] - counts['s'])\n nw = abs(counts['nw'] - counts['se'])\n ne = abs(counts['ne'] - counts['sw'])\n return n + max(ne, nw)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef dist(counts):\n n = abs(counts['n'] - counts['s'])\n nw = abs(counts['nw'] - counts['se'])\n ne = abs(counts['ne'] - counts['sw'])\n return n + max(ne, nw)\n\n\nif __name__ == '__main__':\n counts = defaultdict(int)\n with open('day11.input.txt') as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(',')\n for dir in dir_list:\n counts[dir] += 1\n print(dist(counts))\n counts = defaultdict(int)\n with open('day11.input.txt') as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(',')\n max_d = -1\n for dir in dir_list:\n counts[dir] += 1\n max_d = max(max_d, dist(counts))\n print('max=', max_d)\n",
"step-4": "from collections import defaultdict\n\n\ndef dist(counts):\n n = abs(counts['n'] - counts['s'])\n nw = abs(counts['nw'] - counts['se'])\n ne = abs(counts['ne'] - counts['sw'])\n return n + max(ne, nw)\n\n\nif __name__ == '__main__':\n counts = defaultdict(int)\n with open('day11.input.txt') as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(',')\n for dir in dir_list:\n counts[dir] += 1\n print(dist(counts))\n counts = defaultdict(int)\n with open('day11.input.txt') as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(',')\n max_d = -1\n for dir in dir_list:\n counts[dir] += 1\n max_d = max(max_d, dist(counts))\n print('max=', max_d)\n",
"step-5": "from collections import defaultdict\n\n# The order of the steps doesn't matter, so the distance\n# function is very simple\ndef dist(counts):\n n = abs(counts[\"n\"] - counts[\"s\"])\n nw = abs(counts[\"nw\"] - counts[\"se\"])\n ne = abs(counts[\"ne\"] - counts[\"sw\"])\n return n + max(ne,nw)\n\nif __name__ == \"__main__\":\n counts = defaultdict(int)\n with open(\"day11.input.txt\") as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(\",\")\n # The order of the steps doesn't matter so we just need\n # to count each type of step\n for dir in dir_list:\n counts[dir] += 1\n\n print(dist(counts))\n\n counts = defaultdict(int)\n with open(\"day11.input.txt\") as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(\",\")\n # print(dir_list)\n max_d = -1\n for dir in dir_list:\n # Keep running counts and check for distance at every\n # step to find max\n counts[dir] += 1\n max_d = max(max_d,dist(counts))\n print(\"max=\", max_d)\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solve_model(K, R, N, L_max, G):
print(
'parameters==| k=%d \t |R=%s \t |N=%d \t |eta=%f \t |L_max=%f \t |G=%f'
% (K, R, N, eta, L_max, G))
R_sum = sum(R.values())
gamma = c0 * d0 * 1 / R_sum
alpha = eta * eta * G
beta = eta * D * L_max * math.sqrt(G)
lamda = eta * eta * epsilon * epsilon
print(
'parameters==| d0=%f \t |gamma=%s \t |alpha=%f \t |beta=%f \t |lamda=%f'
% (d0, gamma, alpha, beta, lamda))
m = Model('mip1')
T = m.addVar(vtype=GRB.INTEGER, name='T')
E = {}
for k in range(K):
E[k] = m.addVar(vtype=GRB.INTEGER, name='E')
t_max = m.addVar(vtype=GRB.CONTINUOUS, name='t_max')
a = m.addVar(vtype=GRB.CONTINUOUS, name='R_1')
b = m.addVar(vtype=GRB.CONTINUOUS, name='R_2')
c, mul, pow = {}, {}, {}
for k in range(K):
c[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_3')
mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_4')
pow[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_5')
d = m.addVar(vtype=GRB.CONTINUOUS, name='R_6')
E_min = m.addVar(vtype=GRB.INTEGER, name='E_min')
E_min_2 = m.addVar(vtype=GRB.INTEGER, name='E_min_2')
s = m.addVar(vtype=GRB.CONTINUOUS, name='R_sum')
m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)
for k in range(K):
m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], 'c0')
m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))
for k in range(K):
m.addConstr(mul[k] == E[k] * (E[k] - 1))
m.addConstr(c[k] * c[k] == R[k])
m.addConstr(pow[k] == mul[k] * c[k])
m.addConstr(E_min <= E[k])
m.addConstr(b == quicksum(pow[k] * pow[k] for k in range(K)))
m.addConstr(d * d == b)
m.addConstr(s == alpha * a + D * D + beta * d)
m.addConstr(E_min_2 == E_min * E_min)
m.addConstr(s * s <= E_min_2 * T * 1, 'c1')
m.params.NonConvex = 2
m.setParam('OutputFlag', 0)
m.params.TimeLimit = 40
m.optimize()
local_epoch = []
if m.status == GRB.OPTIMAL:
print('solved!')
Ex = m.getAttr('x', E)
for k in range(K):
if E[k].x > 0.0001:
local_epoch.append(int(Ex[k]))
else:
local_epoch = [(1) for i in range(K)]
print('Optimization solution:', local_epoch)
return local_epoch
<|reserved_special_token_1|>
<|reserved_special_token_0|>
N = 897
p = {(0): 3, (1): 1.5, (2): 1.2, (3): 2, (4): 1}
t_2 = 0
epsilon = 0.01
D = 1000
c0 = 20
d0 = 0.2
eta = 0.5
def solve_model(K, R, N, L_max, G):
print(
'parameters==| k=%d \t |R=%s \t |N=%d \t |eta=%f \t |L_max=%f \t |G=%f'
% (K, R, N, eta, L_max, G))
R_sum = sum(R.values())
gamma = c0 * d0 * 1 / R_sum
alpha = eta * eta * G
beta = eta * D * L_max * math.sqrt(G)
lamda = eta * eta * epsilon * epsilon
print(
'parameters==| d0=%f \t |gamma=%s \t |alpha=%f \t |beta=%f \t |lamda=%f'
% (d0, gamma, alpha, beta, lamda))
m = Model('mip1')
T = m.addVar(vtype=GRB.INTEGER, name='T')
E = {}
for k in range(K):
E[k] = m.addVar(vtype=GRB.INTEGER, name='E')
t_max = m.addVar(vtype=GRB.CONTINUOUS, name='t_max')
a = m.addVar(vtype=GRB.CONTINUOUS, name='R_1')
b = m.addVar(vtype=GRB.CONTINUOUS, name='R_2')
c, mul, pow = {}, {}, {}
for k in range(K):
c[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_3')
mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_4')
pow[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_5')
d = m.addVar(vtype=GRB.CONTINUOUS, name='R_6')
E_min = m.addVar(vtype=GRB.INTEGER, name='E_min')
E_min_2 = m.addVar(vtype=GRB.INTEGER, name='E_min_2')
s = m.addVar(vtype=GRB.CONTINUOUS, name='R_sum')
m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)
for k in range(K):
m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], 'c0')
m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))
for k in range(K):
m.addConstr(mul[k] == E[k] * (E[k] - 1))
m.addConstr(c[k] * c[k] == R[k])
m.addConstr(pow[k] == mul[k] * c[k])
m.addConstr(E_min <= E[k])
m.addConstr(b == quicksum(pow[k] * pow[k] for k in range(K)))
m.addConstr(d * d == b)
m.addConstr(s == alpha * a + D * D + beta * d)
m.addConstr(E_min_2 == E_min * E_min)
m.addConstr(s * s <= E_min_2 * T * 1, 'c1')
m.params.NonConvex = 2
m.setParam('OutputFlag', 0)
m.params.TimeLimit = 40
m.optimize()
local_epoch = []
if m.status == GRB.OPTIMAL:
print('solved!')
Ex = m.getAttr('x', E)
for k in range(K):
if E[k].x > 0.0001:
local_epoch.append(int(Ex[k]))
else:
local_epoch = [(1) for i in range(K)]
print('Optimization solution:', local_epoch)
return local_epoch
<|reserved_special_token_1|>
from gurobipy import *
import math
N = 897
p = {(0): 3, (1): 1.5, (2): 1.2, (3): 2, (4): 1}
t_2 = 0
epsilon = 0.01
D = 1000
c0 = 20
d0 = 0.2
eta = 0.5
def solve_model(K, R, N, L_max, G):
print(
'parameters==| k=%d \t |R=%s \t |N=%d \t |eta=%f \t |L_max=%f \t |G=%f'
% (K, R, N, eta, L_max, G))
R_sum = sum(R.values())
gamma = c0 * d0 * 1 / R_sum
alpha = eta * eta * G
beta = eta * D * L_max * math.sqrt(G)
lamda = eta * eta * epsilon * epsilon
print(
'parameters==| d0=%f \t |gamma=%s \t |alpha=%f \t |beta=%f \t |lamda=%f'
% (d0, gamma, alpha, beta, lamda))
m = Model('mip1')
T = m.addVar(vtype=GRB.INTEGER, name='T')
E = {}
for k in range(K):
E[k] = m.addVar(vtype=GRB.INTEGER, name='E')
t_max = m.addVar(vtype=GRB.CONTINUOUS, name='t_max')
a = m.addVar(vtype=GRB.CONTINUOUS, name='R_1')
b = m.addVar(vtype=GRB.CONTINUOUS, name='R_2')
c, mul, pow = {}, {}, {}
for k in range(K):
c[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_3')
mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_4')
pow[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_5')
d = m.addVar(vtype=GRB.CONTINUOUS, name='R_6')
E_min = m.addVar(vtype=GRB.INTEGER, name='E_min')
E_min_2 = m.addVar(vtype=GRB.INTEGER, name='E_min_2')
s = m.addVar(vtype=GRB.CONTINUOUS, name='R_sum')
m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)
for k in range(K):
m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], 'c0')
m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))
for k in range(K):
m.addConstr(mul[k] == E[k] * (E[k] - 1))
m.addConstr(c[k] * c[k] == R[k])
m.addConstr(pow[k] == mul[k] * c[k])
m.addConstr(E_min <= E[k])
m.addConstr(b == quicksum(pow[k] * pow[k] for k in range(K)))
m.addConstr(d * d == b)
m.addConstr(s == alpha * a + D * D + beta * d)
m.addConstr(E_min_2 == E_min * E_min)
m.addConstr(s * s <= E_min_2 * T * 1, 'c1')
m.params.NonConvex = 2
m.setParam('OutputFlag', 0)
m.params.TimeLimit = 40
m.optimize()
local_epoch = []
if m.status == GRB.OPTIMAL:
print('solved!')
Ex = m.getAttr('x', E)
for k in range(K):
if E[k].x > 0.0001:
local_epoch.append(int(Ex[k]))
else:
local_epoch = [(1) for i in range(K)]
print('Optimization solution:', local_epoch)
return local_epoch
<|reserved_special_token_1|>
from gurobipy import *
import math
# params.NonConvex = 2
# K = 5
# R = {0: 1000, 1: 5000, 2: 10000, 3: 20000, 4: 69354} # imbalanced
# R = {0: 50, 1: 100, 2: 150, 3: 84, 4: 400} # imbalanced
# R = {0: 100, 1: 200, 2: 484} # imbalanced
# R = {0: 10, 1: 20, 2: 30, 3: 50, 4: 100} # imbalanced
# R = {0: 10, 1: 13, 2: 20, 3: 30, 4: 50} # imbalanced
# R = {0: 42, 1: 42, 2: 42, 3: 42, 4: 42} # balanced
# R_sum = sum(R.values())
# print('sum of R:', R_sum)
# Model hyperparameters.  Semantics inferred from how solve_model() uses
# them -- TODO confirm against the accompanying paper/experiment config.
N = 897
# B = 100
# R = {0: 10, 1: 23, 2: 90}
# R_sum = sum(R.values())
# K = len(R)
# p = {0: 3, 1: 1.5, 2: 1.2, 3: 2, 4: 1, 5: 1.3, 6: 1, 7: 2, 8: 3, 9: 2}
# p[k] divides the per-client time constraint below, so it reads as a
# per-client processing-speed factor -- confirm
p = {0: 3, 1: 1.5, 2: 1.2, 3: 2, 4: 1}
t_2 = 0  # fixed additive per-round time term in the objective
epsilon = 0.01  # only enters lamda, which is computed but unused below
D = 1000
c0 = 20
d0 = 0.2
eta = 0.5  # presumably a learning rate -- enters alpha and beta; confirm
# L_max = 0.4561413560946449
# G = 0.02
# G = 0.08
# L_max = 0.09
# L_max = 1.167833240579852
# G = 0.49404299383897043
def solve_model(K, R, N, L_max, G):
    """Build and solve a non-convex Gurobi MIQCP for per-client epoch counts.

    Args (semantics inferred from the formulas below -- TODO confirm):
        K: number of clients.
        R: dict client index -> local data size.
        N: only used in the progress print; not part of the model.
        L_max, G: constants entering beta and alpha (smoothness /
            gradient-bound style terms).

    Returns:
        List of K positive integer epoch counts if an optimal solution is
        found within the 40 s limit; otherwise a list of all 1s.
    """
    print('parameters==| k=%d \t |R=%s \t |N=%d \t |eta=%f \t |L_max=%f \t |G=%f'% (K, R, N, eta, L_max, G))
    R_sum = sum(R.values())
    gamma = c0 * d0 * 1 / R_sum
    alpha = eta * eta * G
    beta = eta * D * L_max * math.sqrt(G)
    lamda = eta * eta * epsilon * epsilon  # computed but unused below
    print('parameters==| d0=%f \t |gamma=%s \t |alpha=%f \t |beta=%f \t |lamda=%f'% (d0, gamma, alpha, beta, lamda))
    # Create a new model
    m = Model("mip1")
    # Create variables
    T = m.addVar(vtype=GRB.INTEGER, name="T")  # number of communication rounds
    E = {}
    for k in range(K):
        # NOTE(review): every E[k] shares the Gurobi name "E" (likewise the
        # auxiliary vars below) -- legal but hard to debug in LP dumps.
        E[k] = m.addVar(vtype=GRB.INTEGER, name="E")
    t_max = m.addVar(vtype=GRB.CONTINUOUS, name="t_max")  # slowest client's per-round time
    a = m.addVar(vtype=GRB.CONTINUOUS, name="R_1")
    b = m.addVar(vtype=GRB.CONTINUOUS, name="R_2")
    c, mul, pow = {}, {}, {}
    for k in range(K):
        c[k] = m.addVar(vtype=GRB.CONTINUOUS, name="R_3")    # c[k] = sqrt(R[k])
        mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name="R_4")  # mul[k] = E[k]*(E[k]-1)
        pow[k] = m.addVar(vtype=GRB.CONTINUOUS, name="R_5")  # pow[k] = mul[k]*sqrt(R[k])
    d = m.addVar(vtype=GRB.CONTINUOUS, name="R_6")
    E_min = m.addVar(vtype=GRB.INTEGER, name="E_min")
    E_min_2 = m.addVar(vtype=GRB.INTEGER, name="E_min_2")
    s = m.addVar(vtype=GRB.CONTINUOUS, name="R_sum")
    # Set objective: total wall-clock time = rounds * (slowest client + fixed overhead)
    m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)
    # Add constraint: T_max >= E_k * R_k (scaled by data size, divided by speed p[k])
    for k in range(K):
        m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], "c0")
    # Add constraint: a == \sum E_k * E_k * R_k
    m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))
    for k in range(K):
        m.addConstr(mul[k] == E[k] * (E[k] - 1))
        m.addConstr(c[k] * c[k] == R[k])
        m.addConstr(pow[k] == mul[k] * c[k])
        # m.addConstr(E[k] >=1)
        m.addConstr(E_min <= E[k])  # E_min is a lower bound on all E[k]
    # b = sum_k pow[k]^2, and d = sqrt(b)
    m.addConstr(b == quicksum((pow[k] * pow[k]) for k in range(K)))
    m.addConstr(d * d == b)
    m.addConstr(s == alpha * a + D * D + beta * d)
    m.addConstr(E_min_2 == E_min * E_min)
    # Add constraint: x + y >= 1 R_e <= epsilon  (convergence-bound constraint s^2 <= E_min^2 * T)
    m.addConstr(s * s <= E_min_2 * T * 1, "c1")
    # Required for the bilinear/quadratic-equality constraints above.
    m.params.NonConvex = 2
    m.setParam("OutputFlag", 0)  # silence solver log
    m.params.TimeLimit = 40 # cap solve time (original comment said 100 s; actual limit is 40 s)
    m.optimize()
    local_epoch = []
    if m.status == GRB.OPTIMAL:
        print('solved!')
        Ex = m.getAttr('x', E)
        for k in range(K):
            if E[k].x > 0.0001:  # drop zero-valued epochs (numerical tolerance)
                local_epoch.append(int(Ex[k]))
    else:
        # Fallback when no optimum is found in time: one local epoch each.
        local_epoch = [1 for i in range(K)]
    print('Optimization solution:', local_epoch)
    return local_epoch
# K = 5
# G = 0.23
# L_max = 1.48
# R = {0: 10, 1: 13, 2: 20, 3: 30, 4: 50} # imbalanced
# N = 30000
# eta = 0.5
# print('parameters:', K, R, N, eta, L_max, G)
# ll = solve_model(K, R, N, L_max, G)
|
flexible
|
{
"blob_id": "2ed9eafb6e26971f642d1e33cbb3d1f3df34990a",
"index": 3401,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solve_model(K, R, N, L_max, G):\n print(\n 'parameters==| k=%d \\t |R=%s \\t |N=%d \\t |eta=%f \\t |L_max=%f \\t |G=%f'\n % (K, R, N, eta, L_max, G))\n R_sum = sum(R.values())\n gamma = c0 * d0 * 1 / R_sum\n alpha = eta * eta * G\n beta = eta * D * L_max * math.sqrt(G)\n lamda = eta * eta * epsilon * epsilon\n print(\n 'parameters==| d0=%f \\t |gamma=%s \\t |alpha=%f \\t |beta=%f \\t |lamda=%f'\n % (d0, gamma, alpha, beta, lamda))\n m = Model('mip1')\n T = m.addVar(vtype=GRB.INTEGER, name='T')\n E = {}\n for k in range(K):\n E[k] = m.addVar(vtype=GRB.INTEGER, name='E')\n t_max = m.addVar(vtype=GRB.CONTINUOUS, name='t_max')\n a = m.addVar(vtype=GRB.CONTINUOUS, name='R_1')\n b = m.addVar(vtype=GRB.CONTINUOUS, name='R_2')\n c, mul, pow = {}, {}, {}\n for k in range(K):\n c[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_3')\n mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_4')\n pow[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_5')\n d = m.addVar(vtype=GRB.CONTINUOUS, name='R_6')\n E_min = m.addVar(vtype=GRB.INTEGER, name='E_min')\n E_min_2 = m.addVar(vtype=GRB.INTEGER, name='E_min_2')\n s = m.addVar(vtype=GRB.CONTINUOUS, name='R_sum')\n m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)\n for k in range(K):\n m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], 'c0')\n m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))\n for k in range(K):\n m.addConstr(mul[k] == E[k] * (E[k] - 1))\n m.addConstr(c[k] * c[k] == R[k])\n m.addConstr(pow[k] == mul[k] * c[k])\n m.addConstr(E_min <= E[k])\n m.addConstr(b == quicksum(pow[k] * pow[k] for k in range(K)))\n m.addConstr(d * d == b)\n m.addConstr(s == alpha * a + D * D + beta * d)\n m.addConstr(E_min_2 == E_min * E_min)\n m.addConstr(s * s <= E_min_2 * T * 1, 'c1')\n m.params.NonConvex = 2\n m.setParam('OutputFlag', 0)\n m.params.TimeLimit = 40\n m.optimize()\n local_epoch = []\n if m.status == GRB.OPTIMAL:\n print('solved!')\n Ex = m.getAttr('x', E)\n for k in range(K):\n if E[k].x > 0.0001:\n 
local_epoch.append(int(Ex[k]))\n else:\n local_epoch = [(1) for i in range(K)]\n print('Optimization solution:', local_epoch)\n return local_epoch\n",
"step-3": "<mask token>\nN = 897\np = {(0): 3, (1): 1.5, (2): 1.2, (3): 2, (4): 1}\nt_2 = 0\nepsilon = 0.01\nD = 1000\nc0 = 20\nd0 = 0.2\neta = 0.5\n\n\ndef solve_model(K, R, N, L_max, G):\n print(\n 'parameters==| k=%d \\t |R=%s \\t |N=%d \\t |eta=%f \\t |L_max=%f \\t |G=%f'\n % (K, R, N, eta, L_max, G))\n R_sum = sum(R.values())\n gamma = c0 * d0 * 1 / R_sum\n alpha = eta * eta * G\n beta = eta * D * L_max * math.sqrt(G)\n lamda = eta * eta * epsilon * epsilon\n print(\n 'parameters==| d0=%f \\t |gamma=%s \\t |alpha=%f \\t |beta=%f \\t |lamda=%f'\n % (d0, gamma, alpha, beta, lamda))\n m = Model('mip1')\n T = m.addVar(vtype=GRB.INTEGER, name='T')\n E = {}\n for k in range(K):\n E[k] = m.addVar(vtype=GRB.INTEGER, name='E')\n t_max = m.addVar(vtype=GRB.CONTINUOUS, name='t_max')\n a = m.addVar(vtype=GRB.CONTINUOUS, name='R_1')\n b = m.addVar(vtype=GRB.CONTINUOUS, name='R_2')\n c, mul, pow = {}, {}, {}\n for k in range(K):\n c[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_3')\n mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_4')\n pow[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_5')\n d = m.addVar(vtype=GRB.CONTINUOUS, name='R_6')\n E_min = m.addVar(vtype=GRB.INTEGER, name='E_min')\n E_min_2 = m.addVar(vtype=GRB.INTEGER, name='E_min_2')\n s = m.addVar(vtype=GRB.CONTINUOUS, name='R_sum')\n m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)\n for k in range(K):\n m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], 'c0')\n m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))\n for k in range(K):\n m.addConstr(mul[k] == E[k] * (E[k] - 1))\n m.addConstr(c[k] * c[k] == R[k])\n m.addConstr(pow[k] == mul[k] * c[k])\n m.addConstr(E_min <= E[k])\n m.addConstr(b == quicksum(pow[k] * pow[k] for k in range(K)))\n m.addConstr(d * d == b)\n m.addConstr(s == alpha * a + D * D + beta * d)\n m.addConstr(E_min_2 == E_min * E_min)\n m.addConstr(s * s <= E_min_2 * T * 1, 'c1')\n m.params.NonConvex = 2\n m.setParam('OutputFlag', 0)\n m.params.TimeLimit = 40\n m.optimize()\n local_epoch 
= []\n if m.status == GRB.OPTIMAL:\n print('solved!')\n Ex = m.getAttr('x', E)\n for k in range(K):\n if E[k].x > 0.0001:\n local_epoch.append(int(Ex[k]))\n else:\n local_epoch = [(1) for i in range(K)]\n print('Optimization solution:', local_epoch)\n return local_epoch\n",
"step-4": "from gurobipy import *\nimport math\nN = 897\np = {(0): 3, (1): 1.5, (2): 1.2, (3): 2, (4): 1}\nt_2 = 0\nepsilon = 0.01\nD = 1000\nc0 = 20\nd0 = 0.2\neta = 0.5\n\n\ndef solve_model(K, R, N, L_max, G):\n print(\n 'parameters==| k=%d \\t |R=%s \\t |N=%d \\t |eta=%f \\t |L_max=%f \\t |G=%f'\n % (K, R, N, eta, L_max, G))\n R_sum = sum(R.values())\n gamma = c0 * d0 * 1 / R_sum\n alpha = eta * eta * G\n beta = eta * D * L_max * math.sqrt(G)\n lamda = eta * eta * epsilon * epsilon\n print(\n 'parameters==| d0=%f \\t |gamma=%s \\t |alpha=%f \\t |beta=%f \\t |lamda=%f'\n % (d0, gamma, alpha, beta, lamda))\n m = Model('mip1')\n T = m.addVar(vtype=GRB.INTEGER, name='T')\n E = {}\n for k in range(K):\n E[k] = m.addVar(vtype=GRB.INTEGER, name='E')\n t_max = m.addVar(vtype=GRB.CONTINUOUS, name='t_max')\n a = m.addVar(vtype=GRB.CONTINUOUS, name='R_1')\n b = m.addVar(vtype=GRB.CONTINUOUS, name='R_2')\n c, mul, pow = {}, {}, {}\n for k in range(K):\n c[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_3')\n mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_4')\n pow[k] = m.addVar(vtype=GRB.CONTINUOUS, name='R_5')\n d = m.addVar(vtype=GRB.CONTINUOUS, name='R_6')\n E_min = m.addVar(vtype=GRB.INTEGER, name='E_min')\n E_min_2 = m.addVar(vtype=GRB.INTEGER, name='E_min_2')\n s = m.addVar(vtype=GRB.CONTINUOUS, name='R_sum')\n m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)\n for k in range(K):\n m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], 'c0')\n m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))\n for k in range(K):\n m.addConstr(mul[k] == E[k] * (E[k] - 1))\n m.addConstr(c[k] * c[k] == R[k])\n m.addConstr(pow[k] == mul[k] * c[k])\n m.addConstr(E_min <= E[k])\n m.addConstr(b == quicksum(pow[k] * pow[k] for k in range(K)))\n m.addConstr(d * d == b)\n m.addConstr(s == alpha * a + D * D + beta * d)\n m.addConstr(E_min_2 == E_min * E_min)\n m.addConstr(s * s <= E_min_2 * T * 1, 'c1')\n m.params.NonConvex = 2\n m.setParam('OutputFlag', 0)\n m.params.TimeLimit = 40\n 
m.optimize()\n local_epoch = []\n if m.status == GRB.OPTIMAL:\n print('solved!')\n Ex = m.getAttr('x', E)\n for k in range(K):\n if E[k].x > 0.0001:\n local_epoch.append(int(Ex[k]))\n else:\n local_epoch = [(1) for i in range(K)]\n print('Optimization solution:', local_epoch)\n return local_epoch\n",
"step-5": "from gurobipy import *\r\nimport math\r\n\r\n# params.NonConvex = 2\r\n# K = 5\r\n# R = {0: 1000, 1: 5000, 2: 10000, 3: 20000, 4: 69354} # imbalanced\r\n\r\n# R = {0: 50, 1: 100, 2: 150, 3: 84, 4: 400} # imbalanced\r\n\r\n# R = {0: 100, 1: 200, 2: 484} # imbalanced\r\n\r\n# R = {0: 10, 1: 20, 2: 30, 3: 50, 4: 100} # imbalanced\r\n# R = {0: 10, 1: 13, 2: 20, 3: 30, 4: 50} # imbalanced\r\n# R = {0: 42, 1: 42, 2: 42, 3: 42, 4: 42} # balanced\r\n# R_sum = sum(R.values())\r\n# print('sum of R:', R_sum)\r\nN = 897\r\n# B = 100\r\n# R = {0: 10, 1: 23, 2: 90}\r\n# R_sum = sum(R.values())\r\n# K = len(R)\r\n# p = {0: 3, 1: 1.5, 2: 1.2, 3: 2, 4: 1, 5: 1.3, 6: 1, 7: 2, 8: 3, 9: 2}\r\np = {0: 3, 1: 1.5, 2: 1.2, 3: 2, 4: 1}\r\n\r\nt_2 = 0\r\nepsilon = 0.01\r\nD = 1000\r\nc0 = 20\r\nd0 = 0.2\r\neta = 0.5\r\n# L_max = 0.4561413560946449\r\n# G = 0.02\r\n# G = 0.08\r\n# L_max = 0.09\r\n# L_max = 1.167833240579852\r\n# G = 0.49404299383897043\r\n\r\ndef solve_model(K, R, N, L_max, G):\r\n print('parameters==| k=%d \\t |R=%s \\t |N=%d \\t |eta=%f \\t |L_max=%f \\t |G=%f'% (K, R, N, eta, L_max, G))\r\n R_sum = sum(R.values())\r\n gamma = c0 * d0 * 1 / R_sum\r\n alpha = eta * eta * G\r\n beta = eta * D * L_max * math.sqrt(G)\r\n lamda = eta * eta * epsilon * epsilon\r\n print('parameters==| d0=%f \\t |gamma=%s \\t |alpha=%f \\t |beta=%f \\t |lamda=%f'% (d0, gamma, alpha, beta, lamda))\r\n\r\n # Create a new model\r\n m = Model(\"mip1\")\r\n\r\n # Create variables\r\n T = m.addVar(vtype=GRB.INTEGER, name=\"T\")\r\n E = {}\r\n for k in range(K):\r\n E[k] = m.addVar(vtype=GRB.INTEGER, name=\"E\")\r\n\r\n t_max = m.addVar(vtype=GRB.CONTINUOUS, name=\"t_max\")\r\n a = m.addVar(vtype=GRB.CONTINUOUS, name=\"R_1\")\r\n b = m.addVar(vtype=GRB.CONTINUOUS, name=\"R_2\")\r\n c, mul, pow = {}, {}, {}\r\n for k in range(K):\r\n c[k] = m.addVar(vtype=GRB.CONTINUOUS, name=\"R_3\")\r\n mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name=\"R_4\")\r\n pow[k] = m.addVar(vtype=GRB.CONTINUOUS, 
name=\"R_5\")\r\n d = m.addVar(vtype=GRB.CONTINUOUS, name=\"R_6\")\r\n E_min = m.addVar(vtype=GRB.INTEGER, name=\"E_min\")\r\n E_min_2 = m.addVar(vtype=GRB.INTEGER, name=\"E_min_2\")\r\n s = m.addVar(vtype=GRB.CONTINUOUS, name=\"R_sum\")\r\n\r\n # Set objective\r\n m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)\r\n\r\n # Add constraint: T_max >= E_k * R_k\r\n for k in range(K):\r\n m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], \"c0\")\r\n\r\n # Add constraint: a == \\sum E_k * E_k * R_k\r\n m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))\r\n\r\n for k in range(K):\r\n m.addConstr(mul[k] == E[k] * (E[k] - 1))\r\n m.addConstr(c[k] * c[k] == R[k])\r\n m.addConstr(pow[k] == mul[k] * c[k])\r\n # m.addConstr(E[k] >=1)\r\n m.addConstr(E_min <= E[k])\r\n\r\n m.addConstr(b == quicksum((pow[k] * pow[k]) for k in range(K)))\r\n m.addConstr(d * d == b)\r\n m.addConstr(s == alpha * a + D * D + beta * d)\r\n m.addConstr(E_min_2 == E_min * E_min)\r\n\r\n # Add constraint: x + y >= 1 R_e <= epsilon\r\n m.addConstr(s * s <= E_min_2 * T * 1, \"c1\")\r\n\r\n m.params.NonConvex = 2\r\n m.setParam(\"OutputFlag\", 0)\r\n m.params.TimeLimit = 40 # 限制求解时间为 100s\r\n m.optimize()\r\n local_epoch = []\r\n if m.status == GRB.OPTIMAL:\r\n print('solved!')\r\n Ex = m.getAttr('x', E)\r\n for k in range(K):\r\n if E[k].x > 0.0001:\r\n local_epoch.append(int(Ex[k]))\r\n else:\r\n local_epoch = [1 for i in range(K)]\r\n print('Optimization solution:', local_epoch)\r\n return local_epoch\r\n\r\n# K = 5\r\n# G = 0.23\r\n# L_max = 1.48\r\n# R = {0: 10, 1: 13, 2: 20, 3: 30, 4: 50} # imbalanced\r\n# N = 30000\r\n# eta = 0.5\r\n# print('parameters:', K, R, N, eta, L_max, G)\r\n# ll = solve_model(K, R, N, L_max, G)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Duck typing
Ref: http://www.voidspace.org.uk/python/articles/duck_typing.shtml
"""
##########
# arithmetic operator (syntactic sugar): `a + b` dispatches to the
# left operand's __add__ method
print 3 + 3
# same as >>>
print int.__add__(3, 3)
# <<<
# overload '+' operator: any object defining __add__ can appear on the
# left of `+` -- the operand types never need to match (duck typing)
class Klass1(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def __add__(self, other):
        # deliberately asymmetric: our `a` minus the *other* object's `b`
        return self.a - other.b
# mirror image of Klass1: uses our `b` and the other object's `a`,
# so obj1 + obj2 and obj2 + obj1 give different results
class Klass2(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def __add__(self, other):
        return self.b - other.a
obj1 = Klass1(1, 2)
obj2 = Klass2(10, 20)
# 1 - 20 == -19: Klass1.__add__ runs because obj1 is the left operand
print obj1 + obj2
# same as >>>
print obj1.__add__(obj2)
# <<<
##########
# data access for sequence type objects(list, tuple) and mapping type object(dict)
# (syntactic sugar): `obj[key]` dispatches to __getitem__ regardless of type
a = [0,1,2]
print a[0]
# same as >>>
print list.__getitem__(a, 0)
# <<<
b = {'a':0, 'b':1}
print b['a']
# same as >>>
print dict.__getitem__(b, 'a')
# <<<
##########
# function call
# callable() checks whether a var has a __call__ attr; `f(...)` is sugar
# for `f.__call__(...)`
def f(arg):
    print arg
f(123)
# >>> 123
# same as >>>
f.__call__(123)
# >>> 123
# <<<
\
# 'Duck typing' happens because when we do var['member'] Python doesn't care what type object var is.
# All it cares is whether the call to its __getitem__ method returns anything sensible. If not - an error will be raised. Something like TypeError: Unsubscriptable object..
# This means you can create your own classes that have their own internal data structures - but are accessed using normal Python syntax. This is awfully convenient.
# isinstance(object, dict) returns True if object is a dictionary - or an instance of a subclass of dict.
# Instead of:
#
# if isinstance(object, dict):
# value = object[member]
#
# it is considered more pythonic to do :
#
# try:
# value = object[member]
# except TypeError:
# # do something else
#
# Our example above could become :
#
# if hasattr(object, 'keys'):
# value = object[member]
#
|
normal
|
{
"blob_id": "776470546585257bf06073e2d894e8a04cf2376d",
"index": 727,
"step-1": "\"\"\"\nDuck typing\nRef: http://www.voidspace.org.uk/python/articles/duck_typing.shtml\n\"\"\"\n\n##########\n# mathmatic operator (syntactic sugar)\nprint 3 + 3\n# same as >>>\nprint int.__add__(3, 3)\n# <<<\n\n# overload '+' operator\nclass Klass1(object):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n def __add__(self, other):\n return self.a - other.b\n\nclass Klass2(object):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n def __add__(self, other):\n return self.b - other.a\n\nobj1 = Klass1(1, 2)\nobj2 = Klass2(10, 20)\nprint obj1 + obj2\n# same as >>>\nprint obj1.__add__(obj2)\n# <<<\n\n\n##########\n# data access for sequence type objects(list, tuple) and mapping type object(dict)\n# (syntactic sugar)\na = [0,1,2]\nprint a[0]\n# same as >>>\nprint list.__getitem__(a, 0)\n# <<<\n\nb = {'a':0, 'b':1}\nprint b['a']\n# same as >>>\nprint dict.__getitem__(b, 'a')\n# <<<\n\n##########\n# function call\n# callable checks where a var has __call__ attr.\ndef f(arg):\n print arg\n\nf(123)\n# >>> 123\n# same as >>>\nf.__call__(123)\n# >>> 123\n# <<<\n\\\n\n\n# 'Duck typing' happens because when we do var['member'] Python doesn't care what type object var is.\n# All it cares is whether the call to its __getitem__ method returns anything sensible. If not - an error will be raised. Something like TypeError: Unsubscriptable object..\n# This means you can create your own classes that have their own internal data structures - but are accessed using normal Python syntax. This is awfully convenient.\n\n# isinstance(object, dict) returns True if object is a dictionary - or an instance of a subclass of dict.\n# Instead of:\n#\n# if isinstance(object, dict):\n# value = object[member]\n#\n# it is considered more pythonic to do :\n#\n# try:\n# value = object[member]\n# except TypeError:\n# # do something else\n#\n# Our example above could become :\n#\n# if hasattr(object, 'keys'):\n# value = object[member]\n#\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import tensorflow as tf
from keras import layers, Model, Input
from keras.utils import Progbar, to_categorical
from keras.datasets.mnist import load_data
import numpy as np
import matplotlib.pyplot as plt
import config
import datetime
# MNIST setup: images reshaped to NHWC float32, labels one-hot encoded.
img_height, img_width, _ = config.IMAGE_SHAPE
(X, Y), (_, _) = load_data()  # the held-out test split is discarded
X = X.reshape((-1, img_height, img_width, 1))
X = X.astype("float32")
Y = to_categorical(Y, num_classes=10, dtype="float32")
def preprocess(img, lbl):
    """Scale pixel values from [0, 255] into [-1, 1] (the generator's tanh
    range) and return the (image_tensor, label) pair unchanged otherwise."""
    scaled = (img - 127.5) / 127.5
    return tf.convert_to_tensor(scaled, dtype=tf.float32), lbl
class Generator(Model):
    """Conditional-GAN generator.

    Maps a (noise, one-hot label) pair to a 28x28x1 image in [-1, 1]:
    dense projection to a 7x7x128 feature map, then transposed
    convolutions upsampling 7 -> 14 -> 28.
    """
    def __init__(self, name):
        super(Generator, self).__init__(name=name)
        # Projects the concatenated noise+label vector up to 7*7*128 units.
        self.dense = layers.Dense(7*7*128)
        # conv1 keeps 7x7; the two stride-2 layers upsample to 14x14, 28x28.
        self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding="same")
        self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding="same")
        self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2, padding="same")
        # tanh output matches the [-1, 1] scaling done in preprocess().
        self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation="tanh", padding="same")
        # Stateless ReLU is shared; each BatchNormalization holds its own
        # moving statistics, so one instance per stage.
        self.relu = layers.ReLU()
        self.bn1 = layers.BatchNormalization()
        self.bn2 = layers.BatchNormalization()
        self.bn3 = layers.BatchNormalization()
        self.bn4 = layers.BatchNormalization()
    def call(self, inputs, training=None, mask=None):
        """Forward pass; `inputs` is a (noise, one-hot label) pair."""
        noise, label = inputs
        # Conditioning: concatenate the label onto the noise vector.
        x = layers.Concatenate()([noise, label])
        x = self.dense(x)
        x = layers.Reshape(target_shape=(7, 7, 128))(x)
        # Repeating BN -> ReLU -> (transposed) conv per stage.
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv4(x)
        return x
    def get_config(self):
        # Keras serialization hook: `name` is the only constructor argument.
        return {'name': self.name}
class Discriminator(Model):
    """Conditional-GAN discriminator.

    Embeds the one-hot label into a 28x28x1 plane, concatenates it with
    the image as an extra channel, and classifies real vs. fake with a
    sigmoid output.
    """
    def __init__(self, name, img_shape=(28, 28, 1)):
        super(Discriminator, self).__init__(name=name)
        self.img_shape = img_shape
        self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)
        self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)
        self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding="same")
        self.conv4 = layers.Conv2D(256, kernel_size=5, padding="same")
        self.leaky_relu = layers.LeakyReLU(alpha=0.2)
        self.flatten = layers.Flatten()
        # Sigmoid scalar: probability the (image, label) pair is real.
        self.dense_final = layers.Dense(1, activation='sigmoid')
        # Label embedding: 7*7*16 == 784 == 28*28, reshaped to one channel.
        self.dense = layers.Dense(7*7*16)
    def call(self, inputs, training=None, mask=None):
        """Forward pass; `inputs` is an (image, one-hot label) pair."""
        image, label = inputs
        lb = self.dense(label)
        lb = layers.Reshape(target_shape=(28, 28, 1))(lb)
        # Label plane joins the image as a second channel.
        x = layers.Concatenate()([image, lb])
        x = self.leaky_relu(x)
        x = self.conv1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.conv4(x)
        x = self.flatten(x)
        x = self.dense_final(x)
        return x
    def get_config(self):
        return {"img_shape": self.img_shape, "name": self.name}
    @classmethod
    def from_config(cls, config, custom_objects=None):
        # Rebuild directly from get_config()'s dict during deserialization.
        return cls(**config)
# Build both networks, their optimizers, and the 80/20 train/val pipelines.
gen = Generator(name="generator")
disc = Discriminator(name="discriminator", img_shape=config.IMAGE_SHAPE)
gen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)
disc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
train_dataset = dataset.take(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)
val_dataset = dataset.skip(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)  # NOTE(review): not consumed anywhere in this file -- confirm
# Checkpoint bundles both networks and both optimizer states so training
# resumes exactly where it stopped; only the 3 newest checkpoints are kept.
checkpoint = tf.train.Checkpoint(generator=gen,
                                 gen_optimizer=gen_optimizer,
                                 discriminator=disc,
                                 disc_optimizer=disc_optimizer)
ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=config.CKPT_DIR, max_to_keep=3)
# creates a summary writer, writes a summary in a file to access on tensorboard later
summary_writer = tf.summary.create_file_writer(
    logdir=config.LOG_DIR + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
'''LOSSES'''
def disc_loss(real_logits, fake_logits):
    """Discriminator loss: average BCE of real-vs-ones and fake-vs-zeros."""
    bce = tf.losses.BinaryCrossentropy()
    loss_on_real = bce(tf.ones_like(real_logits), real_logits)
    loss_on_fake = bce(tf.zeros_like(fake_logits), fake_logits)
    return 0.5 * (loss_on_real + loss_on_fake)
def gen_loss(fake_logits):
    """Generator loss: BCE pushing the discriminator's fake predictions
    toward the "real" label (ones)."""
    return tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits), fake_logits)
# give signature to avoid retracing
signature = [
tf.TensorSpec(shape=(None, 28, 28, 1), dtype=tf.float32),
tf.TensorSpec(shape=(None, 10), dtype=tf.float32),
tf.TensorSpec(shape=(), dtype=tf.int64)
]
@tf.function(input_signature=signature)
def train_step(image_batch, label_batch, epoch):
    """Run one simultaneous optimization step for generator and discriminator.

    Args:
        image_batch: float32 batch of preprocessed images, shape (N, 28, 28, 1).
        label_batch: float32 one-hot labels, shape (N, 10).
        epoch: scalar int64, used as the TensorBoard summary step.

    Side effects: updates both models' weights and writes two scalar summaries.
    """
    noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))
    # persistent=True because we call tape.gradient() twice (once per network)
    # on the same recorded forward pass.
    with tf.GradientTape(persistent=True) as tape:
        fake_img_batch = gen([noise, label_batch], training=True)
        fake_logits = disc([fake_img_batch, label_batch], training=True)
        real_logits = disc([image_batch, label_batch], training=True)
        d_loss = disc_loss(real_logits, fake_logits)
        g_loss = gen_loss(fake_logits)
    gen_grads = tape.gradient(g_loss, gen.trainable_variables)
    disc_grads = tape.gradient(d_loss, disc.trainable_variables)
    # Fix: a persistent tape holds recorded resources until garbage-collected;
    # TF docs recommend deleting it explicitly once gradients are taken.
    del tape
    gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))
    disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))
    # writes a tensorboard summary (creates graph if scalar)
    with summary_writer.as_default():
        tf.summary.scalar("generator_loss", g_loss, step=epoch)
        tf.summary.scalar("discriminator_loss", d_loss, step=epoch)
# Running means of the losses (kept for compatibility; train_step currently
# reports losses via TensorBoard summaries rather than these metrics).
g_loss = tf.metrics.Mean()
d_loss = tf.metrics.Mean()
# Fix: Progbar's stateful_metrics expects metric *name strings*, not metric
# objects — passing the Mean instances themselves was incorrect.
prog_bar = Progbar(1500, stateful_metrics=["g_loss", "d_loss"])

# Resume training from the newest checkpoint, if any exists.
if ckpt_manager.latest_checkpoint:
    checkpoint.restore(ckpt_manager.latest_checkpoint).expect_partial()
    print(f"Restored the training checkpoint...{ckpt_manager.latest_checkpoint}")
def train():
    """Full training loop: config.EPOCHS passes over train_dataset.

    Saves a checkpoint every 5 epochs via ckpt_manager.
    """
    for epoch in range(config.EPOCHS):
        print(f"\nEpoch {epoch+1}/{config.EPOCHS} :")
        # Fix: use a fresh Progbar per epoch so the counter and the per-step
        # timer restart; the shared module-level bar carried stale state
        # across epochs.
        bar = Progbar(1500)
        for n, (image, label) in enumerate(train_dataset):
            train_step(image, label, epoch+1)
            bar.update(n + 1)  # fix off-by-one: report *completed* steps
        if (epoch+1) % 5 == 0:
            ckpt_manager.save()
def generate():
    """Sample one image per digit class (0-9) and display them in a row."""
    latent = tf.random.normal((10, config.NOISE_DIM))
    class_ids = np.arange(0, 10)
    one_hot = tf.one_hot(class_ids, depth=10)
    print(one_hot)
    images = gen([latent, one_hot])
    images = (images.numpy() * 127.5) + 127.5  # de-process
    for idx in range(10):
        plt.subplot(1, 10, idx + 1)
        plt.axis("off")
        plt.imshow(images[idx].reshape((img_height, img_width)), cmap='gray')
    plt.show()
if __name__ == "__main__":
    train()  # train loop

    '''Test Code'''
    # Smoke-test snippets for quick shape checks, intentionally left commented:

    # gen_out = gen([tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM)),
    #                tf.ones((config.BATCH_SIZE, 10))])
    # disc_out = disc([tf.random.normal((config.BATCH_SIZE,) + config.IMAGE_SHAPE),
    #                  tf.ones((config.BATCH_SIZE, 10))])
    #
    # assert gen_out.shape == (32, 28, 28, 1)
|
normal
|
{
"blob_id": "e265b2b2ccc0841ccb8b766de4ae2a869f2d280d",
"index": 8326,
"step-1": "<mask token>\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n <mask token>\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask 
token>\n",
"step-2": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = 
inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\n@tf.function(input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = 
inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits),\n fake_logits)\n return loss\n\n\n<mask token>\n\n\n@tf.function(input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n 
plt.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = 
inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\ndef disc_loss(real_logits, fake_logits):\n real_loss = tf.losses.BinaryCrossentropy()(tf.ones_like(real_logits),\n real_logits)\n fake_loss = tf.losses.BinaryCrossentropy()(tf.zeros_like(fake_logits),\n fake_logits)\n loss = 0.5 * (real_loss + fake_loss)\n return loss\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits),\n fake_logits)\n return loss\n\n\n<mask token>\n\n\n@tf.function(input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef train():\n for epoch in range(config.EPOCHS):\n print(f'\\nEpoch {epoch + 
1}/{config.EPOCHS} :')\n for n, (image, label) in enumerate(train_dataset):\n train_step(image, label, epoch + 1)\n prog_bar.update(n)\n if (epoch + 1) % 5 == 0:\n ckpt_manager.save()\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\n<mask token>\n",
"step-5": "import tensorflow as tf\nfrom keras import layers, Model, Input\nfrom keras.utils import Progbar, to_categorical\nfrom keras.datasets.mnist import load_data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport config\nimport datetime\n\nimg_height, img_width, _ = config.IMAGE_SHAPE\n\n(X, Y), (_, _) = load_data()\nX = X.reshape((-1, img_height, img_width, 1))\nX = X.astype(\"float32\")\nY = to_categorical(Y, num_classes=10, dtype=\"float32\")\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7*7*128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding=\"same\")\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding=\"same\")\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2, padding=\"same\")\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\"tanh\", padding=\"same\")\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = 
layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding=\"same\")\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7*7*16)\n\n def call(self, inputs, training=None, mask=None):\n\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n\n return x\n\n def get_config(self):\n return {\"img_shape\": self.img_shape, \"name\": self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\ngen = Generator(name=\"generator\")\ndisc = Discriminator(name=\"discriminator\", img_shape=config.IMAGE_SHAPE)\n\ngen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)\ndisc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)\n\ndataset = tf.data.Dataset.from_tensor_slices((X, Y))\ntrain_dataset = dataset.take(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)\nval_dataset = dataset.skip(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)\n\ncheckpoint = tf.train.Checkpoint(generator=gen,\n gen_optimizer=gen_optimizer,\n discriminator=disc,\n disc_optimizer=disc_optimizer)\nckpt_manager = tf.train.CheckpointManager(checkpoint, directory=config.CKPT_DIR, max_to_keep=3)\n\n# creates a summary writer, writes a summary in a file to access on tensorboard later\nsummary_writer = tf.summary.create_file_writer(\n logdir=config.LOG_DIR + \"fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n\n'''LOSSES'''\n\n\ndef 
disc_loss(real_logits, fake_logits):\n real_loss = tf.losses.BinaryCrossentropy()(tf.ones_like(real_logits), real_logits)\n fake_loss = tf.losses.BinaryCrossentropy()(tf.zeros_like(fake_logits), fake_logits)\n loss = 0.5*(real_loss + fake_loss)\n return loss\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits), fake_logits)\n return loss\n\n\n# give signature to avoid retracing\n\nsignature = [\n tf.TensorSpec(shape=(None, 28, 28, 1), dtype=tf.float32),\n tf.TensorSpec(shape=(None, 10), dtype=tf.float32),\n tf.TensorSpec(shape=(), dtype=tf.int64)\n]\n\n\n@tf.function(input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n\n # writes a tensorboard summary (creates graph if scalar)\n with summary_writer.as_default():\n tf.summary.scalar(\"generator_loss\", g_loss, step=epoch)\n tf.summary.scalar(\"discriminator_loss\", d_loss, step=epoch)\n\n\ng_loss = tf.metrics.Mean()\nd_loss = tf.metrics.Mean()\nprog_bar = Progbar(1500, stateful_metrics=[g_loss, d_loss])\n\nif ckpt_manager.latest_checkpoint:\n checkpoint.restore(ckpt_manager.latest_checkpoint).expect_partial()\n print(f\"Restored the training checkpoint...{ckpt_manager.latest_checkpoint}\")\n\n\ndef train():\n for epoch in range(config.EPOCHS):\n print(f\"\\nEpoch {epoch+1}/{config.EPOCHS} 
:\")\n for n, (image, label) in enumerate(train_dataset):\n train_step(image, label, epoch+1)\n prog_bar.update(n)\n\n if (epoch+1) % 5 == 0:\n ckpt_manager.save()\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n\n out = gen([z, labels])\n out = (out.numpy() * 127.5) + 127.5 # de-process\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis(\"off\")\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\nif __name__ == \"__main__\":\n train() # train loop\n\n '''Test Code'''\n\n # gen_out = gen([tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM)),\n # tf.ones((config.BATCH_SIZE, 10))])\n # disc_out = disc([tf.random.normal((config.BATCH_SIZE,) + config.IMAGE_SHAPE),\n # tf.ones((config.BATCH_SIZE, 10))])\n #\n # assert gen_out.shape == (32, 28, 28, 1)\n\n\n\n\n\n\n\n",
"step-ids": [
8,
12,
13,
15,
19
]
}
|
[
8,
12,
13,
15,
19
] |
<|reserved_special_token_0|>
def _mako_generate_namespaces(context):
    """Register this template's four imported Mako namespaces on *context*.

    Machine-generated by the Mako compiler; the ``__anon_0x...`` names are
    compiler-assigned identifiers for anonymous ``<%namespace>`` imports.
    """
    # /message.mako (provides render_msg)
    ns = runtime.TemplateNamespace('__anon_0x88e2e50', context.
        _clean_inheritance_tokens(), templateuri=u'/message.mako',
        callables=None, calling_uri=_template_uri)
    context.namespaces[__name__, '__anon_0x88e2e50'] = ns
    # common helpers shared by tool_shed pages
    ns = runtime.TemplateNamespace('__anon_0x7ee9750', context.
        _clean_inheritance_tokens(), templateuri=
        u'/webapps/tool_shed/common/common.mako', callables=None,
        calling_uri=_template_uri)
    context.namespaces[__name__, '__anon_0x7ee9750'] = ns
    # repository-page helpers (clone string, type select field, ...)
    ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context.
        _clean_inheritance_tokens(), templateuri=
        u'/webapps/tool_shed/repository/common.mako', callables=None,
        calling_uri=_template_uri)
    context.namespaces[__name__, '__anon_0x8a2fd90'] = ns
    # repository actions menu (render_tool_shed_repository_actions)
    ns = runtime.TemplateNamespace('__anon_0x88e21d0', context.
        _clean_inheritance_tokens(), templateuri=
        u'/webapps/tool_shed/common/repository_actions_menu.mako',
        callables=None, calling_uri=_template_uri)
    context.namespaces[__name__, '__anon_0x88e21d0'] = ns
def _mako_inherit(template, context):
    # Mako runtime hook: populate namespaces, then chain to the base template
    # chosen dynamically by inherit() (panelled vs. plain base).
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, inherit(context), _template_uri)
def render_body(context, **pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
status = _import_ns.get('status', context.get('status', UNDEFINED))
render_clone_str = _import_ns.get('render_clone_str', context.get(
'render_clone_str', UNDEFINED))
render_repository_type_select_field = _import_ns.get(
'render_repository_type_select_field', context.get(
'render_repository_type_select_field', UNDEFINED))
render_msg = _import_ns.get('render_msg', context.get('render_msg',
UNDEFINED))
repository = _import_ns.get('repository', context.get('repository',
UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
render_tool_shed_repository_actions = _import_ns.get(
'render_tool_shed_repository_actions', context.get(
'render_tool_shed_repository_actions', UNDEFINED))
is_malicious = _import_ns.get('is_malicious', context.get(
'is_malicious', UNDEFINED))
repository_type_select_field = _import_ns.get(
'repository_type_select_field', context.get(
'repository_type_select_field', UNDEFINED))
commit_message = _import_ns.get('commit_message', context.get(
'commit_message', UNDEFINED))
message = _import_ns.get('message', context.get('message', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
is_new = repository.is_new(trans.app)
can_push = trans.app.security_agent.can_push(trans.app, trans.user,
repository)
can_download = not is_new and (not is_malicious or can_push)
can_browse_contents = not is_new
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key,
__M_locals_builtin_stored[__M_key]) for __M_key in ['can_push',
'can_browse_contents', 'is_new', 'can_download'] if __M_key in
__M_locals_builtin_stored]))
__M_writer(u'\n\n')
__M_writer(unicode(render_tool_shed_repository_actions(repository)))
__M_writer(u'\n\n')
if message:
__M_writer(u' ')
__M_writer(unicode(render_msg(message, status)))
__M_writer(u'\n')
pass
__M_writer(u'\n')
if can_browse_contents:
__M_writer(
u""" <div class="toolForm">
<div class="toolFormTitle">Repository '"""
)
__M_writer(filters.html_escape(unicode(repository.name)))
__M_writer(u"' revision ")
__M_writer(filters.html_escape(unicode(repository.tip(trans.app))))
__M_writer(u' (repository tip)</div>\n')
if can_download:
__M_writer(
u""" <div class="form-row">
<label>Clone this repository:</label>
"""
)
__M_writer(unicode(render_clone_str(repository)))
__M_writer(u'\n </div>\n')
pass
__M_writer(u' <form name="repository_type">\n ')
__M_writer(unicode(render_repository_type_select_field(
repository_type_select_field, render_help=False)))
__M_writer(u'\n </form>\n')
if can_push:
__M_writer(
u' <form name="select_files_to_delete" id="select_files_to_delete" action="'
)
__M_writer(unicode(h.url_for(controller='repository',
action='select_files_to_delete', id=trans.security.
encode_id(repository.id))))
__M_writer(
u"""" method="post" >
<div class="form-row" >
<label>Contents:</label>
<div id="tree" >
Loading...
</div>
<div class="toolParamHelp" style="clear: both;">
Click on a file to display it's contents below. You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.
</div>
<input id="selected_files_to_delete" name="selected_files_to_delete" type="hidden" value=""/>
</div>
<div class="form-row">
<label>Message:</label>
<div class="form-row-input">
"""
)
if commit_message:
__M_writer(
u' <textarea name="commit_message" rows="3" cols="35">'
)
__M_writer(filters.html_escape(unicode(commit_message)))
__M_writer(u'</textarea>\n')
else:
__M_writer(
u""" <textarea name="commit_message" rows="3" cols="35"></textarea>
"""
)
pass
__M_writer(
u""" </div>
<div class="toolParamHelp" style="clear: both;">
This is the commit message for the mercurial change set that will be created if you delete selected files.
</div>
<div style="clear: both"></div>
</div>
<div class="form-row">
<input type="submit" name="select_files_to_delete_button" value="Delete selected files"/>
</div>
<div class="form-row">
<div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>
</div>
</form>
"""
)
else:
__M_writer(
u""" <div class="toolFormBody">
<div class="form-row" >
<label>Contents:</label>
<div id="tree" >
Loading...
</div>
</div>
<div class="form-row">
<div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>
</div>
</div>
"""
)
pass
__M_writer(u' </div>\n <p/>\n')
pass
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inherit(context):
    """Return the URI of the base template this page inherits from.

    The panelled chrome is used when the rendering context sets a truthy
    ``use_panels``; otherwise the plain base template is used.
    """
    use_panels = context.get('use_panels')
    return '/webapps/tool_shed/base_panels.mako' if use_panels else '/base.mako'
<|reserved_special_token_0|>
def _mako_generate_namespaces(context):
ns = runtime.TemplateNamespace('__anon_0x88e2e50', context.
_clean_inheritance_tokens(), templateuri=u'/message.mako',
callables=None, calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x88e2e50'] = ns
ns = runtime.TemplateNamespace('__anon_0x7ee9750', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/common/common.mako', callables=None,
calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x7ee9750'] = ns
ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/repository/common.mako', callables=None,
calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x8a2fd90'] = ns
ns = runtime.TemplateNamespace('__anon_0x88e21d0', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/common/repository_actions_menu.mako',
callables=None, calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x88e21d0'] = ns
def _mako_inherit(template, context):
    # Mako runtime hook: populate namespaces, then chain to the base template
    # chosen dynamically by inherit() (panelled vs. plain base).
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, inherit(context), _template_uri)
def render_body(context, **pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
status = _import_ns.get('status', context.get('status', UNDEFINED))
render_clone_str = _import_ns.get('render_clone_str', context.get(
'render_clone_str', UNDEFINED))
render_repository_type_select_field = _import_ns.get(
'render_repository_type_select_field', context.get(
'render_repository_type_select_field', UNDEFINED))
render_msg = _import_ns.get('render_msg', context.get('render_msg',
UNDEFINED))
repository = _import_ns.get('repository', context.get('repository',
UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
render_tool_shed_repository_actions = _import_ns.get(
'render_tool_shed_repository_actions', context.get(
'render_tool_shed_repository_actions', UNDEFINED))
is_malicious = _import_ns.get('is_malicious', context.get(
'is_malicious', UNDEFINED))
repository_type_select_field = _import_ns.get(
'repository_type_select_field', context.get(
'repository_type_select_field', UNDEFINED))
commit_message = _import_ns.get('commit_message', context.get(
'commit_message', UNDEFINED))
message = _import_ns.get('message', context.get('message', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
is_new = repository.is_new(trans.app)
can_push = trans.app.security_agent.can_push(trans.app, trans.user,
repository)
can_download = not is_new and (not is_malicious or can_push)
can_browse_contents = not is_new
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key,
__M_locals_builtin_stored[__M_key]) for __M_key in ['can_push',
'can_browse_contents', 'is_new', 'can_download'] if __M_key in
__M_locals_builtin_stored]))
__M_writer(u'\n\n')
__M_writer(unicode(render_tool_shed_repository_actions(repository)))
__M_writer(u'\n\n')
if message:
__M_writer(u' ')
__M_writer(unicode(render_msg(message, status)))
__M_writer(u'\n')
pass
__M_writer(u'\n')
if can_browse_contents:
__M_writer(
u""" <div class="toolForm">
<div class="toolFormTitle">Repository '"""
)
__M_writer(filters.html_escape(unicode(repository.name)))
__M_writer(u"' revision ")
__M_writer(filters.html_escape(unicode(repository.tip(trans.app))))
__M_writer(u' (repository tip)</div>\n')
if can_download:
__M_writer(
u""" <div class="form-row">
<label>Clone this repository:</label>
"""
)
__M_writer(unicode(render_clone_str(repository)))
__M_writer(u'\n </div>\n')
pass
__M_writer(u' <form name="repository_type">\n ')
__M_writer(unicode(render_repository_type_select_field(
repository_type_select_field, render_help=False)))
__M_writer(u'\n </form>\n')
if can_push:
__M_writer(
u' <form name="select_files_to_delete" id="select_files_to_delete" action="'
)
__M_writer(unicode(h.url_for(controller='repository',
action='select_files_to_delete', id=trans.security.
encode_id(repository.id))))
__M_writer(
u"""" method="post" >
<div class="form-row" >
<label>Contents:</label>
<div id="tree" >
Loading...
</div>
<div class="toolParamHelp" style="clear: both;">
Click on a file to display it's contents below. You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.
</div>
<input id="selected_files_to_delete" name="selected_files_to_delete" type="hidden" value=""/>
</div>
<div class="form-row">
<label>Message:</label>
<div class="form-row-input">
"""
)
if commit_message:
__M_writer(
u' <textarea name="commit_message" rows="3" cols="35">'
)
__M_writer(filters.html_escape(unicode(commit_message)))
__M_writer(u'</textarea>\n')
else:
__M_writer(
u""" <textarea name="commit_message" rows="3" cols="35"></textarea>
"""
)
pass
__M_writer(
u""" </div>
<div class="toolParamHelp" style="clear: both;">
This is the commit message for the mercurial change set that will be created if you delete selected files.
</div>
<div style="clear: both"></div>
</div>
<div class="form-row">
<input type="submit" name="select_files_to_delete_button" value="Delete selected files"/>
</div>
<div class="form-row">
<div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>
</div>
</form>
"""
)
else:
__M_writer(
u""" <div class="toolFormBody">
<div class="form-row" >
<label>Contents:</label>
<div id="tree" >
Loading...
</div>
</div>
<div class="form-row">
<div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>
</div>
</div>
"""
)
pass
__M_writer(u' </div>\n <p/>\n')
pass
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_0|>
def render_javascripts(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
common_javascripts = _import_ns.get('common_javascripts', context.
get('common_javascripts', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
repository = _import_ns.get('repository', context.get('repository',
UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n ')
__M_writer(unicode(parent.javascripts()))
__M_writer(u'\n ')
__M_writer(unicode(h.js('libs/jquery/jquery.rating',
'libs/jquery/jquery-ui', 'libs/jquery/jquery.cookie',
'libs/jquery/jquery.dynatree')))
__M_writer(u'\n ')
__M_writer(unicode(common_javascripts(repository)))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inherit(context):
if context.get('use_panels'):
return '/webapps/tool_shed/base_panels.mako'
else:
return '/base.mako'
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
def _mako_generate_namespaces(context):
ns = runtime.TemplateNamespace('__anon_0x88e2e50', context.
_clean_inheritance_tokens(), templateuri=u'/message.mako',
callables=None, calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x88e2e50'] = ns
ns = runtime.TemplateNamespace('__anon_0x7ee9750', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/common/common.mako', callables=None,
calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x7ee9750'] = ns
ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/repository/common.mako', callables=None,
calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x8a2fd90'] = ns
ns = runtime.TemplateNamespace('__anon_0x88e21d0', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/common/repository_actions_menu.mako',
callables=None, calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x88e21d0'] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, inherit(context), _template_uri)
def render_body(context, **pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
status = _import_ns.get('status', context.get('status', UNDEFINED))
render_clone_str = _import_ns.get('render_clone_str', context.get(
'render_clone_str', UNDEFINED))
render_repository_type_select_field = _import_ns.get(
'render_repository_type_select_field', context.get(
'render_repository_type_select_field', UNDEFINED))
render_msg = _import_ns.get('render_msg', context.get('render_msg',
UNDEFINED))
repository = _import_ns.get('repository', context.get('repository',
UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
render_tool_shed_repository_actions = _import_ns.get(
'render_tool_shed_repository_actions', context.get(
'render_tool_shed_repository_actions', UNDEFINED))
is_malicious = _import_ns.get('is_malicious', context.get(
'is_malicious', UNDEFINED))
repository_type_select_field = _import_ns.get(
'repository_type_select_field', context.get(
'repository_type_select_field', UNDEFINED))
commit_message = _import_ns.get('commit_message', context.get(
'commit_message', UNDEFINED))
message = _import_ns.get('message', context.get('message', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
is_new = repository.is_new(trans.app)
can_push = trans.app.security_agent.can_push(trans.app, trans.user,
repository)
can_download = not is_new and (not is_malicious or can_push)
can_browse_contents = not is_new
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key,
__M_locals_builtin_stored[__M_key]) for __M_key in ['can_push',
'can_browse_contents', 'is_new', 'can_download'] if __M_key in
__M_locals_builtin_stored]))
__M_writer(u'\n\n')
__M_writer(unicode(render_tool_shed_repository_actions(repository)))
__M_writer(u'\n\n')
if message:
__M_writer(u' ')
__M_writer(unicode(render_msg(message, status)))
__M_writer(u'\n')
pass
__M_writer(u'\n')
if can_browse_contents:
__M_writer(
u""" <div class="toolForm">
<div class="toolFormTitle">Repository '"""
)
__M_writer(filters.html_escape(unicode(repository.name)))
__M_writer(u"' revision ")
__M_writer(filters.html_escape(unicode(repository.tip(trans.app))))
__M_writer(u' (repository tip)</div>\n')
if can_download:
__M_writer(
u""" <div class="form-row">
<label>Clone this repository:</label>
"""
)
__M_writer(unicode(render_clone_str(repository)))
__M_writer(u'\n </div>\n')
pass
__M_writer(u' <form name="repository_type">\n ')
__M_writer(unicode(render_repository_type_select_field(
repository_type_select_field, render_help=False)))
__M_writer(u'\n </form>\n')
if can_push:
__M_writer(
u' <form name="select_files_to_delete" id="select_files_to_delete" action="'
)
__M_writer(unicode(h.url_for(controller='repository',
action='select_files_to_delete', id=trans.security.
encode_id(repository.id))))
__M_writer(
u"""" method="post" >
<div class="form-row" >
<label>Contents:</label>
<div id="tree" >
Loading...
</div>
<div class="toolParamHelp" style="clear: both;">
Click on a file to display it's contents below. You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.
</div>
<input id="selected_files_to_delete" name="selected_files_to_delete" type="hidden" value=""/>
</div>
<div class="form-row">
<label>Message:</label>
<div class="form-row-input">
"""
)
if commit_message:
__M_writer(
u' <textarea name="commit_message" rows="3" cols="35">'
)
__M_writer(filters.html_escape(unicode(commit_message)))
__M_writer(u'</textarea>\n')
else:
__M_writer(
u""" <textarea name="commit_message" rows="3" cols="35"></textarea>
"""
)
pass
__M_writer(
u""" </div>
<div class="toolParamHelp" style="clear: both;">
This is the commit message for the mercurial change set that will be created if you delete selected files.
</div>
<div style="clear: both"></div>
</div>
<div class="form-row">
<input type="submit" name="select_files_to_delete_button" value="Delete selected files"/>
</div>
<div class="form-row">
<div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>
</div>
</form>
"""
)
else:
__M_writer(
u""" <div class="toolFormBody">
<div class="form-row" >
<label>Contents:</label>
<div id="tree" >
Loading...
</div>
</div>
<div class="form-row">
<div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>
</div>
</div>
"""
)
pass
__M_writer(u' </div>\n <p/>\n')
pass
return ''
finally:
context.caller_stack._pop_frame()
def render_stylesheets(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n ')
__M_writer(unicode(parent.stylesheets()))
__M_writer(u'\n ')
__M_writer(unicode(h.css('jquery.rating', 'dynatree_skin/ui.dynatree'))
)
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_javascripts(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
common_javascripts = _import_ns.get('common_javascripts', context.
get('common_javascripts', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
repository = _import_ns.get('repository', context.get('repository',
UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n ')
__M_writer(unicode(parent.javascripts()))
__M_writer(u'\n ')
__M_writer(unicode(h.js('libs/jquery/jquery.rating',
'libs/jquery/jquery-ui', 'libs/jquery/jquery.cookie',
'libs/jquery/jquery.dynatree')))
__M_writer(u'\n ')
__M_writer(unicode(common_javascripts(repository)))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_1|>
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1383550959.038948
_template_filename = (
'templates/webapps/tool_shed/repository/browse_repository.mako')
_template_uri = '/webapps/tool_shed/repository/browse_repository.mako'
_template_cache = cache.Cache(__name__, _modified_time)
_source_encoding = 'ascii'
_exports = ['stylesheets', 'javascripts']
def inherit(context):
if context.get('use_panels'):
return '/webapps/tool_shed/base_panels.mako'
else:
return '/base.mako'
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
def _mako_generate_namespaces(context):
ns = runtime.TemplateNamespace('__anon_0x88e2e50', context.
_clean_inheritance_tokens(), templateuri=u'/message.mako',
callables=None, calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x88e2e50'] = ns
ns = runtime.TemplateNamespace('__anon_0x7ee9750', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/common/common.mako', callables=None,
calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x7ee9750'] = ns
ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/repository/common.mako', callables=None,
calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x8a2fd90'] = ns
ns = runtime.TemplateNamespace('__anon_0x88e21d0', context.
_clean_inheritance_tokens(), templateuri=
u'/webapps/tool_shed/common/repository_actions_menu.mako',
callables=None, calling_uri=_template_uri)
context.namespaces[__name__, '__anon_0x88e21d0'] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, inherit(context), _template_uri)
def render_body(context, **pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
status = _import_ns.get('status', context.get('status', UNDEFINED))
render_clone_str = _import_ns.get('render_clone_str', context.get(
'render_clone_str', UNDEFINED))
render_repository_type_select_field = _import_ns.get(
'render_repository_type_select_field', context.get(
'render_repository_type_select_field', UNDEFINED))
render_msg = _import_ns.get('render_msg', context.get('render_msg',
UNDEFINED))
repository = _import_ns.get('repository', context.get('repository',
UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
render_tool_shed_repository_actions = _import_ns.get(
'render_tool_shed_repository_actions', context.get(
'render_tool_shed_repository_actions', UNDEFINED))
is_malicious = _import_ns.get('is_malicious', context.get(
'is_malicious', UNDEFINED))
repository_type_select_field = _import_ns.get(
'repository_type_select_field', context.get(
'repository_type_select_field', UNDEFINED))
commit_message = _import_ns.get('commit_message', context.get(
'commit_message', UNDEFINED))
message = _import_ns.get('message', context.get('message', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
is_new = repository.is_new(trans.app)
can_push = trans.app.security_agent.can_push(trans.app, trans.user,
repository)
can_download = not is_new and (not is_malicious or can_push)
can_browse_contents = not is_new
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key,
__M_locals_builtin_stored[__M_key]) for __M_key in ['can_push',
'can_browse_contents', 'is_new', 'can_download'] if __M_key in
__M_locals_builtin_stored]))
__M_writer(u'\n\n')
__M_writer(unicode(render_tool_shed_repository_actions(repository)))
__M_writer(u'\n\n')
if message:
__M_writer(u' ')
__M_writer(unicode(render_msg(message, status)))
__M_writer(u'\n')
pass
__M_writer(u'\n')
if can_browse_contents:
__M_writer(
u""" <div class="toolForm">
<div class="toolFormTitle">Repository '"""
)
__M_writer(filters.html_escape(unicode(repository.name)))
__M_writer(u"' revision ")
__M_writer(filters.html_escape(unicode(repository.tip(trans.app))))
__M_writer(u' (repository tip)</div>\n')
if can_download:
__M_writer(
u""" <div class="form-row">
<label>Clone this repository:</label>
"""
)
__M_writer(unicode(render_clone_str(repository)))
__M_writer(u'\n </div>\n')
pass
__M_writer(u' <form name="repository_type">\n ')
__M_writer(unicode(render_repository_type_select_field(
repository_type_select_field, render_help=False)))
__M_writer(u'\n </form>\n')
if can_push:
__M_writer(
u' <form name="select_files_to_delete" id="select_files_to_delete" action="'
)
__M_writer(unicode(h.url_for(controller='repository',
action='select_files_to_delete', id=trans.security.
encode_id(repository.id))))
__M_writer(
u"""" method="post" >
<div class="form-row" >
<label>Contents:</label>
<div id="tree" >
Loading...
</div>
<div class="toolParamHelp" style="clear: both;">
Click on a file to display it's contents below. You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.
</div>
<input id="selected_files_to_delete" name="selected_files_to_delete" type="hidden" value=""/>
</div>
<div class="form-row">
<label>Message:</label>
<div class="form-row-input">
"""
)
if commit_message:
__M_writer(
u' <textarea name="commit_message" rows="3" cols="35">'
)
__M_writer(filters.html_escape(unicode(commit_message)))
__M_writer(u'</textarea>\n')
else:
__M_writer(
u""" <textarea name="commit_message" rows="3" cols="35"></textarea>
"""
)
pass
__M_writer(
u""" </div>
<div class="toolParamHelp" style="clear: both;">
This is the commit message for the mercurial change set that will be created if you delete selected files.
</div>
<div style="clear: both"></div>
</div>
<div class="form-row">
<input type="submit" name="select_files_to_delete_button" value="Delete selected files"/>
</div>
<div class="form-row">
<div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>
</div>
</form>
"""
)
else:
__M_writer(
u""" <div class="toolFormBody">
<div class="form-row" >
<label>Contents:</label>
<div id="tree" >
Loading...
</div>
</div>
<div class="form-row">
<div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>
</div>
</div>
"""
)
pass
__M_writer(u' </div>\n <p/>\n')
pass
return ''
finally:
context.caller_stack._pop_frame()
def render_stylesheets(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n ')
__M_writer(unicode(parent.stylesheets()))
__M_writer(u'\n ')
__M_writer(unicode(h.css('jquery.rating', 'dynatree_skin/ui.dynatree'))
)
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_javascripts(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,
[u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,
[u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,
[u'render_tool_shed_repository_actions'])
common_javascripts = _import_ns.get('common_javascripts', context.
get('common_javascripts', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
repository = _import_ns.get('repository', context.get('repository',
UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
__M_writer(u'\n ')
__M_writer(unicode(parent.javascripts()))
__M_writer(u'\n ')
__M_writer(unicode(h.js('libs/jquery/jquery.rating',
'libs/jquery/jquery-ui', 'libs/jquery/jquery.cookie',
'libs/jquery/jquery.dynatree')))
__M_writer(u'\n ')
__M_writer(unicode(common_javascripts(repository)))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
<|reserved_special_token_1|>
# -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1383550959.0389481
_template_filename='templates/webapps/tool_shed/repository/browse_repository.mako'
_template_uri='/webapps/tool_shed/repository/browse_repository.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['stylesheets', 'javascripts']
# SOURCE LINE 7
def inherit(context):
if context.get('use_panels'):
return '/webapps/tool_shed/base_panels.mako'
else:
return '/base.mako'
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
# SOURCE LINE 2
ns = runtime.TemplateNamespace('__anon_0x88e2e50', context._clean_inheritance_tokens(), templateuri=u'/message.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x88e2e50')] = ns
# SOURCE LINE 4
ns = runtime.TemplateNamespace('__anon_0x7ee9750', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/common/common.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x7ee9750')] = ns
# SOURCE LINE 5
ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/repository/common.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x8a2fd90')] = ns
# SOURCE LINE 3
ns = runtime.TemplateNamespace('__anon_0x88e21d0', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/common/repository_actions_menu.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x88e21d0')] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, (inherit(context)), _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns, [u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns, [u'render_tool_shed_repository_actions'])
status = _import_ns.get('status', context.get('status', UNDEFINED))
render_clone_str = _import_ns.get('render_clone_str', context.get('render_clone_str', UNDEFINED))
render_repository_type_select_field = _import_ns.get('render_repository_type_select_field', context.get('render_repository_type_select_field', UNDEFINED))
render_msg = _import_ns.get('render_msg', context.get('render_msg', UNDEFINED))
repository = _import_ns.get('repository', context.get('repository', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
render_tool_shed_repository_actions = _import_ns.get('render_tool_shed_repository_actions', context.get('render_tool_shed_repository_actions', UNDEFINED))
is_malicious = _import_ns.get('is_malicious', context.get('is_malicious', UNDEFINED))
repository_type_select_field = _import_ns.get('repository_type_select_field', context.get('repository_type_select_field', UNDEFINED))
commit_message = _import_ns.get('commit_message', context.get('commit_message', UNDEFINED))
message = _import_ns.get('message', context.get('message', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n')
# SOURCE LINE 2
__M_writer(u'\n')
# SOURCE LINE 3
__M_writer(u'\n')
# SOURCE LINE 4
__M_writer(u'\n')
# SOURCE LINE 5
__M_writer(u'\n\n')
# SOURCE LINE 13
__M_writer(u'\n')
# SOURCE LINE 14
__M_writer(u'\n\n')
# SOURCE LINE 19
__M_writer(u'\n\n')
# SOURCE LINE 25
__M_writer(u'\n\n')
# SOURCE LINE 27
is_new = repository.is_new( trans.app )
can_push = trans.app.security_agent.can_push( trans.app, trans.user, repository )
can_download = not is_new and ( not is_malicious or can_push )
can_browse_contents = not is_new
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin_stored[__M_key]) for __M_key in ['can_push','can_browse_contents','is_new','can_download'] if __M_key in __M_locals_builtin_stored]))
# SOURCE LINE 32
__M_writer(u'\n\n')
# SOURCE LINE 34
__M_writer(unicode(render_tool_shed_repository_actions( repository )))
__M_writer(u'\n\n')
# SOURCE LINE 36
if message:
# SOURCE LINE 37
__M_writer(u' ')
__M_writer(unicode(render_msg( message, status )))
__M_writer(u'\n')
pass
# SOURCE LINE 39
__M_writer(u'\n')
# SOURCE LINE 40
if can_browse_contents:
# SOURCE LINE 41
__M_writer(u' <div class="toolForm">\n <div class="toolFormTitle">Repository \'')
# SOURCE LINE 42
__M_writer(filters.html_escape(unicode(repository.name )))
__M_writer(u"' revision ")
__M_writer(filters.html_escape(unicode(repository.tip( trans.app ) )))
__M_writer(u' (repository tip)</div>\n')
# SOURCE LINE 43
if can_download:
# SOURCE LINE 44
__M_writer(u' <div class="form-row">\n <label>Clone this repository:</label>\n ')
# SOURCE LINE 46
__M_writer(unicode(render_clone_str( repository )))
__M_writer(u'\n </div>\n')
pass
# SOURCE LINE 49
__M_writer(u' <form name="repository_type">\n ')
# SOURCE LINE 50
__M_writer(unicode(render_repository_type_select_field( repository_type_select_field, render_help=False )))
__M_writer(u'\n </form>\n')
# SOURCE LINE 52
if can_push:
# SOURCE LINE 53
__M_writer(u' <form name="select_files_to_delete" id="select_files_to_delete" action="')
__M_writer(unicode(h.url_for( controller='repository', action='select_files_to_delete', id=trans.security.encode_id( repository.id ))))
__M_writer(u'" method="post" >\n <div class="form-row" >\n <label>Contents:</label>\n <div id="tree" >\n Loading...\n </div>\n <div class="toolParamHelp" style="clear: both;">\n Click on a file to display it\'s contents below. You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.\n </div>\n <input id="selected_files_to_delete" name="selected_files_to_delete" type="hidden" value=""/>\n </div>\n <div class="form-row">\n <label>Message:</label>\n <div class="form-row-input">\n')
# SOURCE LINE 67
if commit_message:
# SOURCE LINE 68
__M_writer(u' <textarea name="commit_message" rows="3" cols="35">')
__M_writer(filters.html_escape(unicode(commit_message )))
__M_writer(u'</textarea>\n')
# SOURCE LINE 69
else:
# SOURCE LINE 70
__M_writer(u' <textarea name="commit_message" rows="3" cols="35"></textarea>\n')
pass
# SOURCE LINE 72
__M_writer(u' </div>\n <div class="toolParamHelp" style="clear: both;">\n This is the commit message for the mercurial change set that will be created if you delete selected files.\n </div>\n <div style="clear: both"></div>\n </div>\n <div class="form-row">\n <input type="submit" name="select_files_to_delete_button" value="Delete selected files"/>\n </div>\n <div class="form-row">\n <div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>\n </div>\n </form>\n')
# SOURCE LINE 85
else:
# SOURCE LINE 86
__M_writer(u' <div class="toolFormBody">\n <div class="form-row" >\n <label>Contents:</label>\n <div id="tree" >\n Loading...\n </div>\n </div>\n <div class="form-row">\n <div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>\n </div>\n </div>\n')
pass
# SOURCE LINE 98
__M_writer(u' </div>\n <p/>\n')
pass
return ''
finally:
context.caller_stack._pop_frame()
def render_stylesheets(context):
    """Render the stylesheet includes for the browse-repository page.

    Emits the parent template's stylesheets followed by the jquery.rating
    and dynatree skin CSS. Behaviorally identical to the compiled Mako
    body it replaces (same writer calls, same order, same literals).
    """
    context.caller_stack._push_frame()
    try:
        imported_symbols = {}
        # Pull in the same anonymous <%import> namespaces the compiled
        # template populates, in the same order.
        for anon_ns, names in (
            ('__anon_0x88e2e50', [u'render_msg']),
            ('__anon_0x7ee9750', [u'*']),
            ('__anon_0x8a2fd90', [u'*']),
            ('__anon_0x88e21d0', [u'render_tool_shed_repository_actions']),
        ):
            _mako_get_namespace(context, anon_ns)._populate(imported_symbols, names)
        # Imported symbols shadow the render context; fall back to UNDEFINED.
        h = imported_symbols.get('h', context.get('h', UNDEFINED))
        parent = imported_symbols.get('parent', context.get('parent', UNDEFINED))
        emit = context.writer()
        emit(u'\n    ')
        emit(unicode(parent.stylesheets()))
        emit(u'\n    ')
        emit(unicode(h.css("jquery.rating", "dynatree_skin/ui.dynatree")))
        emit(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_javascripts(context):
    """Render the javascript includes for the browse-repository page.

    Emits the parent template's scripts, the jQuery plugin bundle
    (rating, jquery-ui, cookie, dynatree), and the repository-specific
    common javascripts. Behaviorally identical to the compiled Mako
    body it replaces (same writer calls, same order, same literals).
    """
    context.caller_stack._push_frame()
    try:
        imported_symbols = {}
        # Pull in the same anonymous <%import> namespaces the compiled
        # template populates, in the same order.
        for anon_ns, names in (
            ('__anon_0x88e2e50', [u'render_msg']),
            ('__anon_0x7ee9750', [u'*']),
            ('__anon_0x8a2fd90', [u'*']),
            ('__anon_0x88e21d0', [u'render_tool_shed_repository_actions']),
        ):
            _mako_get_namespace(context, anon_ns)._populate(imported_symbols, names)

        def lookup(name):
            # Imported symbols shadow the render context; fall back to UNDEFINED.
            return imported_symbols.get(name, context.get(name, UNDEFINED))

        common_javascripts = lookup('common_javascripts')
        h = lookup('h')
        repository = lookup('repository')
        parent = lookup('parent')
        emit = context.writer()
        emit(u'\n    ')
        emit(unicode(parent.javascripts()))
        emit(u'\n    ')
        emit(unicode(h.js("libs/jquery/jquery.rating", "libs/jquery/jquery-ui", "libs/jquery/jquery.cookie", "libs/jquery/jquery.dynatree")))
        emit(u'\n    ')
        emit(unicode(common_javascripts(repository)))
        emit(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
|
flexible
|
{
"blob_id": "fd54bbfbc81aec371ad6c82bf402a5a3673a9f24",
"index": 8892,
"step-1": "<mask token>\n\n\ndef _mako_generate_namespaces(context):\n ns = runtime.TemplateNamespace('__anon_0x88e2e50', context.\n _clean_inheritance_tokens(), templateuri=u'/message.mako',\n callables=None, calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x88e2e50'] = ns\n ns = runtime.TemplateNamespace('__anon_0x7ee9750', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/common/common.mako', callables=None,\n calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x7ee9750'] = ns\n ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/repository/common.mako', callables=None,\n calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x8a2fd90'] = ns\n ns = runtime.TemplateNamespace('__anon_0x88e21d0', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/common/repository_actions_menu.mako',\n callables=None, calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x88e21d0'] = ns\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, inherit(context), _template_uri)\n\n\ndef render_body(context, **pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n status = _import_ns.get('status', context.get('status', UNDEFINED))\n render_clone_str = _import_ns.get('render_clone_str', context.get(\n 'render_clone_str', UNDEFINED))\n render_repository_type_select_field = _import_ns.get(\n 
'render_repository_type_select_field', context.get(\n 'render_repository_type_select_field', UNDEFINED))\n render_msg = _import_ns.get('render_msg', context.get('render_msg',\n UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository',\n UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n render_tool_shed_repository_actions = _import_ns.get(\n 'render_tool_shed_repository_actions', context.get(\n 'render_tool_shed_repository_actions', UNDEFINED))\n is_malicious = _import_ns.get('is_malicious', context.get(\n 'is_malicious', UNDEFINED))\n repository_type_select_field = _import_ns.get(\n 'repository_type_select_field', context.get(\n 'repository_type_select_field', UNDEFINED))\n commit_message = _import_ns.get('commit_message', context.get(\n 'commit_message', UNDEFINED))\n message = _import_ns.get('message', context.get('message', UNDEFINED))\n trans = _import_ns.get('trans', context.get('trans', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n')\n is_new = repository.is_new(trans.app)\n can_push = trans.app.security_agent.can_push(trans.app, trans.user,\n repository)\n can_download = not is_new and (not is_malicious or can_push)\n can_browse_contents = not is_new\n __M_locals_builtin_stored = __M_locals_builtin()\n __M_locals.update(__M_dict_builtin([(__M_key,\n __M_locals_builtin_stored[__M_key]) for __M_key in ['can_push',\n 'can_browse_contents', 'is_new', 'can_download'] if __M_key in\n __M_locals_builtin_stored]))\n __M_writer(u'\\n\\n')\n __M_writer(unicode(render_tool_shed_repository_actions(repository)))\n __M_writer(u'\\n\\n')\n if message:\n __M_writer(u' ')\n __M_writer(unicode(render_msg(message, status)))\n __M_writer(u'\\n')\n pass\n __M_writer(u'\\n')\n if can_browse_contents:\n __M_writer(\n u\"\"\" <div 
class=\"toolForm\">\n <div class=\"toolFormTitle\">Repository '\"\"\"\n )\n __M_writer(filters.html_escape(unicode(repository.name)))\n __M_writer(u\"' revision \")\n __M_writer(filters.html_escape(unicode(repository.tip(trans.app))))\n __M_writer(u' (repository tip)</div>\\n')\n if can_download:\n __M_writer(\n u\"\"\" <div class=\"form-row\">\n <label>Clone this repository:</label>\n \"\"\"\n )\n __M_writer(unicode(render_clone_str(repository)))\n __M_writer(u'\\n </div>\\n')\n pass\n __M_writer(u' <form name=\"repository_type\">\\n ')\n __M_writer(unicode(render_repository_type_select_field(\n repository_type_select_field, render_help=False)))\n __M_writer(u'\\n </form>\\n')\n if can_push:\n __M_writer(\n u' <form name=\"select_files_to_delete\" id=\"select_files_to_delete\" action=\"'\n )\n __M_writer(unicode(h.url_for(controller='repository',\n action='select_files_to_delete', id=trans.security.\n encode_id(repository.id))))\n __M_writer(\n u\"\"\"\" method=\"post\" >\n <div class=\"form-row\" >\n <label>Contents:</label>\n <div id=\"tree\" >\n Loading...\n </div>\n <div class=\"toolParamHelp\" style=\"clear: both;\">\n Click on a file to display it's contents below. 
You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.\n </div>\n <input id=\"selected_files_to_delete\" name=\"selected_files_to_delete\" type=\"hidden\" value=\"\"/>\n </div>\n <div class=\"form-row\">\n <label>Message:</label>\n <div class=\"form-row-input\">\n\"\"\"\n )\n if commit_message:\n __M_writer(\n u' <textarea name=\"commit_message\" rows=\"3\" cols=\"35\">'\n )\n __M_writer(filters.html_escape(unicode(commit_message)))\n __M_writer(u'</textarea>\\n')\n else:\n __M_writer(\n u\"\"\" <textarea name=\"commit_message\" rows=\"3\" cols=\"35\"></textarea>\n\"\"\"\n )\n pass\n __M_writer(\n u\"\"\" </div>\n <div class=\"toolParamHelp\" style=\"clear: both;\">\n This is the commit message for the mercurial change set that will be created if you delete selected files.\n </div>\n <div style=\"clear: both\"></div>\n </div>\n <div class=\"form-row\">\n <input type=\"submit\" name=\"select_files_to_delete_button\" value=\"Delete selected files\"/>\n </div>\n <div class=\"form-row\">\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\n </div>\n </form>\n\"\"\"\n )\n else:\n __M_writer(\n u\"\"\" <div class=\"toolFormBody\">\n <div class=\"form-row\" >\n <label>Contents:</label>\n <div id=\"tree\" >\n Loading...\n </div>\n </div>\n <div class=\"form-row\">\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\n </div>\n </div>\n\"\"\"\n )\n pass\n __M_writer(u' </div>\\n <p/>\\n')\n pass\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef inherit(context):\n if context.get('use_panels'):\n return '/webapps/tool_shed/base_panels.mako'\n else:\n return '/base.mako'\n\n\n<mask token>\n\n\ndef _mako_generate_namespaces(context):\n ns = runtime.TemplateNamespace('__anon_0x88e2e50', context.\n _clean_inheritance_tokens(), templateuri=u'/message.mako',\n callables=None, calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x88e2e50'] = ns\n ns = runtime.TemplateNamespace('__anon_0x7ee9750', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/common/common.mako', callables=None,\n calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x7ee9750'] = ns\n ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/repository/common.mako', callables=None,\n calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x8a2fd90'] = ns\n ns = runtime.TemplateNamespace('__anon_0x88e21d0', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/common/repository_actions_menu.mako',\n callables=None, calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x88e21d0'] = ns\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, inherit(context), _template_uri)\n\n\ndef render_body(context, **pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n status = _import_ns.get('status', context.get('status', UNDEFINED))\n render_clone_str = 
_import_ns.get('render_clone_str', context.get(\n 'render_clone_str', UNDEFINED))\n render_repository_type_select_field = _import_ns.get(\n 'render_repository_type_select_field', context.get(\n 'render_repository_type_select_field', UNDEFINED))\n render_msg = _import_ns.get('render_msg', context.get('render_msg',\n UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository',\n UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n render_tool_shed_repository_actions = _import_ns.get(\n 'render_tool_shed_repository_actions', context.get(\n 'render_tool_shed_repository_actions', UNDEFINED))\n is_malicious = _import_ns.get('is_malicious', context.get(\n 'is_malicious', UNDEFINED))\n repository_type_select_field = _import_ns.get(\n 'repository_type_select_field', context.get(\n 'repository_type_select_field', UNDEFINED))\n commit_message = _import_ns.get('commit_message', context.get(\n 'commit_message', UNDEFINED))\n message = _import_ns.get('message', context.get('message', UNDEFINED))\n trans = _import_ns.get('trans', context.get('trans', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n')\n is_new = repository.is_new(trans.app)\n can_push = trans.app.security_agent.can_push(trans.app, trans.user,\n repository)\n can_download = not is_new and (not is_malicious or can_push)\n can_browse_contents = not is_new\n __M_locals_builtin_stored = __M_locals_builtin()\n __M_locals.update(__M_dict_builtin([(__M_key,\n __M_locals_builtin_stored[__M_key]) for __M_key in ['can_push',\n 'can_browse_contents', 'is_new', 'can_download'] if __M_key in\n __M_locals_builtin_stored]))\n __M_writer(u'\\n\\n')\n __M_writer(unicode(render_tool_shed_repository_actions(repository)))\n __M_writer(u'\\n\\n')\n if message:\n __M_writer(u' ')\n 
__M_writer(unicode(render_msg(message, status)))\n __M_writer(u'\\n')\n pass\n __M_writer(u'\\n')\n if can_browse_contents:\n __M_writer(\n u\"\"\" <div class=\"toolForm\">\n <div class=\"toolFormTitle\">Repository '\"\"\"\n )\n __M_writer(filters.html_escape(unicode(repository.name)))\n __M_writer(u\"' revision \")\n __M_writer(filters.html_escape(unicode(repository.tip(trans.app))))\n __M_writer(u' (repository tip)</div>\\n')\n if can_download:\n __M_writer(\n u\"\"\" <div class=\"form-row\">\n <label>Clone this repository:</label>\n \"\"\"\n )\n __M_writer(unicode(render_clone_str(repository)))\n __M_writer(u'\\n </div>\\n')\n pass\n __M_writer(u' <form name=\"repository_type\">\\n ')\n __M_writer(unicode(render_repository_type_select_field(\n repository_type_select_field, render_help=False)))\n __M_writer(u'\\n </form>\\n')\n if can_push:\n __M_writer(\n u' <form name=\"select_files_to_delete\" id=\"select_files_to_delete\" action=\"'\n )\n __M_writer(unicode(h.url_for(controller='repository',\n action='select_files_to_delete', id=trans.security.\n encode_id(repository.id))))\n __M_writer(\n u\"\"\"\" method=\"post\" >\n <div class=\"form-row\" >\n <label>Contents:</label>\n <div id=\"tree\" >\n Loading...\n </div>\n <div class=\"toolParamHelp\" style=\"clear: both;\">\n Click on a file to display it's contents below. 
You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.\n </div>\n <input id=\"selected_files_to_delete\" name=\"selected_files_to_delete\" type=\"hidden\" value=\"\"/>\n </div>\n <div class=\"form-row\">\n <label>Message:</label>\n <div class=\"form-row-input\">\n\"\"\"\n )\n if commit_message:\n __M_writer(\n u' <textarea name=\"commit_message\" rows=\"3\" cols=\"35\">'\n )\n __M_writer(filters.html_escape(unicode(commit_message)))\n __M_writer(u'</textarea>\\n')\n else:\n __M_writer(\n u\"\"\" <textarea name=\"commit_message\" rows=\"3\" cols=\"35\"></textarea>\n\"\"\"\n )\n pass\n __M_writer(\n u\"\"\" </div>\n <div class=\"toolParamHelp\" style=\"clear: both;\">\n This is the commit message for the mercurial change set that will be created if you delete selected files.\n </div>\n <div style=\"clear: both\"></div>\n </div>\n <div class=\"form-row\">\n <input type=\"submit\" name=\"select_files_to_delete_button\" value=\"Delete selected files\"/>\n </div>\n <div class=\"form-row\">\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\n </div>\n </form>\n\"\"\"\n )\n else:\n __M_writer(\n u\"\"\" <div class=\"toolFormBody\">\n <div class=\"form-row\" >\n <label>Contents:</label>\n <div id=\"tree\" >\n Loading...\n </div>\n </div>\n <div class=\"form-row\">\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\n </div>\n </div>\n\"\"\"\n )\n pass\n __M_writer(u' </div>\\n <p/>\\n')\n pass\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n<mask token>\n\n\ndef render_javascripts(context):\n context.caller_stack._push_frame()\n try:\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, 
'__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n common_javascripts = _import_ns.get('common_javascripts', context.\n get('common_javascripts', UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository',\n UNDEFINED))\n parent = _import_ns.get('parent', context.get('parent', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n ')\n __M_writer(unicode(parent.javascripts()))\n __M_writer(u'\\n ')\n __M_writer(unicode(h.js('libs/jquery/jquery.rating',\n 'libs/jquery/jquery-ui', 'libs/jquery/jquery.cookie',\n 'libs/jquery/jquery.dynatree')))\n __M_writer(u'\\n ')\n __M_writer(unicode(common_javascripts(repository)))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n",
"step-3": "<mask token>\n\n\ndef inherit(context):\n if context.get('use_panels'):\n return '/webapps/tool_shed/base_panels.mako'\n else:\n return '/base.mako'\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\ndef _mako_generate_namespaces(context):\n ns = runtime.TemplateNamespace('__anon_0x88e2e50', context.\n _clean_inheritance_tokens(), templateuri=u'/message.mako',\n callables=None, calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x88e2e50'] = ns\n ns = runtime.TemplateNamespace('__anon_0x7ee9750', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/common/common.mako', callables=None,\n calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x7ee9750'] = ns\n ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/repository/common.mako', callables=None,\n calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x8a2fd90'] = ns\n ns = runtime.TemplateNamespace('__anon_0x88e21d0', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/common/repository_actions_menu.mako',\n callables=None, calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x88e21d0'] = ns\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, inherit(context), _template_uri)\n\n\ndef render_body(context, **pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, 
'__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n status = _import_ns.get('status', context.get('status', UNDEFINED))\n render_clone_str = _import_ns.get('render_clone_str', context.get(\n 'render_clone_str', UNDEFINED))\n render_repository_type_select_field = _import_ns.get(\n 'render_repository_type_select_field', context.get(\n 'render_repository_type_select_field', UNDEFINED))\n render_msg = _import_ns.get('render_msg', context.get('render_msg',\n UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository',\n UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n render_tool_shed_repository_actions = _import_ns.get(\n 'render_tool_shed_repository_actions', context.get(\n 'render_tool_shed_repository_actions', UNDEFINED))\n is_malicious = _import_ns.get('is_malicious', context.get(\n 'is_malicious', UNDEFINED))\n repository_type_select_field = _import_ns.get(\n 'repository_type_select_field', context.get(\n 'repository_type_select_field', UNDEFINED))\n commit_message = _import_ns.get('commit_message', context.get(\n 'commit_message', UNDEFINED))\n message = _import_ns.get('message', context.get('message', UNDEFINED))\n trans = _import_ns.get('trans', context.get('trans', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n')\n is_new = repository.is_new(trans.app)\n can_push = trans.app.security_agent.can_push(trans.app, trans.user,\n repository)\n can_download = not is_new and (not is_malicious or can_push)\n can_browse_contents = not is_new\n __M_locals_builtin_stored = __M_locals_builtin()\n __M_locals.update(__M_dict_builtin([(__M_key,\n __M_locals_builtin_stored[__M_key]) for __M_key in ['can_push',\n 'can_browse_contents', 'is_new', 'can_download'] if __M_key in\n __M_locals_builtin_stored]))\n 
__M_writer(u'\\n\\n')\n __M_writer(unicode(render_tool_shed_repository_actions(repository)))\n __M_writer(u'\\n\\n')\n if message:\n __M_writer(u' ')\n __M_writer(unicode(render_msg(message, status)))\n __M_writer(u'\\n')\n pass\n __M_writer(u'\\n')\n if can_browse_contents:\n __M_writer(\n u\"\"\" <div class=\"toolForm\">\n <div class=\"toolFormTitle\">Repository '\"\"\"\n )\n __M_writer(filters.html_escape(unicode(repository.name)))\n __M_writer(u\"' revision \")\n __M_writer(filters.html_escape(unicode(repository.tip(trans.app))))\n __M_writer(u' (repository tip)</div>\\n')\n if can_download:\n __M_writer(\n u\"\"\" <div class=\"form-row\">\n <label>Clone this repository:</label>\n \"\"\"\n )\n __M_writer(unicode(render_clone_str(repository)))\n __M_writer(u'\\n </div>\\n')\n pass\n __M_writer(u' <form name=\"repository_type\">\\n ')\n __M_writer(unicode(render_repository_type_select_field(\n repository_type_select_field, render_help=False)))\n __M_writer(u'\\n </form>\\n')\n if can_push:\n __M_writer(\n u' <form name=\"select_files_to_delete\" id=\"select_files_to_delete\" action=\"'\n )\n __M_writer(unicode(h.url_for(controller='repository',\n action='select_files_to_delete', id=trans.security.\n encode_id(repository.id))))\n __M_writer(\n u\"\"\"\" method=\"post\" >\n <div class=\"form-row\" >\n <label>Contents:</label>\n <div id=\"tree\" >\n Loading...\n </div>\n <div class=\"toolParamHelp\" style=\"clear: both;\">\n Click on a file to display it's contents below. 
You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.\n </div>\n <input id=\"selected_files_to_delete\" name=\"selected_files_to_delete\" type=\"hidden\" value=\"\"/>\n </div>\n <div class=\"form-row\">\n <label>Message:</label>\n <div class=\"form-row-input\">\n\"\"\"\n )\n if commit_message:\n __M_writer(\n u' <textarea name=\"commit_message\" rows=\"3\" cols=\"35\">'\n )\n __M_writer(filters.html_escape(unicode(commit_message)))\n __M_writer(u'</textarea>\\n')\n else:\n __M_writer(\n u\"\"\" <textarea name=\"commit_message\" rows=\"3\" cols=\"35\"></textarea>\n\"\"\"\n )\n pass\n __M_writer(\n u\"\"\" </div>\n <div class=\"toolParamHelp\" style=\"clear: both;\">\n This is the commit message for the mercurial change set that will be created if you delete selected files.\n </div>\n <div style=\"clear: both\"></div>\n </div>\n <div class=\"form-row\">\n <input type=\"submit\" name=\"select_files_to_delete_button\" value=\"Delete selected files\"/>\n </div>\n <div class=\"form-row\">\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\n </div>\n </form>\n\"\"\"\n )\n else:\n __M_writer(\n u\"\"\" <div class=\"toolFormBody\">\n <div class=\"form-row\" >\n <label>Contents:</label>\n <div id=\"tree\" >\n Loading...\n </div>\n </div>\n <div class=\"form-row\">\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\n </div>\n </div>\n\"\"\"\n )\n pass\n __M_writer(u' </div>\\n <p/>\\n')\n pass\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_stylesheets(context):\n context.caller_stack._push_frame()\n try:\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, 
'__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n parent = _import_ns.get('parent', context.get('parent', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n ')\n __M_writer(unicode(parent.stylesheets()))\n __M_writer(u'\\n ')\n __M_writer(unicode(h.css('jquery.rating', 'dynatree_skin/ui.dynatree'))\n )\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_javascripts(context):\n context.caller_stack._push_frame()\n try:\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n common_javascripts = _import_ns.get('common_javascripts', context.\n get('common_javascripts', UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository',\n UNDEFINED))\n parent = _import_ns.get('parent', context.get('parent', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n ')\n __M_writer(unicode(parent.javascripts()))\n __M_writer(u'\\n ')\n __M_writer(unicode(h.js('libs/jquery/jquery.rating',\n 'libs/jquery/jquery-ui', 'libs/jquery/jquery.cookie',\n 'libs/jquery/jquery.dynatree')))\n __M_writer(u'\\n ')\n __M_writer(unicode(common_javascripts(repository)))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n",
"step-4": "from mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1383550959.038948\n_template_filename = (\n 'templates/webapps/tool_shed/repository/browse_repository.mako')\n_template_uri = '/webapps/tool_shed/repository/browse_repository.mako'\n_template_cache = cache.Cache(__name__, _modified_time)\n_source_encoding = 'ascii'\n_exports = ['stylesheets', 'javascripts']\n\n\ndef inherit(context):\n if context.get('use_panels'):\n return '/webapps/tool_shed/base_panels.mako'\n else:\n return '/base.mako'\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[__name__, name]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[__name__, name]\n\n\ndef _mako_generate_namespaces(context):\n ns = runtime.TemplateNamespace('__anon_0x88e2e50', context.\n _clean_inheritance_tokens(), templateuri=u'/message.mako',\n callables=None, calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x88e2e50'] = ns\n ns = runtime.TemplateNamespace('__anon_0x7ee9750', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/common/common.mako', callables=None,\n calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x7ee9750'] = ns\n ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/repository/common.mako', callables=None,\n calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x8a2fd90'] = ns\n ns = runtime.TemplateNamespace('__anon_0x88e21d0', context.\n _clean_inheritance_tokens(), templateuri=\n u'/webapps/tool_shed/common/repository_actions_menu.mako',\n callables=None, calling_uri=_template_uri)\n context.namespaces[__name__, '__anon_0x88e21d0'] = ns\n\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, inherit(context), 
_template_uri)\n\n\ndef render_body(context, **pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n status = _import_ns.get('status', context.get('status', UNDEFINED))\n render_clone_str = _import_ns.get('render_clone_str', context.get(\n 'render_clone_str', UNDEFINED))\n render_repository_type_select_field = _import_ns.get(\n 'render_repository_type_select_field', context.get(\n 'render_repository_type_select_field', UNDEFINED))\n render_msg = _import_ns.get('render_msg', context.get('render_msg',\n UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository',\n UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n render_tool_shed_repository_actions = _import_ns.get(\n 'render_tool_shed_repository_actions', context.get(\n 'render_tool_shed_repository_actions', UNDEFINED))\n is_malicious = _import_ns.get('is_malicious', context.get(\n 'is_malicious', UNDEFINED))\n repository_type_select_field = _import_ns.get(\n 'repository_type_select_field', context.get(\n 'repository_type_select_field', UNDEFINED))\n commit_message = _import_ns.get('commit_message', context.get(\n 'commit_message', UNDEFINED))\n message = _import_ns.get('message', context.get('message', UNDEFINED))\n trans = _import_ns.get('trans', context.get('trans', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n')\n __M_writer(u'\\n\\n')\n is_new = 
repository.is_new(trans.app)\n can_push = trans.app.security_agent.can_push(trans.app, trans.user,\n repository)\n can_download = not is_new and (not is_malicious or can_push)\n can_browse_contents = not is_new\n __M_locals_builtin_stored = __M_locals_builtin()\n __M_locals.update(__M_dict_builtin([(__M_key,\n __M_locals_builtin_stored[__M_key]) for __M_key in ['can_push',\n 'can_browse_contents', 'is_new', 'can_download'] if __M_key in\n __M_locals_builtin_stored]))\n __M_writer(u'\\n\\n')\n __M_writer(unicode(render_tool_shed_repository_actions(repository)))\n __M_writer(u'\\n\\n')\n if message:\n __M_writer(u' ')\n __M_writer(unicode(render_msg(message, status)))\n __M_writer(u'\\n')\n pass\n __M_writer(u'\\n')\n if can_browse_contents:\n __M_writer(\n u\"\"\" <div class=\"toolForm\">\n <div class=\"toolFormTitle\">Repository '\"\"\"\n )\n __M_writer(filters.html_escape(unicode(repository.name)))\n __M_writer(u\"' revision \")\n __M_writer(filters.html_escape(unicode(repository.tip(trans.app))))\n __M_writer(u' (repository tip)</div>\\n')\n if can_download:\n __M_writer(\n u\"\"\" <div class=\"form-row\">\n <label>Clone this repository:</label>\n \"\"\"\n )\n __M_writer(unicode(render_clone_str(repository)))\n __M_writer(u'\\n </div>\\n')\n pass\n __M_writer(u' <form name=\"repository_type\">\\n ')\n __M_writer(unicode(render_repository_type_select_field(\n repository_type_select_field, render_help=False)))\n __M_writer(u'\\n </form>\\n')\n if can_push:\n __M_writer(\n u' <form name=\"select_files_to_delete\" id=\"select_files_to_delete\" action=\"'\n )\n __M_writer(unicode(h.url_for(controller='repository',\n action='select_files_to_delete', id=trans.security.\n encode_id(repository.id))))\n __M_writer(\n u\"\"\"\" method=\"post\" >\n <div class=\"form-row\" >\n <label>Contents:</label>\n <div id=\"tree\" >\n Loading...\n </div>\n <div class=\"toolParamHelp\" style=\"clear: both;\">\n Click on a file to display it's contents below. 
You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.\n </div>\n <input id=\"selected_files_to_delete\" name=\"selected_files_to_delete\" type=\"hidden\" value=\"\"/>\n </div>\n <div class=\"form-row\">\n <label>Message:</label>\n <div class=\"form-row-input\">\n\"\"\"\n )\n if commit_message:\n __M_writer(\n u' <textarea name=\"commit_message\" rows=\"3\" cols=\"35\">'\n )\n __M_writer(filters.html_escape(unicode(commit_message)))\n __M_writer(u'</textarea>\\n')\n else:\n __M_writer(\n u\"\"\" <textarea name=\"commit_message\" rows=\"3\" cols=\"35\"></textarea>\n\"\"\"\n )\n pass\n __M_writer(\n u\"\"\" </div>\n <div class=\"toolParamHelp\" style=\"clear: both;\">\n This is the commit message for the mercurial change set that will be created if you delete selected files.\n </div>\n <div style=\"clear: both\"></div>\n </div>\n <div class=\"form-row\">\n <input type=\"submit\" name=\"select_files_to_delete_button\" value=\"Delete selected files\"/>\n </div>\n <div class=\"form-row\">\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\n </div>\n </form>\n\"\"\"\n )\n else:\n __M_writer(\n u\"\"\" <div class=\"toolFormBody\">\n <div class=\"form-row\" >\n <label>Contents:</label>\n <div id=\"tree\" >\n Loading...\n </div>\n </div>\n <div class=\"form-row\">\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\n </div>\n </div>\n\"\"\"\n )\n pass\n __M_writer(u' </div>\\n <p/>\\n')\n pass\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_stylesheets(context):\n context.caller_stack._push_frame()\n try:\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, 
'__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n parent = _import_ns.get('parent', context.get('parent', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n ')\n __M_writer(unicode(parent.stylesheets()))\n __M_writer(u'\\n ')\n __M_writer(unicode(h.css('jquery.rating', 'dynatree_skin/ui.dynatree'))\n )\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_javascripts(context):\n context.caller_stack._push_frame()\n try:\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns,\n [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns,\n [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns,\n [u'render_tool_shed_repository_actions'])\n common_javascripts = _import_ns.get('common_javascripts', context.\n get('common_javascripts', UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository',\n UNDEFINED))\n parent = _import_ns.get('parent', context.get('parent', UNDEFINED))\n __M_writer = context.writer()\n __M_writer(u'\\n ')\n __M_writer(unicode(parent.javascripts()))\n __M_writer(u'\\n ')\n __M_writer(unicode(h.js('libs/jquery/jquery.rating',\n 'libs/jquery/jquery-ui', 'libs/jquery/jquery.cookie',\n 'libs/jquery/jquery.dynatree')))\n __M_writer(u'\\n ')\n __M_writer(unicode(common_javascripts(repository)))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n",
"step-5": "# -*- encoding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1383550959.0389481\n_template_filename='templates/webapps/tool_shed/repository/browse_repository.mako'\n_template_uri='/webapps/tool_shed/repository/browse_repository.mako'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='ascii'\n_exports = ['stylesheets', 'javascripts']\n\n\n# SOURCE LINE 7\n\ndef inherit(context):\n if context.get('use_panels'):\n return '/webapps/tool_shed/base_panels.mako'\n else:\n return '/base.mako'\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n # SOURCE LINE 2\n ns = runtime.TemplateNamespace('__anon_0x88e2e50', context._clean_inheritance_tokens(), templateuri=u'/message.mako', callables=None, calling_uri=_template_uri)\n context.namespaces[(__name__, '__anon_0x88e2e50')] = ns\n\n # SOURCE LINE 4\n ns = runtime.TemplateNamespace('__anon_0x7ee9750', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/common/common.mako', callables=None, calling_uri=_template_uri)\n context.namespaces[(__name__, '__anon_0x7ee9750')] = ns\n\n # SOURCE LINE 5\n ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/repository/common.mako', callables=None, calling_uri=_template_uri)\n context.namespaces[(__name__, '__anon_0x8a2fd90')] = ns\n\n # SOURCE LINE 3\n ns = runtime.TemplateNamespace('__anon_0x88e21d0', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/common/repository_actions_menu.mako', callables=None, calling_uri=_template_uri)\n context.namespaces[(__name__, '__anon_0x88e21d0')] = ns\n\ndef _mako_inherit(template, context):\n 
_mako_generate_namespaces(context)\n return runtime._inherit_from(context, (inherit(context)), _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns, [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns, [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns, [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns, [u'render_tool_shed_repository_actions'])\n status = _import_ns.get('status', context.get('status', UNDEFINED))\n render_clone_str = _import_ns.get('render_clone_str', context.get('render_clone_str', UNDEFINED))\n render_repository_type_select_field = _import_ns.get('render_repository_type_select_field', context.get('render_repository_type_select_field', UNDEFINED))\n render_msg = _import_ns.get('render_msg', context.get('render_msg', UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository', UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n render_tool_shed_repository_actions = _import_ns.get('render_tool_shed_repository_actions', context.get('render_tool_shed_repository_actions', UNDEFINED))\n is_malicious = _import_ns.get('is_malicious', context.get('is_malicious', UNDEFINED))\n repository_type_select_field = _import_ns.get('repository_type_select_field', context.get('repository_type_select_field', UNDEFINED))\n commit_message = _import_ns.get('commit_message', context.get('commit_message', UNDEFINED))\n message = _import_ns.get('message', context.get('message', UNDEFINED))\n trans = _import_ns.get('trans', context.get('trans', UNDEFINED))\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n')\n # SOURCE LINE 2\n __M_writer(u'\\n')\n # SOURCE LINE 3\n __M_writer(u'\\n')\n # SOURCE LINE 4\n __M_writer(u'\\n')\n # SOURCE LINE 5\n 
__M_writer(u'\\n\\n')\n # SOURCE LINE 13\n __M_writer(u'\\n')\n # SOURCE LINE 14\n __M_writer(u'\\n\\n')\n # SOURCE LINE 19\n __M_writer(u'\\n\\n')\n # SOURCE LINE 25\n __M_writer(u'\\n\\n')\n # SOURCE LINE 27\n\n is_new = repository.is_new( trans.app )\n can_push = trans.app.security_agent.can_push( trans.app, trans.user, repository )\n can_download = not is_new and ( not is_malicious or can_push )\n can_browse_contents = not is_new\n \n \n __M_locals_builtin_stored = __M_locals_builtin()\n __M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin_stored[__M_key]) for __M_key in ['can_push','can_browse_contents','is_new','can_download'] if __M_key in __M_locals_builtin_stored]))\n # SOURCE LINE 32\n __M_writer(u'\\n\\n')\n # SOURCE LINE 34\n __M_writer(unicode(render_tool_shed_repository_actions( repository )))\n __M_writer(u'\\n\\n')\n # SOURCE LINE 36\n if message:\n # SOURCE LINE 37\n __M_writer(u' ')\n __M_writer(unicode(render_msg( message, status )))\n __M_writer(u'\\n')\n pass\n # SOURCE LINE 39\n __M_writer(u'\\n')\n # SOURCE LINE 40\n if can_browse_contents:\n # SOURCE LINE 41\n __M_writer(u' <div class=\"toolForm\">\\n <div class=\"toolFormTitle\">Repository \\'')\n # SOURCE LINE 42\n __M_writer(filters.html_escape(unicode(repository.name )))\n __M_writer(u\"' revision \")\n __M_writer(filters.html_escape(unicode(repository.tip( trans.app ) )))\n __M_writer(u' (repository tip)</div>\\n')\n # SOURCE LINE 43\n if can_download:\n # SOURCE LINE 44\n __M_writer(u' <div class=\"form-row\">\\n <label>Clone this repository:</label>\\n ')\n # SOURCE LINE 46\n __M_writer(unicode(render_clone_str( repository )))\n __M_writer(u'\\n </div>\\n')\n pass\n # SOURCE LINE 49\n __M_writer(u' <form name=\"repository_type\">\\n ')\n # SOURCE LINE 50\n __M_writer(unicode(render_repository_type_select_field( repository_type_select_field, render_help=False )))\n __M_writer(u'\\n </form>\\n')\n # SOURCE LINE 52\n if can_push:\n # SOURCE LINE 53\n __M_writer(u' <form 
name=\"select_files_to_delete\" id=\"select_files_to_delete\" action=\"')\n __M_writer(unicode(h.url_for( controller='repository', action='select_files_to_delete', id=trans.security.encode_id( repository.id ))))\n __M_writer(u'\" method=\"post\" >\\n <div class=\"form-row\" >\\n <label>Contents:</label>\\n <div id=\"tree\" >\\n Loading...\\n </div>\\n <div class=\"toolParamHelp\" style=\"clear: both;\">\\n Click on a file to display it\\'s contents below. You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.\\n </div>\\n <input id=\"selected_files_to_delete\" name=\"selected_files_to_delete\" type=\"hidden\" value=\"\"/>\\n </div>\\n <div class=\"form-row\">\\n <label>Message:</label>\\n <div class=\"form-row-input\">\\n')\n # SOURCE LINE 67\n if commit_message:\n # SOURCE LINE 68\n __M_writer(u' <textarea name=\"commit_message\" rows=\"3\" cols=\"35\">')\n __M_writer(filters.html_escape(unicode(commit_message )))\n __M_writer(u'</textarea>\\n')\n # SOURCE LINE 69\n else:\n # SOURCE LINE 70\n __M_writer(u' <textarea name=\"commit_message\" rows=\"3\" cols=\"35\"></textarea>\\n')\n pass\n # SOURCE LINE 72\n __M_writer(u' </div>\\n <div class=\"toolParamHelp\" style=\"clear: both;\">\\n This is the commit message for the mercurial change set that will be created if you delete selected files.\\n </div>\\n <div style=\"clear: both\"></div>\\n </div>\\n <div class=\"form-row\">\\n <input type=\"submit\" name=\"select_files_to_delete_button\" value=\"Delete selected files\"/>\\n </div>\\n <div class=\"form-row\">\\n <div id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\\n </div>\\n </form>\\n')\n # SOURCE LINE 85\n else:\n # SOURCE LINE 86\n __M_writer(u' <div class=\"toolFormBody\">\\n <div class=\"form-row\" >\\n <label>Contents:</label>\\n <div id=\"tree\" >\\n Loading...\\n </div>\\n </div>\\n <div class=\"form-row\">\\n <div 
id=\"file_contents\" class=\"toolParamHelp\" style=\"clear: both;background-color:#FAFAFA;\"></div>\\n </div>\\n </div>\\n')\n pass\n # SOURCE LINE 98\n __M_writer(u' </div>\\n <p/>\\n')\n pass\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_stylesheets(context):\n context.caller_stack._push_frame()\n try:\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns, [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns, [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns, [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns, [u'render_tool_shed_repository_actions'])\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n parent = _import_ns.get('parent', context.get('parent', UNDEFINED))\n __M_writer = context.writer()\n # SOURCE LINE 16\n __M_writer(u'\\n ')\n # SOURCE LINE 17\n __M_writer(unicode(parent.stylesheets()))\n __M_writer(u'\\n ')\n # SOURCE LINE 18\n __M_writer(unicode(h.css( \"jquery.rating\", \"dynatree_skin/ui.dynatree\" )))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_javascripts(context):\n context.caller_stack._push_frame()\n try:\n _import_ns = {}\n _mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns, [u'render_msg'])\n _mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns, [u'*'])\n _mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns, [u'*'])\n _mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns, [u'render_tool_shed_repository_actions'])\n common_javascripts = _import_ns.get('common_javascripts', context.get('common_javascripts', UNDEFINED))\n h = _import_ns.get('h', context.get('h', UNDEFINED))\n repository = _import_ns.get('repository', context.get('repository', UNDEFINED))\n parent = _import_ns.get('parent', context.get('parent', UNDEFINED))\n __M_writer = context.writer()\n # 
SOURCE LINE 21\n __M_writer(u'\\n ')\n # SOURCE LINE 22\n __M_writer(unicode(parent.javascripts()))\n __M_writer(u'\\n ')\n # SOURCE LINE 23\n __M_writer(unicode(h.js( \"libs/jquery/jquery.rating\", \"libs/jquery/jquery-ui\", \"libs/jquery/jquery.cookie\", \"libs/jquery/jquery.dynatree\" )))\n __M_writer(u'\\n ')\n # SOURCE LINE 24\n __M_writer(unicode(common_javascripts(repository)))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n",
"step-ids": [
3,
5,
7,
9,
10
]
}
|
[
3,
5,
7,
9,
10
] |
<|reserved_special_token_0|>
def main():
keep_going = 'y'
while keep_going == 'y':
guess = int(input('\nGuess a number between 1 and 100: '))
if guess > randomNumber:
print('\nToo high, try again.')
elif guess < randomNumber:
print('\nToo low, try again')
else:
print('\nCongratulations, you guessed the correct number!')
keep_going = 'n'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
keep_going = 'y'
while keep_going == 'y':
guess = int(input('\nGuess a number between 1 and 100: '))
if guess > randomNumber:
print('\nToo high, try again.')
elif guess < randomNumber:
print('\nToo low, try again')
else:
print('\nCongratulations, you guessed the correct number!')
keep_going = 'n'
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
randomNumber = random.randint(1, 100)
def main():
keep_going = 'y'
while keep_going == 'y':
guess = int(input('\nGuess a number between 1 and 100: '))
if guess > randomNumber:
print('\nToo high, try again.')
elif guess < randomNumber:
print('\nToo low, try again')
else:
print('\nCongratulations, you guessed the correct number!')
keep_going = 'n'
main()
<|reserved_special_token_1|>
import random
randomNumber = random.randint(1, 100)
def main():
keep_going = 'y'
while keep_going == 'y':
guess = int(input('\nGuess a number between 1 and 100: '))
if guess > randomNumber:
print('\nToo high, try again.')
elif guess < randomNumber:
print('\nToo low, try again')
else:
print('\nCongratulations, you guessed the correct number!')
keep_going = 'n'
main()
<|reserved_special_token_1|>
# Random number guessing game.
# 10 July 20
# CTI-110 P5HW1 - Random Number
# Thelma Majette
import random
randomNumber = random.randint (1,100)
# main function
def main():
# Create a variable to control the loop.
keep_going = 'y'
while keep_going == 'y':
# Ask user for a number ()
guess = int(input('\nGuess a number between 1 and 100: '))
# Perform the selected action.
if guess > randomNumber:
print ('\nToo high, try again.' )
elif guess < randomNumber:
print ('\nToo low, try again' )
else:
print ('\nCongratulations, you guessed the correct number!')
keep_going ='n'
main ()
|
flexible
|
{
"blob_id": "c09c02a36a64e9522cfc8c0951bd6c98f404f09c",
"index": 367,
"step-1": "<mask token>\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n",
"step-3": "<mask token>\nrandomNumber = random.randint(1, 100)\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n",
"step-4": "import random\nrandomNumber = random.randint(1, 100)\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n",
"step-5": "# Random number guessing game.\r\n# 10 July 20\r\n# CTI-110 P5HW1 - Random Number\r\n# Thelma Majette\r\n\r\nimport random\r\n\r\nrandomNumber = random.randint (1,100)\r\n\r\n# main function\r\ndef main():\r\n\r\n # Create a variable to control the loop.\r\n keep_going = 'y'\r\n while keep_going == 'y':\r\n\r\n # Ask user for a number ()\r\n guess = int(input('\\nGuess a number between 1 and 100: '))\r\n\r\n # Perform the selected action.\r\n if guess > randomNumber:\r\n print ('\\nToo high, try again.' )\r\n elif guess < randomNumber:\r\n print ('\\nToo low, try again' )\r\n else:\r\n print ('\\nCongratulations, you guessed the correct number!')\r\n keep_going ='n'\r\n \r\n \r\n \r\nmain () \r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# Bootstrap the compiled SWIG extension module (_SoapySDR).
from sys import version_info
if version_info >= (2,6,0):
    # Python >= 2.6: look for the extension next to this wrapper first, so
    # the binary that was generated together with this file is preferred
    # over any other copy on sys.path.
    def swig_import_helper():
        """Load the _SoapySDR extension, preferring the copy beside this file."""
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_SoapySDR', [dirname(__file__)])
        except ImportError:
            # Not found next to this file -- fall back to the normal import
            # machinery (e.g. an installed extension elsewhere on sys.path).
            import _SoapySDR
            return _SoapySDR
        if fp is not None:
            try:
                _mod = imp.load_module('_SoapySDR', fp, pathname, description)
            finally:
                # imp.find_module opened the file; always release the handle.
                fp.close()
            return _mod
    _SoapySDR = swig_import_helper()
    del swig_import_helper
else:
    # Older interpreters: plain import only.
    import _SoapySDR
del version_info
# Alias the builtin property() when available; on interpreters without it
# the generated property definitions are simply skipped (_newclass below).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Non-strict setter used by dynamic proxy classes: unknown names are
    stored in the instance __dict__ instead of raising (static=0)."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Probe for new-style class support; _newclass gates whether generated
# attribute accessors become real property() objects in the classes below.
# NOTE(review): a missing `object` builtin would raise NameError, not
# AttributeError, so the fallback branch looks unreachable on any real
# interpreter -- kept exactly as SWIG generated it.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
class SwigPyIterator(_object):
    """Abstract proxy over a C++ iterator produced by the _SoapySDR extension.

    Instances come only from the C side; calling the constructor directly
    raises.  Supports both the Python 2 (``next``) and Python 3
    (``__next__``) iteration protocols plus pointer-style arithmetic
    (``incr``/``decr``/``advance`` and ``+``/``-``/``+=``/``-=``).
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    # Destruction is delegated to the C side; __del__ is a no-op because the
    # SWIG runtime invokes __swig_destroy__ itself when ownership allows.
    __swig_destroy__ = _SoapySDR.delete_SwigPyIterator
    __del__ = lambda self : None;
    def value(self): return _SoapySDR.SwigPyIterator_value(self)
    def incr(self, n=1): return _SoapySDR.SwigPyIterator_incr(self, n)
    def decr(self, n=1): return _SoapySDR.SwigPyIterator_decr(self, n)
    def distance(self, *args): return _SoapySDR.SwigPyIterator_distance(self, *args)
    def equal(self, *args): return _SoapySDR.SwigPyIterator_equal(self, *args)
    def copy(self): return _SoapySDR.SwigPyIterator_copy(self)
    def next(self): return _SoapySDR.SwigPyIterator_next(self)
    def __next__(self): return _SoapySDR.SwigPyIterator___next__(self)
    def previous(self): return _SoapySDR.SwigPyIterator_previous(self)
    def advance(self, *args): return _SoapySDR.SwigPyIterator_advance(self, *args)
    def __eq__(self, *args): return _SoapySDR.SwigPyIterator___eq__(self, *args)
    def __ne__(self, *args): return _SoapySDR.SwigPyIterator___ne__(self, *args)
    def __iadd__(self, *args): return _SoapySDR.SwigPyIterator___iadd__(self, *args)
    def __isub__(self, *args): return _SoapySDR.SwigPyIterator___isub__(self, *args)
    def __add__(self, *args): return _SoapySDR.SwigPyIterator___add__(self, *args)
    def __sub__(self, *args): return _SoapySDR.SwigPyIterator___sub__(self, *args)
    def __iter__(self): return self
# Tie the proxy class to its C++ type in the SWIG runtime so wrapped
# objects returned from C calls are presented as SwigPyIterator.
SwigPyIterator_swigregister = _SoapySDR.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
def KwargsFromString(*args):
  """Convert a markup string into a kwargs map (delegates to the C library)."""
  return _SoapySDR.KwargsFromString(*args)
# SWIG "fastproxy": rebind the name straight to the C function, bypassing
# the pure-Python shim above (which remains only for introspection).
KwargsFromString = _SoapySDR.KwargsFromString
def KwargsToString(*args):
  """Convert a kwargs map back into its markup-string form (C delegate)."""
  return _SoapySDR.KwargsToString(*args)
KwargsToString = _SoapySDR.KwargsToString
class Range(_object):
    """Proxy for a SoapySDR numeric range: minimum, maximum and step.

    Thin wrapper over the C++ Range type; all accessors delegate to the
    _SoapySDR extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Range, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Build the underlying C object, then attach it as self.this
        # (append when `this` already holds a SwigPyObject, else assign).
        this = _SoapySDR.new_Range(*args)
        try: self.this.append(this)
        except: self.this = this
    def minimum(self): return _SoapySDR.Range_minimum(self)
    def maximum(self): return _SoapySDR.Range_maximum(self)
    def step(self): return _SoapySDR.Range_step(self)
    def __str__(self):
        # Render as "min, max" -- the step is shown only when non-zero.
        fields = [self.minimum(), self.maximum()]
        if self.step() != 0.0: fields.append(self.step())
        return ', '.join(['%g'%f for f in fields])
    __swig_destroy__ = _SoapySDR.delete_Range
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime for its C++ type.
Range_swigregister = _SoapySDR.Range_swigregister
Range_swigregister(Range)
class ArgInfo(_object):
    """Proxy describing one settings argument: key, value, display name,
    description, units, value type, valid range and option lists.

    Each field is exposed through generated C getters/setters and, on
    new-style-class interpreters, as a property of the same name.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)
    __repr__ = _swig_repr
    def __init__(self):
        # Build the underlying C object and attach it as self.this.
        this = _SoapySDR.new_ArgInfo()
        try: self.this.append(this)
        except: self.this = this
    __swig_setmethods__["key"] = _SoapySDR.ArgInfo_key_set
    __swig_getmethods__["key"] = _SoapySDR.ArgInfo_key_get
    if _newclass:key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.ArgInfo_key_set)
    __swig_setmethods__["value"] = _SoapySDR.ArgInfo_value_set
    __swig_getmethods__["value"] = _SoapySDR.ArgInfo_value_get
    if _newclass:value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.ArgInfo_value_set)
    __swig_setmethods__["name"] = _SoapySDR.ArgInfo_name_set
    __swig_getmethods__["name"] = _SoapySDR.ArgInfo_name_get
    if _newclass:name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.ArgInfo_name_set)
    __swig_setmethods__["description"] = _SoapySDR.ArgInfo_description_set
    __swig_getmethods__["description"] = _SoapySDR.ArgInfo_description_get
    if _newclass:description = _swig_property(_SoapySDR.ArgInfo_description_get, _SoapySDR.ArgInfo_description_set)
    __swig_setmethods__["units"] = _SoapySDR.ArgInfo_units_set
    __swig_getmethods__["units"] = _SoapySDR.ArgInfo_units_get
    if _newclass:units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.ArgInfo_units_set)
    # Enumerated constants for the `type` field below.
    BOOL = _SoapySDR.ArgInfo_BOOL
    INT = _SoapySDR.ArgInfo_INT
    FLOAT = _SoapySDR.ArgInfo_FLOAT
    STRING = _SoapySDR.ArgInfo_STRING
    __swig_setmethods__["type"] = _SoapySDR.ArgInfo_type_set
    __swig_getmethods__["type"] = _SoapySDR.ArgInfo_type_get
    if _newclass:type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.ArgInfo_type_set)
    __swig_setmethods__["range"] = _SoapySDR.ArgInfo_range_set
    __swig_getmethods__["range"] = _SoapySDR.ArgInfo_range_get
    if _newclass:range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.ArgInfo_range_set)
    __swig_setmethods__["options"] = _SoapySDR.ArgInfo_options_set
    __swig_getmethods__["options"] = _SoapySDR.ArgInfo_options_get
    if _newclass:options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.ArgInfo_options_set)
    __swig_setmethods__["optionNames"] = _SoapySDR.ArgInfo_optionNames_set
    __swig_getmethods__["optionNames"] = _SoapySDR.ArgInfo_optionNames_get
    if _newclass:optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get, _SoapySDR.ArgInfo_optionNames_set)
    __swig_destroy__ = _SoapySDR.delete_ArgInfo
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime for its C++ type.
ArgInfo_swigregister = _SoapySDR.ArgInfo_swigregister
ArgInfo_swigregister(ArgInfo)
class SoapySDRKwargs(_object):
    """Proxy over a C++ string-to-string map with a dict-like Python API.

    Provides dict-style access (__getitem__/__setitem__, keys/values/items,
    `in`, len) plus the raw STL-map surface (begin/end, find, erase, ...)
    that SWIG generates.  ``asdict()`` copies the contents into a native
    Python dict.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargs, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRKwargs_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRKwargs___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRKwargs___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRKwargs___len__(self)
    # NOTE: this second __iter__ definition shadows the one above, so
    # iterating the object yields keys (dict-like), not (key, value) pairs.
    def __iter__(self): return self.key_iterator()
    def iterkeys(self): return self.key_iterator()
    def itervalues(self): return self.value_iterator()
    def iteritems(self): return self.iterator()
    def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)
    def has_key(self, *args): return _SoapySDR.SoapySDRKwargs_has_key(self, *args)
    def keys(self): return _SoapySDR.SoapySDRKwargs_keys(self)
    def values(self): return _SoapySDR.SoapySDRKwargs_values(self)
    def items(self): return _SoapySDR.SoapySDRKwargs_items(self)
    def __contains__(self, *args): return _SoapySDR.SoapySDRKwargs___contains__(self, *args)
    def key_iterator(self): return _SoapySDR.SoapySDRKwargs_key_iterator(self)
    def value_iterator(self): return _SoapySDR.SoapySDRKwargs_value_iterator(self)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)
    def asdict(self): return _SoapySDR.SoapySDRKwargs_asdict(self)
    def __init__(self, *args):
        # Build the underlying C object and attach it as self.this.
        this = _SoapySDR.new_SoapySDRKwargs(*args)
        try: self.this.append(this)
        except: self.this = this
    # STL-map style surface, generated verbatim by SWIG.
    def empty(self): return _SoapySDR.SoapySDRKwargs_empty(self)
    def size(self): return _SoapySDR.SoapySDRKwargs_size(self)
    def clear(self): return _SoapySDR.SoapySDRKwargs_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRKwargs_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRKwargs_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRKwargs_begin(self)
    def end(self): return _SoapySDR.SoapySDRKwargs_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRKwargs_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRKwargs_rend(self)
    def count(self, *args): return _SoapySDR.SoapySDRKwargs_count(self, *args)
    def erase(self, *args): return _SoapySDR.SoapySDRKwargs_erase(self, *args)
    def find(self, *args): return _SoapySDR.SoapySDRKwargs_find(self, *args)
    def lower_bound(self, *args): return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)
    def upper_bound(self, *args): return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)
    def __str__(self):
        # Render as "{k0=v0, k1=v1}" using the (key, value) pair iterator.
        out = list()
        for k, v in self.iteritems():
            out.append("%s=%s"%(k, v))
        return '{'+(', '.join(out))+'}'
    __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime for its C++ type.
SoapySDRKwargs_swigregister = _SoapySDR.SoapySDRKwargs_swigregister
SoapySDRKwargs_swigregister(SoapySDRKwargs)
class SoapySDRKwargsList(_object):
    """Proxy over a C++ vector of kwargs maps with a list-like Python API.

    Supports sequence-style access (indexing, slicing, append, pop, len,
    iteration) alongside the raw STL-vector surface (push_back, front,
    back, reserve, capacity, ...) that SWIG generates.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargsList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRKwargsList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRKwargsList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRKwargsList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRKwargsList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRKwargsList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRKwargsList_append(self, *args)
    # STL-vector style surface, generated verbatim by SWIG.
    def empty(self): return _SoapySDR.SoapySDRKwargsList_empty(self)
    def size(self): return _SoapySDR.SoapySDRKwargsList_size(self)
    def clear(self): return _SoapySDR.SoapySDRKwargsList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRKwargsList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRKwargsList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRKwargsList_begin(self)
    def end(self): return _SoapySDR.SoapySDRKwargsList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRKwargsList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRKwargsList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRKwargsList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRKwargsList_erase(self, *args)
    def __init__(self, *args):
        # Build the underlying C object and attach it as self.this.
        this = _SoapySDR.new_SoapySDRKwargsList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRKwargsList_front(self)
    def back(self): return _SoapySDR.SoapySDRKwargsList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRKwargsList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRKwargsList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRKwargsList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRKwargsList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime for its C++ type.
SoapySDRKwargsList_swigregister = _SoapySDR.SoapySDRKwargsList_swigregister
SoapySDRKwargsList_swigregister(SoapySDRKwargsList)
class SoapySDRArgInfoList(_object):
    """SWIG proxy for a std::vector of SoapySDR::ArgInfo.

    All methods forward to the _SoapySDR C extension; the class mirrors the
    C++ std::vector interface (size/clear/push_back/...) plus Python sequence
    protocol dunders.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRArgInfoList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRArgInfoList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRArgInfoList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRArgInfoList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRArgInfoList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRArgInfoList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRArgInfoList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRArgInfoList_empty(self)
    def size(self): return _SoapySDR.SoapySDRArgInfoList_size(self)
    def clear(self): return _SoapySDR.SoapySDRArgInfoList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRArgInfoList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRArgInfoList_begin(self)
    def end(self): return _SoapySDR.SoapySDRArgInfoList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRArgInfoList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRArgInfoList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRArgInfoList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)
    def __init__(self, *args):
        # Attach the new C++ vector; try/except handles both SWIG 'this' layouts.
        this = _SoapySDR.new_SoapySDRArgInfoList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRArgInfoList_front(self)
    def back(self): return _SoapySDR.SoapySDRArgInfoList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRArgInfoList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList
    __del__ = lambda self : None;
SoapySDRArgInfoList_swigregister = _SoapySDR.SoapySDRArgInfoList_swigregister
SoapySDRArgInfoList_swigregister(SoapySDRArgInfoList)
class SoapySDRStringList(_object):
    """SWIG proxy for a std::vector of std::string.

    All methods forward to the _SoapySDR C extension; mirrors the C++
    std::vector interface plus the Python sequence protocol.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRStringList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRStringList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRStringList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRStringList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRStringList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRStringList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRStringList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRStringList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRStringList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRStringList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRStringList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRStringList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRStringList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRStringList_empty(self)
    def size(self): return _SoapySDR.SoapySDRStringList_size(self)
    def clear(self): return _SoapySDR.SoapySDRStringList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRStringList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRStringList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRStringList_begin(self)
    def end(self): return _SoapySDR.SoapySDRStringList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRStringList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRStringList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRStringList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRStringList_erase(self, *args)
    def __init__(self, *args):
        # Attach the new C++ vector; try/except handles both SWIG 'this' layouts.
        this = _SoapySDR.new_SoapySDRStringList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRStringList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRStringList_front(self)
    def back(self): return _SoapySDR.SoapySDRStringList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRStringList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRStringList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRStringList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRStringList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRStringList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList
    __del__ = lambda self : None;
SoapySDRStringList_swigregister = _SoapySDR.SoapySDRStringList_swigregister
SoapySDRStringList_swigregister(SoapySDRStringList)
class SoapySDRRangeList(_object):
    """SWIG proxy for a std::vector of SoapySDR::Range.

    All methods forward to the _SoapySDR C extension; mirrors the C++
    std::vector interface plus the Python sequence protocol.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRRangeList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRRangeList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRRangeList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRRangeList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRRangeList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRRangeList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRRangeList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRRangeList_empty(self)
    def size(self): return _SoapySDR.SoapySDRRangeList_size(self)
    def clear(self): return _SoapySDR.SoapySDRRangeList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRRangeList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRRangeList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRRangeList_begin(self)
    def end(self): return _SoapySDR.SoapySDRRangeList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRRangeList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRRangeList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRRangeList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRRangeList_erase(self, *args)
    def __init__(self, *args):
        # Attach the new C++ vector; try/except handles both SWIG 'this' layouts.
        this = _SoapySDR.new_SoapySDRRangeList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRRangeList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRRangeList_front(self)
    def back(self): return _SoapySDR.SoapySDRRangeList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRRangeList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRRangeList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRRangeList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRRangeList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRRangeList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList
    __del__ = lambda self : None;
SoapySDRRangeList_swigregister = _SoapySDR.SoapySDRRangeList_swigregister
SoapySDRRangeList_swigregister(SoapySDRRangeList)
class SoapySDRSizeList(_object):
    """SWIG proxy for a std::vector of size_t.

    All methods forward to the _SoapySDR C extension; mirrors the C++
    std::vector interface plus the Python sequence protocol.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRSizeList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRSizeList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRSizeList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRSizeList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRSizeList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRSizeList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRSizeList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRSizeList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRSizeList_empty(self)
    def size(self): return _SoapySDR.SoapySDRSizeList_size(self)
    def clear(self): return _SoapySDR.SoapySDRSizeList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRSizeList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRSizeList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRSizeList_begin(self)
    def end(self): return _SoapySDR.SoapySDRSizeList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRSizeList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRSizeList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRSizeList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRSizeList_erase(self, *args)
    def __init__(self, *args):
        # Attach the new C++ vector; try/except handles both SWIG 'this' layouts.
        this = _SoapySDR.new_SoapySDRSizeList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRSizeList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRSizeList_front(self)
    def back(self): return _SoapySDR.SoapySDRSizeList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRSizeList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRSizeList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRSizeList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRSizeList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRSizeList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList
    __del__ = lambda self : None;
SoapySDRSizeList_swigregister = _SoapySDR.SoapySDRSizeList_swigregister
SoapySDRSizeList_swigregister(SoapySDRSizeList)
class SoapySDRDoubleList(_object):
    """SWIG proxy for a std::vector of double.

    All methods forward to the _SoapySDR C extension; mirrors the C++
    std::vector interface plus the Python sequence protocol.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRDoubleList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRDoubleList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRDoubleList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRDoubleList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRDoubleList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRDoubleList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRDoubleList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRDoubleList_empty(self)
    def size(self): return _SoapySDR.SoapySDRDoubleList_size(self)
    def clear(self): return _SoapySDR.SoapySDRDoubleList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRDoubleList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRDoubleList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRDoubleList_begin(self)
    def end(self): return _SoapySDR.SoapySDRDoubleList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRDoubleList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRDoubleList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRDoubleList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRDoubleList_erase(self, *args)
    def __init__(self, *args):
        # Attach the new C++ vector; try/except handles both SWIG 'this' layouts.
        this = _SoapySDR.new_SoapySDRDoubleList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRDoubleList_front(self)
    def back(self): return _SoapySDR.SoapySDRDoubleList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRDoubleList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRDoubleList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRDoubleList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRDoubleList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList
    __del__ = lambda self : None;
SoapySDRDoubleList_swigregister = _SoapySDR.SoapySDRDoubleList_swigregister
SoapySDRDoubleList_swigregister(SoapySDRDoubleList)
class StreamResult(_object):
    """SWIG proxy for the result of a stream operation.

    Attributes (SWIG properties backed by C getters/setters):
    ret      -- number of elements transferred, or a negative error code
    flags    -- stream flag bitmask associated with the operation
    timeNs   -- hardware timestamp in nanoseconds
    chanMask -- channel mask (used by readStreamStatus)
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, StreamResult, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)
    __repr__ = _swig_repr
    def __init__(self):
        # Attach the new C struct; try/except handles both SWIG 'this' layouts.
        this = _SoapySDR.new_StreamResult()
        try: self.this.append(this)
        except: self.this = this
    __swig_setmethods__["ret"] = _SoapySDR.StreamResult_ret_set
    __swig_getmethods__["ret"] = _SoapySDR.StreamResult_ret_get
    if _newclass:ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.StreamResult_ret_set)
    __swig_setmethods__["flags"] = _SoapySDR.StreamResult_flags_set
    __swig_getmethods__["flags"] = _SoapySDR.StreamResult_flags_get
    if _newclass:flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.StreamResult_flags_set)
    __swig_setmethods__["timeNs"] = _SoapySDR.StreamResult_timeNs_set
    __swig_getmethods__["timeNs"] = _SoapySDR.StreamResult_timeNs_get
    if _newclass:timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get, _SoapySDR.StreamResult_timeNs_set)
    __swig_setmethods__["chanMask"] = _SoapySDR.StreamResult_chanMask_set
    __swig_getmethods__["chanMask"] = _SoapySDR.StreamResult_chanMask_get
    if _newclass:chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get, _SoapySDR.StreamResult_chanMask_set)
    def __str__(self):
        # NOTE: chanMask is intentionally not part of the string form here.
        return "ret=%s, flags=%s, timeNs=%s"%(self.ret, self.flags, self.timeNs)
    __swig_destroy__ = _SoapySDR.delete_StreamResult
    __del__ = lambda self : None;
StreamResult_swigregister = _SoapySDR.StreamResult_swigregister
StreamResult_swigregister(StreamResult)
# Stream direction identifiers and per-call stream flag bits, re-exported
# from the C extension.
SOAPY_SDR_TX = _SoapySDR.SOAPY_SDR_TX
SOAPY_SDR_RX = _SoapySDR.SOAPY_SDR_RX
SOAPY_SDR_END_BURST = _SoapySDR.SOAPY_SDR_END_BURST
SOAPY_SDR_HAS_TIME = _SoapySDR.SOAPY_SDR_HAS_TIME
SOAPY_SDR_END_ABRUPT = _SoapySDR.SOAPY_SDR_END_ABRUPT
SOAPY_SDR_ONE_PACKET = _SoapySDR.SOAPY_SDR_ONE_PACKET
SOAPY_SDR_MORE_FRAGMENTS = _SoapySDR.SOAPY_SDR_MORE_FRAGMENTS
SOAPY_SDR_WAIT_TRIGGER = _SoapySDR.SOAPY_SDR_WAIT_TRIGGER
# SWIG pattern: a def is emitted (to carry a signature), then immediately
# shadowed by a rebinding straight to the C function for speed.
def SoapySDR_errToStr(*args):
    return _SoapySDR.SoapySDR_errToStr(*args)
SoapySDR_errToStr = _SoapySDR.SoapySDR_errToStr
# Negative error codes returned by stream calls.
SOAPY_SDR_TIMEOUT = _SoapySDR.SOAPY_SDR_TIMEOUT
SOAPY_SDR_STREAM_ERROR = _SoapySDR.SOAPY_SDR_STREAM_ERROR
SOAPY_SDR_CORRUPTION = _SoapySDR.SOAPY_SDR_CORRUPTION
SOAPY_SDR_OVERFLOW = _SoapySDR.SOAPY_SDR_OVERFLOW
SOAPY_SDR_NOT_SUPPORTED = _SoapySDR.SOAPY_SDR_NOT_SUPPORTED
SOAPY_SDR_TIME_ERROR = _SoapySDR.SOAPY_SDR_TIME_ERROR
SOAPY_SDR_UNDERFLOW = _SoapySDR.SOAPY_SDR_UNDERFLOW
# Compile-time API/ABI versions of the library this wrapper was built against.
SOAPY_SDR_API_VERSION = _SoapySDR.SOAPY_SDR_API_VERSION
SOAPY_SDR_ABI_VERSION = _SoapySDR.SOAPY_SDR_ABI_VERSION
def SoapySDR_getAPIVersion():
    return _SoapySDR.SoapySDR_getAPIVersion()
SoapySDR_getAPIVersion = _SoapySDR.SoapySDR_getAPIVersion
def SoapySDR_getABIVersion():
    return _SoapySDR.SoapySDR_getABIVersion()
SoapySDR_getABIVersion = _SoapySDR.SoapySDR_getABIVersion
def SoapySDR_getLibVersion():
    return _SoapySDR.SoapySDR_getLibVersion()
SoapySDR_getLibVersion = _SoapySDR.SoapySDR_getLibVersion
# Stream sample format identifier strings (C = complex, F = float,
# S = signed int, U = unsigned int; the number is bits per component).
SOAPY_SDR_CF64 = _SoapySDR.SOAPY_SDR_CF64
SOAPY_SDR_CF32 = _SoapySDR.SOAPY_SDR_CF32
SOAPY_SDR_CS32 = _SoapySDR.SOAPY_SDR_CS32
SOAPY_SDR_CU32 = _SoapySDR.SOAPY_SDR_CU32
SOAPY_SDR_CS16 = _SoapySDR.SOAPY_SDR_CS16
SOAPY_SDR_CU16 = _SoapySDR.SOAPY_SDR_CU16
SOAPY_SDR_CS12 = _SoapySDR.SOAPY_SDR_CS12
SOAPY_SDR_CU12 = _SoapySDR.SOAPY_SDR_CU12
SOAPY_SDR_CS8 = _SoapySDR.SOAPY_SDR_CS8
SOAPY_SDR_CU8 = _SoapySDR.SOAPY_SDR_CU8
SOAPY_SDR_CS4 = _SoapySDR.SOAPY_SDR_CS4
SOAPY_SDR_CU4 = _SoapySDR.SOAPY_SDR_CU4
SOAPY_SDR_F64 = _SoapySDR.SOAPY_SDR_F64
SOAPY_SDR_F32 = _SoapySDR.SOAPY_SDR_F32
SOAPY_SDR_S32 = _SoapySDR.SOAPY_SDR_S32
SOAPY_SDR_U32 = _SoapySDR.SOAPY_SDR_U32
SOAPY_SDR_S16 = _SoapySDR.SOAPY_SDR_S16
SOAPY_SDR_U16 = _SoapySDR.SOAPY_SDR_U16
SOAPY_SDR_S8 = _SoapySDR.SOAPY_SDR_S8
SOAPY_SDR_U8 = _SoapySDR.SOAPY_SDR_U8
# def emitted for the signature, then rebound directly to the C function.
def SoapySDR_formatToSize(*args):
    return _SoapySDR.SoapySDR_formatToSize(*args)
SoapySDR_formatToSize = _SoapySDR.SoapySDR_formatToSize
# Log severity levels, most to least severe, plus SSI (spurious signal
# indicator characters).
SOAPY_SDR_FATAL = _SoapySDR.SOAPY_SDR_FATAL
SOAPY_SDR_CRITICAL = _SoapySDR.SOAPY_SDR_CRITICAL
SOAPY_SDR_ERROR = _SoapySDR.SOAPY_SDR_ERROR
SOAPY_SDR_WARNING = _SoapySDR.SOAPY_SDR_WARNING
SOAPY_SDR_NOTICE = _SoapySDR.SOAPY_SDR_NOTICE
SOAPY_SDR_INFO = _SoapySDR.SOAPY_SDR_INFO
SOAPY_SDR_DEBUG = _SoapySDR.SOAPY_SDR_DEBUG
SOAPY_SDR_TRACE = _SoapySDR.SOAPY_SDR_TRACE
SOAPY_SDR_SSI = _SoapySDR.SOAPY_SDR_SSI
# defs emitted for signatures, then rebound directly to the C functions.
def SoapySDR_log(*args):
    return _SoapySDR.SoapySDR_log(*args)
SoapySDR_log = _SoapySDR.SoapySDR_log
def SoapySDR_setLogLevel(*args):
    return _SoapySDR.SoapySDR_setLogLevel(*args)
SoapySDR_setLogLevel = _SoapySDR.SoapySDR_setLogLevel
# Un-prefixed convenience aliases for the SoapySDR_* functions above.
# Each def carries the signature and is then rebound to the C function.
def errToStr(*args):
    return _SoapySDR.errToStr(*args)
errToStr = _SoapySDR.errToStr
def getAPIVersion():
    return _SoapySDR.getAPIVersion()
getAPIVersion = _SoapySDR.getAPIVersion
def getABIVersion():
    return _SoapySDR.getABIVersion()
getABIVersion = _SoapySDR.getABIVersion
def getLibVersion():
    return _SoapySDR.getLibVersion()
getLibVersion = _SoapySDR.getLibVersion
def getRootPath():
    return _SoapySDR.getRootPath()
getRootPath = _SoapySDR.getRootPath
def listSearchPaths():
    return _SoapySDR.listSearchPaths()
listSearchPaths = _SoapySDR.listSearchPaths
def listModules(*args):
    return _SoapySDR.listModules(*args)
listModules = _SoapySDR.listModules
def loadModule(*args):
    return _SoapySDR.loadModule(*args)
loadModule = _SoapySDR.loadModule
def getLoaderResult(*args):
    return _SoapySDR.getLoaderResult(*args)
getLoaderResult = _SoapySDR.getLoaderResult
def unloadModule(*args):
    return _SoapySDR.unloadModule(*args)
unloadModule = _SoapySDR.unloadModule
def loadModules():
    return _SoapySDR.loadModules()
loadModules = _SoapySDR.loadModules
def formatToSize(*args):
    return _SoapySDR.formatToSize(*args)
formatToSize = _SoapySDR.formatToSize
def ticksToTimeNs(*args):
    return _SoapySDR.ticksToTimeNs(*args)
ticksToTimeNs = _SoapySDR.ticksToTimeNs
def timeNsToTicks(*args):
    return _SoapySDR.timeNsToTicks(*args)
timeNsToTicks = _SoapySDR.timeNsToTicks
def log(*args):
    return _SoapySDR.log(*args)
log = _SoapySDR.log
def setLogLevel(*args):
    return _SoapySDR.setLogLevel(*args)
setLogLevel = _SoapySDR.setLogLevel
class Device(_object):
    """SWIG proxy for a SoapySDR device handle.

    Instances are not created with this __init__ (it raises); use the
    make() static method (wired up below via the Device factory subclass).
    The readStream/writeStream wrappers at the bottom are hand-written
    Python that converts buffer objects to raw pointers before calling the
    generated readStream__/writeStream__ methods.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Device, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Device, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _SoapySDR.delete_Device
    __del__ = lambda self : None;
    # Static factory/teardown entry points exposed on the class.
    __swig_getmethods__["enumerate"] = lambda x: _SoapySDR.Device_enumerate
    if _newclass:enumerate = staticmethod(_SoapySDR.Device_enumerate)
    __swig_getmethods__["make"] = lambda x: _SoapySDR.Device_make
    if _newclass:make = staticmethod(_SoapySDR.Device_make)
    __swig_getmethods__["unmake"] = lambda x: _SoapySDR.Device_unmake
    if _newclass:unmake = staticmethod(_SoapySDR.Device_unmake)
    def getDriverKey(self): return _SoapySDR.Device_getDriverKey(self)
    def getHardwareKey(self): return _SoapySDR.Device_getHardwareKey(self)
    def getHardwareInfo(self): return _SoapySDR.Device_getHardwareInfo(self)
    def setFrontendMapping(self, *args): return _SoapySDR.Device_setFrontendMapping(self, *args)
    def getFrontendMapping(self, *args): return _SoapySDR.Device_getFrontendMapping(self, *args)
    def getNumChannels(self, *args): return _SoapySDR.Device_getNumChannels(self, *args)
    def getChannelInfo(self, *args): return _SoapySDR.Device_getChannelInfo(self, *args)
    def getFullDuplex(self, *args): return _SoapySDR.Device_getFullDuplex(self, *args)
    def getStreamFormats(self, *args): return _SoapySDR.Device_getStreamFormats(self, *args)
    def getNativeStreamFormat(self, *args): return _SoapySDR.Device_getNativeStreamFormat(self, *args)
    def getStreamArgsInfo(self, *args): return _SoapySDR.Device_getStreamArgsInfo(self, *args)
    def setupStream(self, *args): return _SoapySDR.Device_setupStream(self, *args)
    def closeStream(self, *args): return _SoapySDR.Device_closeStream(self, *args)
    def getStreamMTU(self, *args): return _SoapySDR.Device_getStreamMTU(self, *args)
    def activateStream(self, *args): return _SoapySDR.Device_activateStream(self, *args)
    def deactivateStream(self, *args): return _SoapySDR.Device_deactivateStream(self, *args)
    def readStream(self, *args): return _SoapySDR.Device_readStream(self, *args)
    def writeStream(self, *args): return _SoapySDR.Device_writeStream(self, *args)
    def readStreamStatus(self, *args): return _SoapySDR.Device_readStreamStatus(self, *args)
    def getNumDirectAccessBuffers(self, *args): return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)
    def getDirectAccessBufferAddrs(self, *args): return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)
    def acquireReadBuffer(self, *args): return _SoapySDR.Device_acquireReadBuffer(self, *args)
    def releaseReadBuffer(self, *args): return _SoapySDR.Device_releaseReadBuffer(self, *args)
    def acquireWriteBuffer(self, *args): return _SoapySDR.Device_acquireWriteBuffer(self, *args)
    def releaseWriteBuffer(self, *args): return _SoapySDR.Device_releaseWriteBuffer(self, *args)
    def listAntennas(self, *args): return _SoapySDR.Device_listAntennas(self, *args)
    def setAntenna(self, *args): return _SoapySDR.Device_setAntenna(self, *args)
    def getAntenna(self, *args): return _SoapySDR.Device_getAntenna(self, *args)
    def hasDCOffsetMode(self, *args): return _SoapySDR.Device_hasDCOffsetMode(self, *args)
    def setDCOffsetMode(self, *args): return _SoapySDR.Device_setDCOffsetMode(self, *args)
    def getDCOffsetMode(self, *args): return _SoapySDR.Device_getDCOffsetMode(self, *args)
    def hasDCOffset(self, *args): return _SoapySDR.Device_hasDCOffset(self, *args)
    def setDCOffset(self, *args): return _SoapySDR.Device_setDCOffset(self, *args)
    def getDCOffset(self, *args): return _SoapySDR.Device_getDCOffset(self, *args)
    def hasIQBalance(self, *args): return _SoapySDR.Device_hasIQBalance(self, *args)
    def setIQBalance(self, *args): return _SoapySDR.Device_setIQBalance(self, *args)
    def getIQBalance(self, *args): return _SoapySDR.Device_getIQBalance(self, *args)
    def hasFrequencyCorrection(self, *args): return _SoapySDR.Device_hasFrequencyCorrection(self, *args)
    def setFrequencyCorrection(self, *args): return _SoapySDR.Device_setFrequencyCorrection(self, *args)
    def getFrequencyCorrection(self, *args): return _SoapySDR.Device_getFrequencyCorrection(self, *args)
    def listGains(self, *args): return _SoapySDR.Device_listGains(self, *args)
    def hasGainMode(self, *args): return _SoapySDR.Device_hasGainMode(self, *args)
    def setGainMode(self, *args): return _SoapySDR.Device_setGainMode(self, *args)
    def getGainMode(self, *args): return _SoapySDR.Device_getGainMode(self, *args)
    def setGain(self, *args): return _SoapySDR.Device_setGain(self, *args)
    def getGain(self, *args): return _SoapySDR.Device_getGain(self, *args)
    def getGainRange(self, *args): return _SoapySDR.Device_getGainRange(self, *args)
    def setFrequency(self, *args): return _SoapySDR.Device_setFrequency(self, *args)
    def getFrequency(self, *args): return _SoapySDR.Device_getFrequency(self, *args)
    def listFrequencies(self, *args): return _SoapySDR.Device_listFrequencies(self, *args)
    def getFrequencyRange(self, *args): return _SoapySDR.Device_getFrequencyRange(self, *args)
    def getFrequencyArgsInfo(self, *args): return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)
    def setSampleRate(self, *args): return _SoapySDR.Device_setSampleRate(self, *args)
    def getSampleRate(self, *args): return _SoapySDR.Device_getSampleRate(self, *args)
    def listSampleRates(self, *args): return _SoapySDR.Device_listSampleRates(self, *args)
    def getSampleRateRange(self, *args): return _SoapySDR.Device_getSampleRateRange(self, *args)
    def setBandwidth(self, *args): return _SoapySDR.Device_setBandwidth(self, *args)
    def getBandwidth(self, *args): return _SoapySDR.Device_getBandwidth(self, *args)
    def listBandwidths(self, *args): return _SoapySDR.Device_listBandwidths(self, *args)
    def getBandwidthRange(self, *args): return _SoapySDR.Device_getBandwidthRange(self, *args)
    def setMasterClockRate(self, *args): return _SoapySDR.Device_setMasterClockRate(self, *args)
    def getMasterClockRate(self): return _SoapySDR.Device_getMasterClockRate(self)
    def getMasterClockRates(self): return _SoapySDR.Device_getMasterClockRates(self)
    def listClockSources(self): return _SoapySDR.Device_listClockSources(self)
    def setClockSource(self, *args): return _SoapySDR.Device_setClockSource(self, *args)
    def getClockSource(self): return _SoapySDR.Device_getClockSource(self)
    def listTimeSources(self): return _SoapySDR.Device_listTimeSources(self)
    def setTimeSource(self, *args): return _SoapySDR.Device_setTimeSource(self, *args)
    def getTimeSource(self): return _SoapySDR.Device_getTimeSource(self)
    def hasHardwareTime(self, what=""): return _SoapySDR.Device_hasHardwareTime(self, what)
    def getHardwareTime(self, what=""): return _SoapySDR.Device_getHardwareTime(self, what)
    def setHardwareTime(self, *args): return _SoapySDR.Device_setHardwareTime(self, *args)
    def setCommandTime(self, *args): return _SoapySDR.Device_setCommandTime(self, *args)
    def listSensors(self, *args): return _SoapySDR.Device_listSensors(self, *args)
    def getSensorInfo(self, *args): return _SoapySDR.Device_getSensorInfo(self, *args)
    def readSensor(self, *args): return _SoapySDR.Device_readSensor(self, *args)
    def listRegisterInterfaces(self): return _SoapySDR.Device_listRegisterInterfaces(self)
    def writeRegister(self, *args): return _SoapySDR.Device_writeRegister(self, *args)
    def readRegister(self, *args): return _SoapySDR.Device_readRegister(self, *args)
    def writeRegisters(self, *args): return _SoapySDR.Device_writeRegisters(self, *args)
    def readRegisters(self, *args): return _SoapySDR.Device_readRegisters(self, *args)
    def getSettingInfo(self, *args): return _SoapySDR.Device_getSettingInfo(self, *args)
    def writeSetting(self, *args): return _SoapySDR.Device_writeSetting(self, *args)
    def readSetting(self, *args): return _SoapySDR.Device_readSetting(self, *args)
    def listGPIOBanks(self): return _SoapySDR.Device_listGPIOBanks(self)
    def writeGPIO(self, *args): return _SoapySDR.Device_writeGPIO(self, *args)
    def readGPIO(self, *args): return _SoapySDR.Device_readGPIO(self, *args)
    def writeGPIODir(self, *args): return _SoapySDR.Device_writeGPIODir(self, *args)
    def readGPIODir(self, *args): return _SoapySDR.Device_readGPIODir(self, *args)
    def writeI2C(self, *args): return _SoapySDR.Device_writeI2C(self, *args)
    def readI2C(self, *args): return _SoapySDR.Device_readI2C(self, *args)
    def transactSPI(self, *args): return _SoapySDR.Device_transactSPI(self, *args)
    def listUARTs(self): return _SoapySDR.Device_listUARTs(self)
    def writeUART(self, *args): return _SoapySDR.Device_writeUART(self, *args)
    def readUART(self, *args): return _SoapySDR.Device_readUART(self, *args)
    def readStream__(self, *args): return _SoapySDR.Device_readStream__(self, *args)
    def writeStream__(self, *args): return _SoapySDR.Device_writeStream__(self, *args)
    def readStreamStatus__(self, *args): return _SoapySDR.Device_readStreamStatus__(self, *args)
    #call unmake from custom deleter
    def __del__(self):
        # Overrides the no-op __del__ lambda above so the C-side device is
        # released when the Python proxy is garbage collected.
        Device.unmake(self)
    def __str__(self):
        return "%s:%s"%(self.getDriverKey(), self.getHardwareKey())
    # Hand-written wrappers: convert each buffer to a raw pointer via
    # extractBuffPointer, then call the generated __-suffixed methods.
    # These intentionally shadow the generated readStream/writeStream/
    # readStreamStatus defs above.
    def readStream(self, stream, buffs, numElems, flags = 0, timeoutUs = 100000):
        ptrs = [extractBuffPointer(b) for b in buffs]
        return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)
    def writeStream(self, stream, buffs, numElems, flags = 0, timeNs = 0, timeoutUs = 100000):
        ptrs = [extractBuffPointer(b) for b in buffs]
        return self.writeStream__(stream, ptrs, numElems, flags, timeNs, timeoutUs)
    def readStreamStatus(self, stream, timeoutUs = 100000):
        return self.readStreamStatus__(stream, timeoutUs)
Device_swigregister = _SoapySDR.Device_swigregister
Device_swigregister(Device)
# Module-level aliases for the Device static methods; each def carries the
# signature and is then rebound directly to the C function.
def Device_enumerate(*args):
    return _SoapySDR.Device_enumerate(*args)
Device_enumerate = _SoapySDR.Device_enumerate
def Device_make(*args):
    return _SoapySDR.Device_make(*args)
Device_make = _SoapySDR.Device_make
def Device_unmake(*args):
    return _SoapySDR.Device_unmake(*args)
Device_unmake = _SoapySDR.Device_unmake
# Export every SOAPY_SDR_* constant via 'from SoapySDR import *'.
__all__ = list()
for key in sorted(globals().keys()):
    if key.startswith('SOAPY_SDR_'):
        __all__.append(key)
# Shadow the SWIG proxy with a factory subclass so that Device(args)
# behaves like Device.make(args); the original proxy stays reachable
# as _Device.
_Device = Device
class Device(Device):
    def __new__(cls, *args, **kwargs):
        return cls.make(*args, **kwargs)
def extractBuffPointer(buff):
    """Return the raw memory address of a buffer-like object *buff*.

    Resolution order:
      1. objects exposing the NumPy ``__array_interface__`` protocol --
         the address is taken from its ``'data'`` field;
      2. objects convertible to an integer via ``__int__`` (covers plain
         ints and address-like wrappers on both Python 2 and 3);
      3. objects with only ``__long__`` (legacy Python 2 types).

    Raises:
        Exception: if *buff* matches none of the above.
    """
    if hasattr(buff, '__array_interface__'):
        return buff.__array_interface__['data'][0]
    # Prefer __int__: it exists on both Python 2 and 3.  The original code
    # checked __long__ first and called the long() builtin, which raises
    # NameError on Python 3 where long() no longer exists.
    if hasattr(buff, '__int__'):
        return int(buff)
    if hasattr(buff, '__long__'):
        return long(buff)  # noqa: F821 -- reachable on Python 2 only
    raise Exception("Unrecognized data format: " + str(type(buff)))
# This file is compatible with both classic and new-style classes.
|
normal
|
{
"blob_id": "a6670d0d09f02b674bc31b770f42d4d8a01a4a4e",
"index": 9884,
"step-1": "<mask token>\n\n\nclass SoapySDRSizeList(_object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def iterator(self):\n return _SoapySDR.SoapySDRSizeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRSizeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRSizeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRSizeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n <mask token>\n\n def empty(self):\n return _SoapySDR.SoapySDRSizeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRSizeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRSizeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRSizeList_begin(self)\n <mask token>\n\n def rbegin(self):\n return _SoapySDR.SoapySDRSizeList_rbegin(self)\n <mask token>\n\n def pop_back(self):\n return _SoapySDR.SoapySDRSizeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRSizeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def 
push_back(self, *args):\n return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRSizeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRSizeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n <mask token>\n\n def capacity(self):\n return _SoapySDR.SoapySDRSizeList_capacity(self)\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRDoubleList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRDoubleList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRDoubleList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRDoubleList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n\n def append(self, *args):\n return 
_SoapySDR.SoapySDRDoubleList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRDoubleList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRDoubleList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRDoubleList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRDoubleList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRDoubleList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRDoubleList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRDoubleList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRDoubleList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: 
_swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_StreamResult()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['ret'] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__['ret'] = _SoapySDR.StreamResult_ret_get\n if _newclass:\n ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.\n StreamResult_ret_set)\n __swig_setmethods__['flags'] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__['flags'] = _SoapySDR.StreamResult_flags_get\n if _newclass:\n flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.\n StreamResult_flags_set)\n __swig_setmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:\n timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get,\n _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:\n chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get,\n _SoapySDR.StreamResult_chanMask_set)\n\n def __str__(self):\n return 'ret=%s, flags=%s, timeNs=%s' % (self.ret, self.flags, self.\n timeNs)\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Device, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self: None\n __swig_getmethods__['enumerate'] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:\n enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__['make'] = lambda x: 
_SoapySDR.Device_make\n if _newclass:\n make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__['unmake'] = lambda x: _SoapySDR.Device_unmake\n if _newclass:\n unmake = staticmethod(_SoapySDR.Device_unmake)\n\n def getDriverKey(self):\n return _SoapySDR.Device_getDriverKey(self)\n\n def getHardwareKey(self):\n return _SoapySDR.Device_getHardwareKey(self)\n\n def getHardwareInfo(self):\n return _SoapySDR.Device_getHardwareInfo(self)\n\n def setFrontendMapping(self, *args):\n return _SoapySDR.Device_setFrontendMapping(self, *args)\n\n def getFrontendMapping(self, *args):\n return _SoapySDR.Device_getFrontendMapping(self, *args)\n\n def getNumChannels(self, *args):\n return _SoapySDR.Device_getNumChannels(self, *args)\n\n def getChannelInfo(self, *args):\n return _SoapySDR.Device_getChannelInfo(self, *args)\n\n def getFullDuplex(self, *args):\n return _SoapySDR.Device_getFullDuplex(self, *args)\n\n def getStreamFormats(self, *args):\n return _SoapySDR.Device_getStreamFormats(self, *args)\n\n def getNativeStreamFormat(self, *args):\n return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n\n def getStreamArgsInfo(self, *args):\n return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n\n def setupStream(self, *args):\n return _SoapySDR.Device_setupStream(self, *args)\n\n def closeStream(self, *args):\n return _SoapySDR.Device_closeStream(self, *args)\n\n def getStreamMTU(self, *args):\n return _SoapySDR.Device_getStreamMTU(self, *args)\n\n def activateStream(self, *args):\n return _SoapySDR.Device_activateStream(self, *args)\n\n def deactivateStream(self, *args):\n return _SoapySDR.Device_deactivateStream(self, *args)\n\n def readStream(self, *args):\n return _SoapySDR.Device_readStream(self, *args)\n\n def writeStream(self, *args):\n return _SoapySDR.Device_writeStream(self, *args)\n\n def readStreamStatus(self, *args):\n return _SoapySDR.Device_readStreamStatus(self, *args)\n\n def getNumDirectAccessBuffers(self, *args):\n return 
_SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n\n def getDirectAccessBufferAddrs(self, *args):\n return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n\n def acquireReadBuffer(self, *args):\n return _SoapySDR.Device_acquireReadBuffer(self, *args)\n\n def releaseReadBuffer(self, *args):\n return _SoapySDR.Device_releaseReadBuffer(self, *args)\n\n def acquireWriteBuffer(self, *args):\n return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n\n def releaseWriteBuffer(self, *args):\n return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n\n def listAntennas(self, *args):\n return _SoapySDR.Device_listAntennas(self, *args)\n\n def setAntenna(self, *args):\n return _SoapySDR.Device_setAntenna(self, *args)\n\n def getAntenna(self, *args):\n return _SoapySDR.Device_getAntenna(self, *args)\n\n def hasDCOffsetMode(self, *args):\n return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n\n def setDCOffsetMode(self, *args):\n return _SoapySDR.Device_setDCOffsetMode(self, *args)\n\n def getDCOffsetMode(self, *args):\n return _SoapySDR.Device_getDCOffsetMode(self, *args)\n\n def hasDCOffset(self, *args):\n return _SoapySDR.Device_hasDCOffset(self, *args)\n\n def setDCOffset(self, *args):\n return _SoapySDR.Device_setDCOffset(self, *args)\n\n def getDCOffset(self, *args):\n return _SoapySDR.Device_getDCOffset(self, *args)\n\n def hasIQBalance(self, *args):\n return _SoapySDR.Device_hasIQBalance(self, *args)\n\n def setIQBalance(self, *args):\n return _SoapySDR.Device_setIQBalance(self, *args)\n\n def getIQBalance(self, *args):\n return _SoapySDR.Device_getIQBalance(self, *args)\n\n def hasFrequencyCorrection(self, *args):\n return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n\n def setFrequencyCorrection(self, *args):\n return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n\n def getFrequencyCorrection(self, *args):\n return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n\n def listGains(self, *args):\n return 
_SoapySDR.Device_listGains(self, *args)\n\n def hasGainMode(self, *args):\n return _SoapySDR.Device_hasGainMode(self, *args)\n\n def setGainMode(self, *args):\n return _SoapySDR.Device_setGainMode(self, *args)\n\n def getGainMode(self, *args):\n return _SoapySDR.Device_getGainMode(self, *args)\n\n def setGain(self, *args):\n return _SoapySDR.Device_setGain(self, *args)\n\n def getGain(self, *args):\n return _SoapySDR.Device_getGain(self, *args)\n\n def getGainRange(self, *args):\n return _SoapySDR.Device_getGainRange(self, *args)\n\n def setFrequency(self, *args):\n return _SoapySDR.Device_setFrequency(self, *args)\n\n def getFrequency(self, *args):\n return _SoapySDR.Device_getFrequency(self, *args)\n\n def listFrequencies(self, *args):\n return _SoapySDR.Device_listFrequencies(self, *args)\n\n def getFrequencyRange(self, *args):\n return _SoapySDR.Device_getFrequencyRange(self, *args)\n\n def getFrequencyArgsInfo(self, *args):\n return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n\n def setSampleRate(self, *args):\n return _SoapySDR.Device_setSampleRate(self, *args)\n\n def getSampleRate(self, *args):\n return _SoapySDR.Device_getSampleRate(self, *args)\n\n def listSampleRates(self, *args):\n return _SoapySDR.Device_listSampleRates(self, *args)\n\n def getSampleRateRange(self, *args):\n return _SoapySDR.Device_getSampleRateRange(self, *args)\n\n def setBandwidth(self, *args):\n return _SoapySDR.Device_setBandwidth(self, *args)\n\n def getBandwidth(self, *args):\n return _SoapySDR.Device_getBandwidth(self, *args)\n\n def listBandwidths(self, *args):\n return _SoapySDR.Device_listBandwidths(self, *args)\n\n def getBandwidthRange(self, *args):\n return _SoapySDR.Device_getBandwidthRange(self, *args)\n\n def setMasterClockRate(self, *args):\n return _SoapySDR.Device_setMasterClockRate(self, *args)\n\n def getMasterClockRate(self):\n return _SoapySDR.Device_getMasterClockRate(self)\n\n def getMasterClockRates(self):\n return 
_SoapySDR.Device_getMasterClockRates(self)\n\n def listClockSources(self):\n return _SoapySDR.Device_listClockSources(self)\n\n def setClockSource(self, *args):\n return _SoapySDR.Device_setClockSource(self, *args)\n\n def getClockSource(self):\n return _SoapySDR.Device_getClockSource(self)\n\n def listTimeSources(self):\n return _SoapySDR.Device_listTimeSources(self)\n\n def setTimeSource(self, *args):\n return _SoapySDR.Device_setTimeSource(self, *args)\n\n def getTimeSource(self):\n return _SoapySDR.Device_getTimeSource(self)\n\n def hasHardwareTime(self, what=''):\n return _SoapySDR.Device_hasHardwareTime(self, what)\n\n def getHardwareTime(self, what=''):\n return _SoapySDR.Device_getHardwareTime(self, what)\n\n def setHardwareTime(self, *args):\n return _SoapySDR.Device_setHardwareTime(self, *args)\n\n def setCommandTime(self, *args):\n return _SoapySDR.Device_setCommandTime(self, *args)\n\n def listSensors(self, *args):\n return _SoapySDR.Device_listSensors(self, *args)\n\n def getSensorInfo(self, *args):\n return _SoapySDR.Device_getSensorInfo(self, *args)\n\n def readSensor(self, *args):\n return _SoapySDR.Device_readSensor(self, *args)\n\n def listRegisterInterfaces(self):\n return _SoapySDR.Device_listRegisterInterfaces(self)\n\n def writeRegister(self, *args):\n return _SoapySDR.Device_writeRegister(self, *args)\n\n def readRegister(self, *args):\n return _SoapySDR.Device_readRegister(self, *args)\n\n def writeRegisters(self, *args):\n return _SoapySDR.Device_writeRegisters(self, *args)\n\n def readRegisters(self, *args):\n return _SoapySDR.Device_readRegisters(self, *args)\n\n def getSettingInfo(self, *args):\n return _SoapySDR.Device_getSettingInfo(self, *args)\n\n def writeSetting(self, *args):\n return _SoapySDR.Device_writeSetting(self, *args)\n\n def readSetting(self, *args):\n return _SoapySDR.Device_readSetting(self, *args)\n\n def listGPIOBanks(self):\n return _SoapySDR.Device_listGPIOBanks(self)\n\n def writeGPIO(self, *args):\n return 
_SoapySDR.Device_writeGPIO(self, *args)\n\n def readGPIO(self, *args):\n return _SoapySDR.Device_readGPIO(self, *args)\n\n def writeGPIODir(self, *args):\n return _SoapySDR.Device_writeGPIODir(self, *args)\n\n def readGPIODir(self, *args):\n return _SoapySDR.Device_readGPIODir(self, *args)\n\n def writeI2C(self, *args):\n return _SoapySDR.Device_writeI2C(self, *args)\n\n def readI2C(self, *args):\n return _SoapySDR.Device_readI2C(self, *args)\n\n def transactSPI(self, *args):\n return _SoapySDR.Device_transactSPI(self, *args)\n\n def listUARTs(self):\n return _SoapySDR.Device_listUARTs(self)\n\n def writeUART(self, *args):\n return _SoapySDR.Device_writeUART(self, *args)\n\n def readUART(self, *args):\n return _SoapySDR.Device_readUART(self, *args)\n\n def readStream__(self, *args):\n return _SoapySDR.Device_readStream__(self, *args)\n\n def writeStream__(self, *args):\n return _SoapySDR.Device_writeStream__(self, *args)\n\n def readStreamStatus__(self, *args):\n return _SoapySDR.Device_readStreamStatus__(self, *args)\n\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return '%s:%s' % (self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags=0, timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags=0, timeNs=0,\n timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs,\n timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs=100000):\n return self.readStreamStatus__(stream, timeoutUs)\n\n\n<mask token>\n\n\nclass Device(Device):\n\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SoapySDRKwargsList(_object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargsList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n <mask token>\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargsList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargsList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRKwargsList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)\n <mask token>\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRKwargsList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargsList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargsList_size(self)\n <mask token>\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargsList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargsList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargsList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargsList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargsList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargsList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRKwargsList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargsList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargsList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n 
<mask token>\n\n def front(self):\n return _SoapySDR.SoapySDRKwargsList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRKwargsList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRKwargsList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRKwargsList_resize(self, *args)\n <mask token>\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRKwargsList_capacity(self)\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass SoapySDRArgInfoList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRArgInfoList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self,\n SoapySDRArgInfoList, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRArgInfoList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRArgInfoList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRArgInfoList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRArgInfoList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_append(self, *args)\n\n def empty(self):\n 
return _SoapySDR.SoapySDRArgInfoList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRArgInfoList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRArgInfoList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRArgInfoList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRArgInfoList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRArgInfoList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRArgInfoList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRArgInfoList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRArgInfoList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRArgInfoList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRArgInfoList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRArgInfoList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRStringList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRStringList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList,\n 
name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRStringList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRStringList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRStringList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRStringList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRStringList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRStringList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRStringList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRStringList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRStringList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRStringList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRStringList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRStringList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRStringList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRStringList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRStringList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRStringList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRStringList_erase(self, *args)\n\n def __init__(self, *args):\n this = 
_SoapySDR.new_SoapySDRStringList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRStringList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRStringList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRStringList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRStringList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRStringList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRStringList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRStringList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRStringList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRRangeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRRangeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRRangeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRRangeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRRangeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRRangeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRRangeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)\n\n def __getitem__(self, 
*args):\n return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRRangeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRRangeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRRangeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRRangeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRRangeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRRangeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRRangeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRRangeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRRangeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRRangeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRRangeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRRangeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRRangeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRRangeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRRangeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRRangeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRRangeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRRangeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRRangeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRRangeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRRangeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRSizeList(_object):\n 
__swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRSizeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRSizeList, name\n )\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRSizeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRSizeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRSizeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRSizeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRSizeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRSizeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRSizeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRSizeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRSizeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRSizeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRSizeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRSizeList_rend(self)\n\n def 
pop_back(self):\n return _SoapySDR.SoapySDRSizeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRSizeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRSizeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRSizeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRSizeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRSizeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRDoubleList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRDoubleList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRDoubleList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRDoubleList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n 
return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRDoubleList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRDoubleList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRDoubleList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRDoubleList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRDoubleList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRDoubleList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRDoubleList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRDoubleList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRDoubleList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n\n def 
capacity(self):\n return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_StreamResult()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['ret'] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__['ret'] = _SoapySDR.StreamResult_ret_get\n if _newclass:\n ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.\n StreamResult_ret_set)\n __swig_setmethods__['flags'] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__['flags'] = _SoapySDR.StreamResult_flags_get\n if _newclass:\n flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.\n StreamResult_flags_set)\n __swig_setmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:\n timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get,\n _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:\n chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get,\n _SoapySDR.StreamResult_chanMask_set)\n\n def __str__(self):\n return 'ret=%s, flags=%s, timeNs=%s' % (self.ret, self.flags, self.\n timeNs)\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Device, 
name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self: None\n __swig_getmethods__['enumerate'] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:\n enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__['make'] = lambda x: _SoapySDR.Device_make\n if _newclass:\n make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__['unmake'] = lambda x: _SoapySDR.Device_unmake\n if _newclass:\n unmake = staticmethod(_SoapySDR.Device_unmake)\n\n def getDriverKey(self):\n return _SoapySDR.Device_getDriverKey(self)\n\n def getHardwareKey(self):\n return _SoapySDR.Device_getHardwareKey(self)\n\n def getHardwareInfo(self):\n return _SoapySDR.Device_getHardwareInfo(self)\n\n def setFrontendMapping(self, *args):\n return _SoapySDR.Device_setFrontendMapping(self, *args)\n\n def getFrontendMapping(self, *args):\n return _SoapySDR.Device_getFrontendMapping(self, *args)\n\n def getNumChannels(self, *args):\n return _SoapySDR.Device_getNumChannels(self, *args)\n\n def getChannelInfo(self, *args):\n return _SoapySDR.Device_getChannelInfo(self, *args)\n\n def getFullDuplex(self, *args):\n return _SoapySDR.Device_getFullDuplex(self, *args)\n\n def getStreamFormats(self, *args):\n return _SoapySDR.Device_getStreamFormats(self, *args)\n\n def getNativeStreamFormat(self, *args):\n return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n\n def getStreamArgsInfo(self, *args):\n return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n\n def setupStream(self, *args):\n return _SoapySDR.Device_setupStream(self, *args)\n\n def closeStream(self, *args):\n return _SoapySDR.Device_closeStream(self, *args)\n\n def getStreamMTU(self, *args):\n return _SoapySDR.Device_getStreamMTU(self, *args)\n\n def activateStream(self, *args):\n return _SoapySDR.Device_activateStream(self, *args)\n\n def deactivateStream(self, *args):\n return 
_SoapySDR.Device_deactivateStream(self, *args)\n\n def readStream(self, *args):\n return _SoapySDR.Device_readStream(self, *args)\n\n def writeStream(self, *args):\n return _SoapySDR.Device_writeStream(self, *args)\n\n def readStreamStatus(self, *args):\n return _SoapySDR.Device_readStreamStatus(self, *args)\n\n def getNumDirectAccessBuffers(self, *args):\n return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n\n def getDirectAccessBufferAddrs(self, *args):\n return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n\n def acquireReadBuffer(self, *args):\n return _SoapySDR.Device_acquireReadBuffer(self, *args)\n\n def releaseReadBuffer(self, *args):\n return _SoapySDR.Device_releaseReadBuffer(self, *args)\n\n def acquireWriteBuffer(self, *args):\n return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n\n def releaseWriteBuffer(self, *args):\n return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n\n def listAntennas(self, *args):\n return _SoapySDR.Device_listAntennas(self, *args)\n\n def setAntenna(self, *args):\n return _SoapySDR.Device_setAntenna(self, *args)\n\n def getAntenna(self, *args):\n return _SoapySDR.Device_getAntenna(self, *args)\n\n def hasDCOffsetMode(self, *args):\n return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n\n def setDCOffsetMode(self, *args):\n return _SoapySDR.Device_setDCOffsetMode(self, *args)\n\n def getDCOffsetMode(self, *args):\n return _SoapySDR.Device_getDCOffsetMode(self, *args)\n\n def hasDCOffset(self, *args):\n return _SoapySDR.Device_hasDCOffset(self, *args)\n\n def setDCOffset(self, *args):\n return _SoapySDR.Device_setDCOffset(self, *args)\n\n def getDCOffset(self, *args):\n return _SoapySDR.Device_getDCOffset(self, *args)\n\n def hasIQBalance(self, *args):\n return _SoapySDR.Device_hasIQBalance(self, *args)\n\n def setIQBalance(self, *args):\n return _SoapySDR.Device_setIQBalance(self, *args)\n\n def getIQBalance(self, *args):\n return _SoapySDR.Device_getIQBalance(self, *args)\n\n def 
hasFrequencyCorrection(self, *args):\n return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n\n def setFrequencyCorrection(self, *args):\n return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n\n def getFrequencyCorrection(self, *args):\n return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n\n def listGains(self, *args):\n return _SoapySDR.Device_listGains(self, *args)\n\n def hasGainMode(self, *args):\n return _SoapySDR.Device_hasGainMode(self, *args)\n\n def setGainMode(self, *args):\n return _SoapySDR.Device_setGainMode(self, *args)\n\n def getGainMode(self, *args):\n return _SoapySDR.Device_getGainMode(self, *args)\n\n def setGain(self, *args):\n return _SoapySDR.Device_setGain(self, *args)\n\n def getGain(self, *args):\n return _SoapySDR.Device_getGain(self, *args)\n\n def getGainRange(self, *args):\n return _SoapySDR.Device_getGainRange(self, *args)\n\n def setFrequency(self, *args):\n return _SoapySDR.Device_setFrequency(self, *args)\n\n def getFrequency(self, *args):\n return _SoapySDR.Device_getFrequency(self, *args)\n\n def listFrequencies(self, *args):\n return _SoapySDR.Device_listFrequencies(self, *args)\n\n def getFrequencyRange(self, *args):\n return _SoapySDR.Device_getFrequencyRange(self, *args)\n\n def getFrequencyArgsInfo(self, *args):\n return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n\n def setSampleRate(self, *args):\n return _SoapySDR.Device_setSampleRate(self, *args)\n\n def getSampleRate(self, *args):\n return _SoapySDR.Device_getSampleRate(self, *args)\n\n def listSampleRates(self, *args):\n return _SoapySDR.Device_listSampleRates(self, *args)\n\n def getSampleRateRange(self, *args):\n return _SoapySDR.Device_getSampleRateRange(self, *args)\n\n def setBandwidth(self, *args):\n return _SoapySDR.Device_setBandwidth(self, *args)\n\n def getBandwidth(self, *args):\n return _SoapySDR.Device_getBandwidth(self, *args)\n\n def listBandwidths(self, *args):\n return _SoapySDR.Device_listBandwidths(self, *args)\n\n def 
getBandwidthRange(self, *args):\n return _SoapySDR.Device_getBandwidthRange(self, *args)\n\n def setMasterClockRate(self, *args):\n return _SoapySDR.Device_setMasterClockRate(self, *args)\n\n def getMasterClockRate(self):\n return _SoapySDR.Device_getMasterClockRate(self)\n\n def getMasterClockRates(self):\n return _SoapySDR.Device_getMasterClockRates(self)\n\n def listClockSources(self):\n return _SoapySDR.Device_listClockSources(self)\n\n def setClockSource(self, *args):\n return _SoapySDR.Device_setClockSource(self, *args)\n\n def getClockSource(self):\n return _SoapySDR.Device_getClockSource(self)\n\n def listTimeSources(self):\n return _SoapySDR.Device_listTimeSources(self)\n\n def setTimeSource(self, *args):\n return _SoapySDR.Device_setTimeSource(self, *args)\n\n def getTimeSource(self):\n return _SoapySDR.Device_getTimeSource(self)\n\n def hasHardwareTime(self, what=''):\n return _SoapySDR.Device_hasHardwareTime(self, what)\n\n def getHardwareTime(self, what=''):\n return _SoapySDR.Device_getHardwareTime(self, what)\n\n def setHardwareTime(self, *args):\n return _SoapySDR.Device_setHardwareTime(self, *args)\n\n def setCommandTime(self, *args):\n return _SoapySDR.Device_setCommandTime(self, *args)\n\n def listSensors(self, *args):\n return _SoapySDR.Device_listSensors(self, *args)\n\n def getSensorInfo(self, *args):\n return _SoapySDR.Device_getSensorInfo(self, *args)\n\n def readSensor(self, *args):\n return _SoapySDR.Device_readSensor(self, *args)\n\n def listRegisterInterfaces(self):\n return _SoapySDR.Device_listRegisterInterfaces(self)\n\n def writeRegister(self, *args):\n return _SoapySDR.Device_writeRegister(self, *args)\n\n def readRegister(self, *args):\n return _SoapySDR.Device_readRegister(self, *args)\n\n def writeRegisters(self, *args):\n return _SoapySDR.Device_writeRegisters(self, *args)\n\n def readRegisters(self, *args):\n return _SoapySDR.Device_readRegisters(self, *args)\n\n def getSettingInfo(self, *args):\n return 
_SoapySDR.Device_getSettingInfo(self, *args)\n\n def writeSetting(self, *args):\n return _SoapySDR.Device_writeSetting(self, *args)\n\n def readSetting(self, *args):\n return _SoapySDR.Device_readSetting(self, *args)\n\n def listGPIOBanks(self):\n return _SoapySDR.Device_listGPIOBanks(self)\n\n def writeGPIO(self, *args):\n return _SoapySDR.Device_writeGPIO(self, *args)\n\n def readGPIO(self, *args):\n return _SoapySDR.Device_readGPIO(self, *args)\n\n def writeGPIODir(self, *args):\n return _SoapySDR.Device_writeGPIODir(self, *args)\n\n def readGPIODir(self, *args):\n return _SoapySDR.Device_readGPIODir(self, *args)\n\n def writeI2C(self, *args):\n return _SoapySDR.Device_writeI2C(self, *args)\n\n def readI2C(self, *args):\n return _SoapySDR.Device_readI2C(self, *args)\n\n def transactSPI(self, *args):\n return _SoapySDR.Device_transactSPI(self, *args)\n\n def listUARTs(self):\n return _SoapySDR.Device_listUARTs(self)\n\n def writeUART(self, *args):\n return _SoapySDR.Device_writeUART(self, *args)\n\n def readUART(self, *args):\n return _SoapySDR.Device_readUART(self, *args)\n\n def readStream__(self, *args):\n return _SoapySDR.Device_readStream__(self, *args)\n\n def writeStream__(self, *args):\n return _SoapySDR.Device_writeStream__(self, *args)\n\n def readStreamStatus__(self, *args):\n return _SoapySDR.Device_readStreamStatus__(self, *args)\n\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return '%s:%s' % (self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags=0, timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags=0, timeNs=0,\n timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs,\n timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs=100000):\n return 
self.readStreamStatus__(stream, timeoutUs)\n\n\n<mask token>\n\n\nclass Device(Device):\n\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _swig_setattr(self, class_type, name, value):\n return _swig_setattr_nondynamic(self, class_type, name, value, 0)\n\n\ndef _swig_getattr(self, class_type, name):\n if name == 'thisown':\n return self.this.own()\n method = class_type.__swig_getmethods__.get(name, None)\n if method:\n return method(self)\n raise AttributeError(name)\n\n\ndef _swig_repr(self):\n try:\n strthis = 'proxy of ' + self.this.__repr__()\n except:\n strthis = ''\n return '<%s.%s; %s >' % (self.__class__.__module__, self.__class__.\n __name__, strthis)\n\n\n<mask token>\n\n\nclass SwigPyIterator(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SwigPyIterator, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined - class is abstract')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_SwigPyIterator\n __del__ = lambda self: None\n\n def value(self):\n return _SoapySDR.SwigPyIterator_value(self)\n\n def incr(self, n=1):\n return _SoapySDR.SwigPyIterator_incr(self, n)\n\n def decr(self, n=1):\n return _SoapySDR.SwigPyIterator_decr(self, n)\n\n def distance(self, *args):\n return _SoapySDR.SwigPyIterator_distance(self, *args)\n\n def equal(self, *args):\n return _SoapySDR.SwigPyIterator_equal(self, *args)\n\n def copy(self):\n return _SoapySDR.SwigPyIterator_copy(self)\n\n def next(self):\n return _SoapySDR.SwigPyIterator_next(self)\n\n def __next__(self):\n return _SoapySDR.SwigPyIterator___next__(self)\n\n def previous(self):\n return _SoapySDR.SwigPyIterator_previous(self)\n\n def advance(self, *args):\n return _SoapySDR.SwigPyIterator_advance(self, *args)\n\n def __eq__(self, *args):\n return _SoapySDR.SwigPyIterator___eq__(self, *args)\n\n def __ne__(self, *args):\n return _SoapySDR.SwigPyIterator___ne__(self, *args)\n\n def __iadd__(self, *args):\n 
return _SoapySDR.SwigPyIterator___iadd__(self, *args)\n\n def __isub__(self, *args):\n return _SoapySDR.SwigPyIterator___isub__(self, *args)\n\n def __add__(self, *args):\n return _SoapySDR.SwigPyIterator___add__(self, *args)\n\n def __sub__(self, *args):\n return _SoapySDR.SwigPyIterator___sub__(self, *args)\n\n def __iter__(self):\n return self\n\n\n<mask token>\n\n\ndef KwargsToString(*args):\n return _SoapySDR.KwargsToString(*args)\n\n\n<mask token>\n\n\nclass Range(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name,\n value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Range, name)\n __repr__ = _swig_repr\n\n def __init__(self, *args):\n this = _SoapySDR.new_Range(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def minimum(self):\n return _SoapySDR.Range_minimum(self)\n\n def maximum(self):\n return _SoapySDR.Range_maximum(self)\n\n def step(self):\n return _SoapySDR.Range_step(self)\n\n def __str__(self):\n fields = [self.minimum(), self.maximum()]\n if self.step() != 0.0:\n fields.append(self.step())\n return ', '.join([('%g' % f) for f in fields])\n __swig_destroy__ = _SoapySDR.delete_Range\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass ArgInfo(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_ArgInfo()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['key'] = _SoapySDR.ArgInfo_key_set\n __swig_getmethods__['key'] = _SoapySDR.ArgInfo_key_get\n if _newclass:\n key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.\n ArgInfo_key_set)\n __swig_setmethods__['value'] = _SoapySDR.ArgInfo_value_set\n __swig_getmethods__['value'] = _SoapySDR.ArgInfo_value_get\n 
if _newclass:\n value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.\n ArgInfo_value_set)\n __swig_setmethods__['name'] = _SoapySDR.ArgInfo_name_set\n __swig_getmethods__['name'] = _SoapySDR.ArgInfo_name_get\n if _newclass:\n name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.\n ArgInfo_name_set)\n __swig_setmethods__['description'] = _SoapySDR.ArgInfo_description_set\n __swig_getmethods__['description'] = _SoapySDR.ArgInfo_description_get\n if _newclass:\n description = _swig_property(_SoapySDR.ArgInfo_description_get,\n _SoapySDR.ArgInfo_description_set)\n __swig_setmethods__['units'] = _SoapySDR.ArgInfo_units_set\n __swig_getmethods__['units'] = _SoapySDR.ArgInfo_units_get\n if _newclass:\n units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.\n ArgInfo_units_set)\n BOOL = _SoapySDR.ArgInfo_BOOL\n INT = _SoapySDR.ArgInfo_INT\n FLOAT = _SoapySDR.ArgInfo_FLOAT\n STRING = _SoapySDR.ArgInfo_STRING\n __swig_setmethods__['type'] = _SoapySDR.ArgInfo_type_set\n __swig_getmethods__['type'] = _SoapySDR.ArgInfo_type_get\n if _newclass:\n type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.\n ArgInfo_type_set)\n __swig_setmethods__['range'] = _SoapySDR.ArgInfo_range_set\n __swig_getmethods__['range'] = _SoapySDR.ArgInfo_range_get\n if _newclass:\n range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.\n ArgInfo_range_set)\n __swig_setmethods__['options'] = _SoapySDR.ArgInfo_options_set\n __swig_getmethods__['options'] = _SoapySDR.ArgInfo_options_get\n if _newclass:\n options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.\n ArgInfo_options_set)\n __swig_setmethods__['optionNames'] = _SoapySDR.ArgInfo_optionNames_set\n __swig_getmethods__['optionNames'] = _SoapySDR.ArgInfo_optionNames_get\n if _newclass:\n optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get,\n _SoapySDR.ArgInfo_optionNames_set)\n __swig_destroy__ = _SoapySDR.delete_ArgInfo\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass 
SoapySDRKwargs(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRKwargs, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargs_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRKwargs___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargs___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargs___len__(self)\n\n def __iter__(self):\n return self.key_iterator()\n\n def iterkeys(self):\n return self.key_iterator()\n\n def itervalues(self):\n return self.value_iterator()\n\n def iteritems(self):\n return self.iterator()\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)\n\n def has_key(self, *args):\n return _SoapySDR.SoapySDRKwargs_has_key(self, *args)\n\n def keys(self):\n return _SoapySDR.SoapySDRKwargs_keys(self)\n\n def values(self):\n return _SoapySDR.SoapySDRKwargs_values(self)\n\n def items(self):\n return _SoapySDR.SoapySDRKwargs_items(self)\n\n def __contains__(self, *args):\n return _SoapySDR.SoapySDRKwargs___contains__(self, *args)\n\n def key_iterator(self):\n return _SoapySDR.SoapySDRKwargs_key_iterator(self)\n\n def value_iterator(self):\n return _SoapySDR.SoapySDRKwargs_value_iterator(self)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)\n\n def asdict(self):\n return _SoapySDR.SoapySDRKwargs_asdict(self)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargs(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargs_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargs_size(self)\n\n 
def clear(self):\n return _SoapySDR.SoapySDRKwargs_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargs_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargs_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargs_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargs_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargs_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargs_rend(self)\n\n def count(self, *args):\n return _SoapySDR.SoapySDRKwargs_count(self, *args)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargs_erase(self, *args)\n\n def find(self, *args):\n return _SoapySDR.SoapySDRKwargs_find(self, *args)\n\n def lower_bound(self, *args):\n return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)\n\n def upper_bound(self, *args):\n return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)\n\n def __str__(self):\n out = list()\n for k, v in self.iteritems():\n out.append('%s=%s' % (k, v))\n return '{' + ', '.join(out) + '}'\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRKwargsList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRKwargsList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargsList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRKwargsList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargsList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargsList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRKwargsList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)\n\n def 
__setslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRKwargsList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargsList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargsList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRKwargsList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargsList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargsList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargsList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargsList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargsList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargsList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRKwargsList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargsList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargsList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRKwargsList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRKwargsList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRKwargsList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRKwargsList_resize(self, *args)\n\n def insert(self, *args):\n return 
_SoapySDR.SoapySDRKwargsList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRKwargsList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRArgInfoList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRArgInfoList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self,\n SoapySDRArgInfoList, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRArgInfoList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRArgInfoList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRArgInfoList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRArgInfoList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRArgInfoList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRArgInfoList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRArgInfoList_clear(self)\n\n def swap(self, *args):\n return 
_SoapySDR.SoapySDRArgInfoList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRArgInfoList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRArgInfoList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRArgInfoList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRArgInfoList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRArgInfoList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRArgInfoList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRArgInfoList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRArgInfoList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRArgInfoList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRStringList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRStringList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRStringList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return 
_SoapySDR.SoapySDRStringList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRStringList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRStringList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRStringList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRStringList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRStringList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRStringList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRStringList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRStringList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRStringList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRStringList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRStringList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRStringList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRStringList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRStringList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRStringList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRStringList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRStringList_push_back(self, 
*args)\n\n def front(self):\n return _SoapySDR.SoapySDRStringList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRStringList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRStringList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRStringList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRStringList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRStringList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRStringList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRRangeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRRangeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRRangeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRRangeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRRangeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRRangeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRRangeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)\n\n def append(self, 
*args):\n return _SoapySDR.SoapySDRRangeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRRangeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRRangeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRRangeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRRangeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRRangeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRRangeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRRangeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRRangeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRRangeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRRangeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRRangeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRRangeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRRangeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRRangeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRRangeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRRangeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRRangeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRRangeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRRangeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRRangeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRSizeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRSizeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: 
_swig_getattr(self, SoapySDRSizeList, name\n )\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRSizeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRSizeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRSizeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRSizeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRSizeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRSizeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRSizeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRSizeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRSizeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRSizeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRSizeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRSizeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRSizeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n\n def __init__(self, *args):\n this = 
_SoapySDR.new_SoapySDRSizeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRSizeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRSizeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRSizeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRSizeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRDoubleList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRDoubleList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRDoubleList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRDoubleList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n\n def __getitem__(self, 
*args):\n return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRDoubleList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRDoubleList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRDoubleList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRDoubleList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRDoubleList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRDoubleList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRDoubleList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRDoubleList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRDoubleList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass 
StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_StreamResult()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['ret'] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__['ret'] = _SoapySDR.StreamResult_ret_get\n if _newclass:\n ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.\n StreamResult_ret_set)\n __swig_setmethods__['flags'] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__['flags'] = _SoapySDR.StreamResult_flags_get\n if _newclass:\n flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.\n StreamResult_flags_set)\n __swig_setmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:\n timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get,\n _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:\n chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get,\n _SoapySDR.StreamResult_chanMask_set)\n\n def __str__(self):\n return 'ret=%s, flags=%s, timeNs=%s' % (self.ret, self.flags, self.\n timeNs)\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self: None\n\n\n<mask token>\n\n\ndef SoapySDR_getAPIVersion():\n return _SoapySDR.SoapySDR_getAPIVersion()\n\n\n<mask token>\n\n\ndef SoapySDR_getABIVersion():\n return _SoapySDR.SoapySDR_getABIVersion()\n\n\n<mask token>\n\n\ndef loadModules():\n return _SoapySDR.loadModules()\n\n\n<mask token>\n\n\ndef formatToSize(*args):\n return _SoapySDR.formatToSize(*args)\n\n\n<mask token>\n\n\ndef ticksToTimeNs(*args):\n return 
_SoapySDR.ticksToTimeNs(*args)\n\n\n<mask token>\n\n\ndef setLogLevel(*args):\n return _SoapySDR.setLogLevel(*args)\n\n\n<mask token>\n\n\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Device, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self: None\n __swig_getmethods__['enumerate'] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:\n enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__['make'] = lambda x: _SoapySDR.Device_make\n if _newclass:\n make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__['unmake'] = lambda x: _SoapySDR.Device_unmake\n if _newclass:\n unmake = staticmethod(_SoapySDR.Device_unmake)\n\n def getDriverKey(self):\n return _SoapySDR.Device_getDriverKey(self)\n\n def getHardwareKey(self):\n return _SoapySDR.Device_getHardwareKey(self)\n\n def getHardwareInfo(self):\n return _SoapySDR.Device_getHardwareInfo(self)\n\n def setFrontendMapping(self, *args):\n return _SoapySDR.Device_setFrontendMapping(self, *args)\n\n def getFrontendMapping(self, *args):\n return _SoapySDR.Device_getFrontendMapping(self, *args)\n\n def getNumChannels(self, *args):\n return _SoapySDR.Device_getNumChannels(self, *args)\n\n def getChannelInfo(self, *args):\n return _SoapySDR.Device_getChannelInfo(self, *args)\n\n def getFullDuplex(self, *args):\n return _SoapySDR.Device_getFullDuplex(self, *args)\n\n def getStreamFormats(self, *args):\n return _SoapySDR.Device_getStreamFormats(self, *args)\n\n def getNativeStreamFormat(self, *args):\n return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n\n def getStreamArgsInfo(self, *args):\n return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n\n def setupStream(self, *args):\n return 
_SoapySDR.Device_setupStream(self, *args)\n\n def closeStream(self, *args):\n return _SoapySDR.Device_closeStream(self, *args)\n\n def getStreamMTU(self, *args):\n return _SoapySDR.Device_getStreamMTU(self, *args)\n\n def activateStream(self, *args):\n return _SoapySDR.Device_activateStream(self, *args)\n\n def deactivateStream(self, *args):\n return _SoapySDR.Device_deactivateStream(self, *args)\n\n def readStream(self, *args):\n return _SoapySDR.Device_readStream(self, *args)\n\n def writeStream(self, *args):\n return _SoapySDR.Device_writeStream(self, *args)\n\n def readStreamStatus(self, *args):\n return _SoapySDR.Device_readStreamStatus(self, *args)\n\n def getNumDirectAccessBuffers(self, *args):\n return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n\n def getDirectAccessBufferAddrs(self, *args):\n return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n\n def acquireReadBuffer(self, *args):\n return _SoapySDR.Device_acquireReadBuffer(self, *args)\n\n def releaseReadBuffer(self, *args):\n return _SoapySDR.Device_releaseReadBuffer(self, *args)\n\n def acquireWriteBuffer(self, *args):\n return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n\n def releaseWriteBuffer(self, *args):\n return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n\n def listAntennas(self, *args):\n return _SoapySDR.Device_listAntennas(self, *args)\n\n def setAntenna(self, *args):\n return _SoapySDR.Device_setAntenna(self, *args)\n\n def getAntenna(self, *args):\n return _SoapySDR.Device_getAntenna(self, *args)\n\n def hasDCOffsetMode(self, *args):\n return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n\n def setDCOffsetMode(self, *args):\n return _SoapySDR.Device_setDCOffsetMode(self, *args)\n\n def getDCOffsetMode(self, *args):\n return _SoapySDR.Device_getDCOffsetMode(self, *args)\n\n def hasDCOffset(self, *args):\n return _SoapySDR.Device_hasDCOffset(self, *args)\n\n def setDCOffset(self, *args):\n return _SoapySDR.Device_setDCOffset(self, *args)\n\n def 
getDCOffset(self, *args):\n return _SoapySDR.Device_getDCOffset(self, *args)\n\n def hasIQBalance(self, *args):\n return _SoapySDR.Device_hasIQBalance(self, *args)\n\n def setIQBalance(self, *args):\n return _SoapySDR.Device_setIQBalance(self, *args)\n\n def getIQBalance(self, *args):\n return _SoapySDR.Device_getIQBalance(self, *args)\n\n def hasFrequencyCorrection(self, *args):\n return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n\n def setFrequencyCorrection(self, *args):\n return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n\n def getFrequencyCorrection(self, *args):\n return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n\n def listGains(self, *args):\n return _SoapySDR.Device_listGains(self, *args)\n\n def hasGainMode(self, *args):\n return _SoapySDR.Device_hasGainMode(self, *args)\n\n def setGainMode(self, *args):\n return _SoapySDR.Device_setGainMode(self, *args)\n\n def getGainMode(self, *args):\n return _SoapySDR.Device_getGainMode(self, *args)\n\n def setGain(self, *args):\n return _SoapySDR.Device_setGain(self, *args)\n\n def getGain(self, *args):\n return _SoapySDR.Device_getGain(self, *args)\n\n def getGainRange(self, *args):\n return _SoapySDR.Device_getGainRange(self, *args)\n\n def setFrequency(self, *args):\n return _SoapySDR.Device_setFrequency(self, *args)\n\n def getFrequency(self, *args):\n return _SoapySDR.Device_getFrequency(self, *args)\n\n def listFrequencies(self, *args):\n return _SoapySDR.Device_listFrequencies(self, *args)\n\n def getFrequencyRange(self, *args):\n return _SoapySDR.Device_getFrequencyRange(self, *args)\n\n def getFrequencyArgsInfo(self, *args):\n return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n\n def setSampleRate(self, *args):\n return _SoapySDR.Device_setSampleRate(self, *args)\n\n def getSampleRate(self, *args):\n return _SoapySDR.Device_getSampleRate(self, *args)\n\n def listSampleRates(self, *args):\n return _SoapySDR.Device_listSampleRates(self, *args)\n\n def 
getSampleRateRange(self, *args):\n return _SoapySDR.Device_getSampleRateRange(self, *args)\n\n def setBandwidth(self, *args):\n return _SoapySDR.Device_setBandwidth(self, *args)\n\n def getBandwidth(self, *args):\n return _SoapySDR.Device_getBandwidth(self, *args)\n\n def listBandwidths(self, *args):\n return _SoapySDR.Device_listBandwidths(self, *args)\n\n def getBandwidthRange(self, *args):\n return _SoapySDR.Device_getBandwidthRange(self, *args)\n\n def setMasterClockRate(self, *args):\n return _SoapySDR.Device_setMasterClockRate(self, *args)\n\n def getMasterClockRate(self):\n return _SoapySDR.Device_getMasterClockRate(self)\n\n def getMasterClockRates(self):\n return _SoapySDR.Device_getMasterClockRates(self)\n\n def listClockSources(self):\n return _SoapySDR.Device_listClockSources(self)\n\n def setClockSource(self, *args):\n return _SoapySDR.Device_setClockSource(self, *args)\n\n def getClockSource(self):\n return _SoapySDR.Device_getClockSource(self)\n\n def listTimeSources(self):\n return _SoapySDR.Device_listTimeSources(self)\n\n def setTimeSource(self, *args):\n return _SoapySDR.Device_setTimeSource(self, *args)\n\n def getTimeSource(self):\n return _SoapySDR.Device_getTimeSource(self)\n\n def hasHardwareTime(self, what=''):\n return _SoapySDR.Device_hasHardwareTime(self, what)\n\n def getHardwareTime(self, what=''):\n return _SoapySDR.Device_getHardwareTime(self, what)\n\n def setHardwareTime(self, *args):\n return _SoapySDR.Device_setHardwareTime(self, *args)\n\n def setCommandTime(self, *args):\n return _SoapySDR.Device_setCommandTime(self, *args)\n\n def listSensors(self, *args):\n return _SoapySDR.Device_listSensors(self, *args)\n\n def getSensorInfo(self, *args):\n return _SoapySDR.Device_getSensorInfo(self, *args)\n\n def readSensor(self, *args):\n return _SoapySDR.Device_readSensor(self, *args)\n\n def listRegisterInterfaces(self):\n return _SoapySDR.Device_listRegisterInterfaces(self)\n\n def writeRegister(self, *args):\n return 
_SoapySDR.Device_writeRegister(self, *args)\n\n def readRegister(self, *args):\n return _SoapySDR.Device_readRegister(self, *args)\n\n def writeRegisters(self, *args):\n return _SoapySDR.Device_writeRegisters(self, *args)\n\n def readRegisters(self, *args):\n return _SoapySDR.Device_readRegisters(self, *args)\n\n def getSettingInfo(self, *args):\n return _SoapySDR.Device_getSettingInfo(self, *args)\n\n def writeSetting(self, *args):\n return _SoapySDR.Device_writeSetting(self, *args)\n\n def readSetting(self, *args):\n return _SoapySDR.Device_readSetting(self, *args)\n\n def listGPIOBanks(self):\n return _SoapySDR.Device_listGPIOBanks(self)\n\n def writeGPIO(self, *args):\n return _SoapySDR.Device_writeGPIO(self, *args)\n\n def readGPIO(self, *args):\n return _SoapySDR.Device_readGPIO(self, *args)\n\n def writeGPIODir(self, *args):\n return _SoapySDR.Device_writeGPIODir(self, *args)\n\n def readGPIODir(self, *args):\n return _SoapySDR.Device_readGPIODir(self, *args)\n\n def writeI2C(self, *args):\n return _SoapySDR.Device_writeI2C(self, *args)\n\n def readI2C(self, *args):\n return _SoapySDR.Device_readI2C(self, *args)\n\n def transactSPI(self, *args):\n return _SoapySDR.Device_transactSPI(self, *args)\n\n def listUARTs(self):\n return _SoapySDR.Device_listUARTs(self)\n\n def writeUART(self, *args):\n return _SoapySDR.Device_writeUART(self, *args)\n\n def readUART(self, *args):\n return _SoapySDR.Device_readUART(self, *args)\n\n def readStream__(self, *args):\n return _SoapySDR.Device_readStream__(self, *args)\n\n def writeStream__(self, *args):\n return _SoapySDR.Device_writeStream__(self, *args)\n\n def readStreamStatus__(self, *args):\n return _SoapySDR.Device_readStreamStatus__(self, *args)\n\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return '%s:%s' % (self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags=0, timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return 
self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags=0, timeNs=0,\n timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs,\n timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs=100000):\n return self.readStreamStatus__(stream, timeoutUs)\n\n\n<mask token>\n\n\nclass Device(Device):\n\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef _swig_setattr(self, class_type, name, value):\n return _swig_setattr_nondynamic(self, class_type, name, value, 0)\n\n\ndef _swig_getattr(self, class_type, name):\n if name == 'thisown':\n return self.this.own()\n method = class_type.__swig_getmethods__.get(name, None)\n if method:\n return method(self)\n raise AttributeError(name)\n\n\ndef _swig_repr(self):\n try:\n strthis = 'proxy of ' + self.this.__repr__()\n except:\n strthis = ''\n return '<%s.%s; %s >' % (self.__class__.__module__, self.__class__.\n __name__, strthis)\n\n\n<mask token>\n\n\nclass SwigPyIterator(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SwigPyIterator, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined - class is abstract')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_SwigPyIterator\n __del__ = lambda self: None\n\n def value(self):\n return _SoapySDR.SwigPyIterator_value(self)\n\n def incr(self, n=1):\n return _SoapySDR.SwigPyIterator_incr(self, n)\n\n def decr(self, n=1):\n return _SoapySDR.SwigPyIterator_decr(self, n)\n\n def distance(self, *args):\n return _SoapySDR.SwigPyIterator_distance(self, *args)\n\n def equal(self, *args):\n return _SoapySDR.SwigPyIterator_equal(self, *args)\n\n def copy(self):\n return _SoapySDR.SwigPyIterator_copy(self)\n\n def next(self):\n return _SoapySDR.SwigPyIterator_next(self)\n\n def __next__(self):\n return _SoapySDR.SwigPyIterator___next__(self)\n\n def previous(self):\n return _SoapySDR.SwigPyIterator_previous(self)\n\n def advance(self, *args):\n return _SoapySDR.SwigPyIterator_advance(self, *args)\n\n def __eq__(self, *args):\n return _SoapySDR.SwigPyIterator___eq__(self, *args)\n\n def __ne__(self, *args):\n return _SoapySDR.SwigPyIterator___ne__(self, *args)\n\n def __iadd__(self, *args):\n 
return _SoapySDR.SwigPyIterator___iadd__(self, *args)\n\n def __isub__(self, *args):\n return _SoapySDR.SwigPyIterator___isub__(self, *args)\n\n def __add__(self, *args):\n return _SoapySDR.SwigPyIterator___add__(self, *args)\n\n def __sub__(self, *args):\n return _SoapySDR.SwigPyIterator___sub__(self, *args)\n\n def __iter__(self):\n return self\n\n\n<mask token>\n\n\ndef KwargsToString(*args):\n return _SoapySDR.KwargsToString(*args)\n\n\n<mask token>\n\n\nclass Range(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name,\n value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Range, name)\n __repr__ = _swig_repr\n\n def __init__(self, *args):\n this = _SoapySDR.new_Range(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def minimum(self):\n return _SoapySDR.Range_minimum(self)\n\n def maximum(self):\n return _SoapySDR.Range_maximum(self)\n\n def step(self):\n return _SoapySDR.Range_step(self)\n\n def __str__(self):\n fields = [self.minimum(), self.maximum()]\n if self.step() != 0.0:\n fields.append(self.step())\n return ', '.join([('%g' % f) for f in fields])\n __swig_destroy__ = _SoapySDR.delete_Range\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass ArgInfo(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_ArgInfo()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['key'] = _SoapySDR.ArgInfo_key_set\n __swig_getmethods__['key'] = _SoapySDR.ArgInfo_key_get\n if _newclass:\n key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.\n ArgInfo_key_set)\n __swig_setmethods__['value'] = _SoapySDR.ArgInfo_value_set\n __swig_getmethods__['value'] = _SoapySDR.ArgInfo_value_get\n 
if _newclass:\n value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.\n ArgInfo_value_set)\n __swig_setmethods__['name'] = _SoapySDR.ArgInfo_name_set\n __swig_getmethods__['name'] = _SoapySDR.ArgInfo_name_get\n if _newclass:\n name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.\n ArgInfo_name_set)\n __swig_setmethods__['description'] = _SoapySDR.ArgInfo_description_set\n __swig_getmethods__['description'] = _SoapySDR.ArgInfo_description_get\n if _newclass:\n description = _swig_property(_SoapySDR.ArgInfo_description_get,\n _SoapySDR.ArgInfo_description_set)\n __swig_setmethods__['units'] = _SoapySDR.ArgInfo_units_set\n __swig_getmethods__['units'] = _SoapySDR.ArgInfo_units_get\n if _newclass:\n units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.\n ArgInfo_units_set)\n BOOL = _SoapySDR.ArgInfo_BOOL\n INT = _SoapySDR.ArgInfo_INT\n FLOAT = _SoapySDR.ArgInfo_FLOAT\n STRING = _SoapySDR.ArgInfo_STRING\n __swig_setmethods__['type'] = _SoapySDR.ArgInfo_type_set\n __swig_getmethods__['type'] = _SoapySDR.ArgInfo_type_get\n if _newclass:\n type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.\n ArgInfo_type_set)\n __swig_setmethods__['range'] = _SoapySDR.ArgInfo_range_set\n __swig_getmethods__['range'] = _SoapySDR.ArgInfo_range_get\n if _newclass:\n range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.\n ArgInfo_range_set)\n __swig_setmethods__['options'] = _SoapySDR.ArgInfo_options_set\n __swig_getmethods__['options'] = _SoapySDR.ArgInfo_options_get\n if _newclass:\n options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.\n ArgInfo_options_set)\n __swig_setmethods__['optionNames'] = _SoapySDR.ArgInfo_optionNames_set\n __swig_getmethods__['optionNames'] = _SoapySDR.ArgInfo_optionNames_get\n if _newclass:\n optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get,\n _SoapySDR.ArgInfo_optionNames_set)\n __swig_destroy__ = _SoapySDR.delete_ArgInfo\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass 
SoapySDRKwargs(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRKwargs, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargs_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRKwargs___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargs___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargs___len__(self)\n\n def __iter__(self):\n return self.key_iterator()\n\n def iterkeys(self):\n return self.key_iterator()\n\n def itervalues(self):\n return self.value_iterator()\n\n def iteritems(self):\n return self.iterator()\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)\n\n def has_key(self, *args):\n return _SoapySDR.SoapySDRKwargs_has_key(self, *args)\n\n def keys(self):\n return _SoapySDR.SoapySDRKwargs_keys(self)\n\n def values(self):\n return _SoapySDR.SoapySDRKwargs_values(self)\n\n def items(self):\n return _SoapySDR.SoapySDRKwargs_items(self)\n\n def __contains__(self, *args):\n return _SoapySDR.SoapySDRKwargs___contains__(self, *args)\n\n def key_iterator(self):\n return _SoapySDR.SoapySDRKwargs_key_iterator(self)\n\n def value_iterator(self):\n return _SoapySDR.SoapySDRKwargs_value_iterator(self)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)\n\n def asdict(self):\n return _SoapySDR.SoapySDRKwargs_asdict(self)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargs(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargs_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargs_size(self)\n\n 
def clear(self):\n return _SoapySDR.SoapySDRKwargs_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargs_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargs_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargs_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargs_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargs_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargs_rend(self)\n\n def count(self, *args):\n return _SoapySDR.SoapySDRKwargs_count(self, *args)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargs_erase(self, *args)\n\n def find(self, *args):\n return _SoapySDR.SoapySDRKwargs_find(self, *args)\n\n def lower_bound(self, *args):\n return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)\n\n def upper_bound(self, *args):\n return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)\n\n def __str__(self):\n out = list()\n for k, v in self.iteritems():\n out.append('%s=%s' % (k, v))\n return '{' + ', '.join(out) + '}'\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRKwargsList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRKwargsList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargsList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRKwargsList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargsList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargsList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRKwargsList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)\n\n def 
__setslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRKwargsList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargsList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargsList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRKwargsList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargsList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargsList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargsList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargsList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargsList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargsList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRKwargsList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargsList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargsList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRKwargsList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRKwargsList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRKwargsList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRKwargsList_resize(self, *args)\n\n def insert(self, *args):\n return 
_SoapySDR.SoapySDRKwargsList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRKwargsList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRArgInfoList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRArgInfoList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self,\n SoapySDRArgInfoList, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRArgInfoList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRArgInfoList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRArgInfoList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRArgInfoList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRArgInfoList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRArgInfoList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRArgInfoList_clear(self)\n\n def swap(self, *args):\n return 
_SoapySDR.SoapySDRArgInfoList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRArgInfoList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRArgInfoList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRArgInfoList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRArgInfoList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRArgInfoList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRArgInfoList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRArgInfoList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRArgInfoList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRArgInfoList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRStringList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRStringList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRStringList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return 
_SoapySDR.SoapySDRStringList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRStringList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRStringList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRStringList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRStringList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRStringList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRStringList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRStringList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRStringList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRStringList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRStringList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRStringList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRStringList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRStringList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRStringList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRStringList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRStringList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRStringList_push_back(self, 
*args)\n\n def front(self):\n return _SoapySDR.SoapySDRStringList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRStringList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRStringList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRStringList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRStringList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRStringList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRStringList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRRangeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRRangeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRRangeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRRangeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRRangeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRRangeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRRangeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)\n\n def append(self, 
*args):\n return _SoapySDR.SoapySDRRangeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRRangeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRRangeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRRangeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRRangeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRRangeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRRangeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRRangeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRRangeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRRangeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRRangeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRRangeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRRangeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRRangeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRRangeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRRangeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRRangeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRRangeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRRangeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRRangeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRRangeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRSizeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRSizeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: 
_swig_getattr(self, SoapySDRSizeList, name\n )\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRSizeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRSizeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRSizeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRSizeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRSizeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRSizeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRSizeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRSizeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRSizeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRSizeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRSizeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRSizeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRSizeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n\n def __init__(self, *args):\n this = 
_SoapySDR.new_SoapySDRSizeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRSizeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRSizeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRSizeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRSizeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRDoubleList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRDoubleList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRDoubleList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRDoubleList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n\n def __getitem__(self, 
*args):\n return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRDoubleList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRDoubleList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRDoubleList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRDoubleList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRDoubleList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRDoubleList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRDoubleList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRDoubleList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRDoubleList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass 
StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_StreamResult()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['ret'] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__['ret'] = _SoapySDR.StreamResult_ret_get\n if _newclass:\n ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.\n StreamResult_ret_set)\n __swig_setmethods__['flags'] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__['flags'] = _SoapySDR.StreamResult_flags_get\n if _newclass:\n flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.\n StreamResult_flags_set)\n __swig_setmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:\n timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get,\n _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:\n chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get,\n _SoapySDR.StreamResult_chanMask_set)\n\n def __str__(self):\n return 'ret=%s, flags=%s, timeNs=%s' % (self.ret, self.flags, self.\n timeNs)\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self: None\n\n\n<mask token>\n\n\ndef SoapySDR_errToStr(*args):\n return _SoapySDR.SoapySDR_errToStr(*args)\n\n\n<mask token>\n\n\ndef SoapySDR_getAPIVersion():\n return _SoapySDR.SoapySDR_getAPIVersion()\n\n\n<mask token>\n\n\ndef SoapySDR_getABIVersion():\n return _SoapySDR.SoapySDR_getABIVersion()\n\n\n<mask token>\n\n\ndef SoapySDR_getLibVersion():\n return _SoapySDR.SoapySDR_getLibVersion()\n\n\n<mask token>\n\n\ndef SoapySDR_log(*args):\n 
return _SoapySDR.SoapySDR_log(*args)\n\n\n<mask token>\n\n\ndef SoapySDR_setLogLevel(*args):\n return _SoapySDR.SoapySDR_setLogLevel(*args)\n\n\n<mask token>\n\n\ndef errToStr(*args):\n return _SoapySDR.errToStr(*args)\n\n\n<mask token>\n\n\ndef getAPIVersion():\n return _SoapySDR.getAPIVersion()\n\n\n<mask token>\n\n\ndef getABIVersion():\n return _SoapySDR.getABIVersion()\n\n\n<mask token>\n\n\ndef getLibVersion():\n return _SoapySDR.getLibVersion()\n\n\n<mask token>\n\n\ndef getRootPath():\n return _SoapySDR.getRootPath()\n\n\n<mask token>\n\n\ndef listSearchPaths():\n return _SoapySDR.listSearchPaths()\n\n\n<mask token>\n\n\ndef listModules(*args):\n return _SoapySDR.listModules(*args)\n\n\n<mask token>\n\n\ndef loadModule(*args):\n return _SoapySDR.loadModule(*args)\n\n\n<mask token>\n\n\ndef getLoaderResult(*args):\n return _SoapySDR.getLoaderResult(*args)\n\n\n<mask token>\n\n\ndef unloadModule(*args):\n return _SoapySDR.unloadModule(*args)\n\n\n<mask token>\n\n\ndef loadModules():\n return _SoapySDR.loadModules()\n\n\n<mask token>\n\n\ndef formatToSize(*args):\n return _SoapySDR.formatToSize(*args)\n\n\n<mask token>\n\n\ndef ticksToTimeNs(*args):\n return _SoapySDR.ticksToTimeNs(*args)\n\n\n<mask token>\n\n\ndef log(*args):\n return _SoapySDR.log(*args)\n\n\n<mask token>\n\n\ndef setLogLevel(*args):\n return _SoapySDR.setLogLevel(*args)\n\n\n<mask token>\n\n\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Device, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self: None\n __swig_getmethods__['enumerate'] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:\n enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__['make'] = lambda x: 
_SoapySDR.Device_make\n if _newclass:\n make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__['unmake'] = lambda x: _SoapySDR.Device_unmake\n if _newclass:\n unmake = staticmethod(_SoapySDR.Device_unmake)\n\n def getDriverKey(self):\n return _SoapySDR.Device_getDriverKey(self)\n\n def getHardwareKey(self):\n return _SoapySDR.Device_getHardwareKey(self)\n\n def getHardwareInfo(self):\n return _SoapySDR.Device_getHardwareInfo(self)\n\n def setFrontendMapping(self, *args):\n return _SoapySDR.Device_setFrontendMapping(self, *args)\n\n def getFrontendMapping(self, *args):\n return _SoapySDR.Device_getFrontendMapping(self, *args)\n\n def getNumChannels(self, *args):\n return _SoapySDR.Device_getNumChannels(self, *args)\n\n def getChannelInfo(self, *args):\n return _SoapySDR.Device_getChannelInfo(self, *args)\n\n def getFullDuplex(self, *args):\n return _SoapySDR.Device_getFullDuplex(self, *args)\n\n def getStreamFormats(self, *args):\n return _SoapySDR.Device_getStreamFormats(self, *args)\n\n def getNativeStreamFormat(self, *args):\n return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n\n def getStreamArgsInfo(self, *args):\n return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n\n def setupStream(self, *args):\n return _SoapySDR.Device_setupStream(self, *args)\n\n def closeStream(self, *args):\n return _SoapySDR.Device_closeStream(self, *args)\n\n def getStreamMTU(self, *args):\n return _SoapySDR.Device_getStreamMTU(self, *args)\n\n def activateStream(self, *args):\n return _SoapySDR.Device_activateStream(self, *args)\n\n def deactivateStream(self, *args):\n return _SoapySDR.Device_deactivateStream(self, *args)\n\n def readStream(self, *args):\n return _SoapySDR.Device_readStream(self, *args)\n\n def writeStream(self, *args):\n return _SoapySDR.Device_writeStream(self, *args)\n\n def readStreamStatus(self, *args):\n return _SoapySDR.Device_readStreamStatus(self, *args)\n\n def getNumDirectAccessBuffers(self, *args):\n return 
_SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n\n def getDirectAccessBufferAddrs(self, *args):\n return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n\n def acquireReadBuffer(self, *args):\n return _SoapySDR.Device_acquireReadBuffer(self, *args)\n\n def releaseReadBuffer(self, *args):\n return _SoapySDR.Device_releaseReadBuffer(self, *args)\n\n def acquireWriteBuffer(self, *args):\n return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n\n def releaseWriteBuffer(self, *args):\n return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n\n def listAntennas(self, *args):\n return _SoapySDR.Device_listAntennas(self, *args)\n\n def setAntenna(self, *args):\n return _SoapySDR.Device_setAntenna(self, *args)\n\n def getAntenna(self, *args):\n return _SoapySDR.Device_getAntenna(self, *args)\n\n def hasDCOffsetMode(self, *args):\n return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n\n def setDCOffsetMode(self, *args):\n return _SoapySDR.Device_setDCOffsetMode(self, *args)\n\n def getDCOffsetMode(self, *args):\n return _SoapySDR.Device_getDCOffsetMode(self, *args)\n\n def hasDCOffset(self, *args):\n return _SoapySDR.Device_hasDCOffset(self, *args)\n\n def setDCOffset(self, *args):\n return _SoapySDR.Device_setDCOffset(self, *args)\n\n def getDCOffset(self, *args):\n return _SoapySDR.Device_getDCOffset(self, *args)\n\n def hasIQBalance(self, *args):\n return _SoapySDR.Device_hasIQBalance(self, *args)\n\n def setIQBalance(self, *args):\n return _SoapySDR.Device_setIQBalance(self, *args)\n\n def getIQBalance(self, *args):\n return _SoapySDR.Device_getIQBalance(self, *args)\n\n def hasFrequencyCorrection(self, *args):\n return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n\n def setFrequencyCorrection(self, *args):\n return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n\n def getFrequencyCorrection(self, *args):\n return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n\n def listGains(self, *args):\n return 
_SoapySDR.Device_listGains(self, *args)\n\n def hasGainMode(self, *args):\n return _SoapySDR.Device_hasGainMode(self, *args)\n\n def setGainMode(self, *args):\n return _SoapySDR.Device_setGainMode(self, *args)\n\n def getGainMode(self, *args):\n return _SoapySDR.Device_getGainMode(self, *args)\n\n def setGain(self, *args):\n return _SoapySDR.Device_setGain(self, *args)\n\n def getGain(self, *args):\n return _SoapySDR.Device_getGain(self, *args)\n\n def getGainRange(self, *args):\n return _SoapySDR.Device_getGainRange(self, *args)\n\n def setFrequency(self, *args):\n return _SoapySDR.Device_setFrequency(self, *args)\n\n def getFrequency(self, *args):\n return _SoapySDR.Device_getFrequency(self, *args)\n\n def listFrequencies(self, *args):\n return _SoapySDR.Device_listFrequencies(self, *args)\n\n def getFrequencyRange(self, *args):\n return _SoapySDR.Device_getFrequencyRange(self, *args)\n\n def getFrequencyArgsInfo(self, *args):\n return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n\n def setSampleRate(self, *args):\n return _SoapySDR.Device_setSampleRate(self, *args)\n\n def getSampleRate(self, *args):\n return _SoapySDR.Device_getSampleRate(self, *args)\n\n def listSampleRates(self, *args):\n return _SoapySDR.Device_listSampleRates(self, *args)\n\n def getSampleRateRange(self, *args):\n return _SoapySDR.Device_getSampleRateRange(self, *args)\n\n def setBandwidth(self, *args):\n return _SoapySDR.Device_setBandwidth(self, *args)\n\n def getBandwidth(self, *args):\n return _SoapySDR.Device_getBandwidth(self, *args)\n\n def listBandwidths(self, *args):\n return _SoapySDR.Device_listBandwidths(self, *args)\n\n def getBandwidthRange(self, *args):\n return _SoapySDR.Device_getBandwidthRange(self, *args)\n\n def setMasterClockRate(self, *args):\n return _SoapySDR.Device_setMasterClockRate(self, *args)\n\n def getMasterClockRate(self):\n return _SoapySDR.Device_getMasterClockRate(self)\n\n def getMasterClockRates(self):\n return 
_SoapySDR.Device_getMasterClockRates(self)\n\n def listClockSources(self):\n return _SoapySDR.Device_listClockSources(self)\n\n def setClockSource(self, *args):\n return _SoapySDR.Device_setClockSource(self, *args)\n\n def getClockSource(self):\n return _SoapySDR.Device_getClockSource(self)\n\n def listTimeSources(self):\n return _SoapySDR.Device_listTimeSources(self)\n\n def setTimeSource(self, *args):\n return _SoapySDR.Device_setTimeSource(self, *args)\n\n def getTimeSource(self):\n return _SoapySDR.Device_getTimeSource(self)\n\n def hasHardwareTime(self, what=''):\n return _SoapySDR.Device_hasHardwareTime(self, what)\n\n def getHardwareTime(self, what=''):\n return _SoapySDR.Device_getHardwareTime(self, what)\n\n def setHardwareTime(self, *args):\n return _SoapySDR.Device_setHardwareTime(self, *args)\n\n def setCommandTime(self, *args):\n return _SoapySDR.Device_setCommandTime(self, *args)\n\n def listSensors(self, *args):\n return _SoapySDR.Device_listSensors(self, *args)\n\n def getSensorInfo(self, *args):\n return _SoapySDR.Device_getSensorInfo(self, *args)\n\n def readSensor(self, *args):\n return _SoapySDR.Device_readSensor(self, *args)\n\n def listRegisterInterfaces(self):\n return _SoapySDR.Device_listRegisterInterfaces(self)\n\n def writeRegister(self, *args):\n return _SoapySDR.Device_writeRegister(self, *args)\n\n def readRegister(self, *args):\n return _SoapySDR.Device_readRegister(self, *args)\n\n def writeRegisters(self, *args):\n return _SoapySDR.Device_writeRegisters(self, *args)\n\n def readRegisters(self, *args):\n return _SoapySDR.Device_readRegisters(self, *args)\n\n def getSettingInfo(self, *args):\n return _SoapySDR.Device_getSettingInfo(self, *args)\n\n def writeSetting(self, *args):\n return _SoapySDR.Device_writeSetting(self, *args)\n\n def readSetting(self, *args):\n return _SoapySDR.Device_readSetting(self, *args)\n\n def listGPIOBanks(self):\n return _SoapySDR.Device_listGPIOBanks(self)\n\n def writeGPIO(self, *args):\n return 
_SoapySDR.Device_writeGPIO(self, *args)\n\n def readGPIO(self, *args):\n return _SoapySDR.Device_readGPIO(self, *args)\n\n def writeGPIODir(self, *args):\n return _SoapySDR.Device_writeGPIODir(self, *args)\n\n def readGPIODir(self, *args):\n return _SoapySDR.Device_readGPIODir(self, *args)\n\n def writeI2C(self, *args):\n return _SoapySDR.Device_writeI2C(self, *args)\n\n def readI2C(self, *args):\n return _SoapySDR.Device_readI2C(self, *args)\n\n def transactSPI(self, *args):\n return _SoapySDR.Device_transactSPI(self, *args)\n\n def listUARTs(self):\n return _SoapySDR.Device_listUARTs(self)\n\n def writeUART(self, *args):\n return _SoapySDR.Device_writeUART(self, *args)\n\n def readUART(self, *args):\n return _SoapySDR.Device_readUART(self, *args)\n\n def readStream__(self, *args):\n return _SoapySDR.Device_readStream__(self, *args)\n\n def writeStream__(self, *args):\n return _SoapySDR.Device_writeStream__(self, *args)\n\n def readStreamStatus__(self, *args):\n return _SoapySDR.Device_readStreamStatus__(self, *args)\n\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return '%s:%s' % (self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags=0, timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags=0, timeNs=0,\n timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs,\n timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs=100000):\n return self.readStreamStatus__(stream, timeoutUs)\n\n\n<mask token>\n\n\ndef Device_enumerate(*args):\n return _SoapySDR.Device_enumerate(*args)\n\n\n<mask token>\n\n\ndef Device_make(*args):\n return _SoapySDR.Device_make(*args)\n\n\n<mask token>\n\n\ndef Device_unmake(*args):\n return _SoapySDR.Device_unmake(*args)\n\n\n<mask token>\n\n\nclass 
Device(Device):\n\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\n\ndef extractBuffPointer(buff):\n if hasattr(buff, '__array_interface__'):\n return buff.__array_interface__['data'][0]\n if hasattr(buff, '__long__'):\n return long(buff)\n if hasattr(buff, '__int__'):\n return int(buff)\n raise Exception('Unrecognized data format: ' + str(type(buff)))\n",
"step-5": "# This file was automatically generated by SWIG (http://www.swig.org).\n# Version 2.0.12\n#\n# Do not make changes to this file unless you know what you are doing--modify\n# the SWIG interface file instead.\n\n\n\n\n\nfrom sys import version_info\nif version_info >= (2,6,0):\n def swig_import_helper():\n from os.path import dirname\n import imp\n fp = None\n try:\n fp, pathname, description = imp.find_module('_SoapySDR', [dirname(__file__)])\n except ImportError:\n import _SoapySDR\n return _SoapySDR\n if fp is not None:\n try:\n _mod = imp.load_module('_SoapySDR', fp, pathname, description)\n finally:\n fp.close()\n return _mod\n _SoapySDR = swig_import_helper()\n del swig_import_helper\nelse:\n import _SoapySDR\ndel version_info\ntry:\n _swig_property = property\nexcept NameError:\n pass # Python < 2.2 doesn't have 'property'.\ndef _swig_setattr_nondynamic(self,class_type,name,value,static=1):\n if (name == \"thisown\"): return self.this.own(value)\n if (name == \"this\"):\n if type(value).__name__ == 'SwigPyObject':\n self.__dict__[name] = value\n return\n method = class_type.__swig_setmethods__.get(name,None)\n if method: return method(self,value)\n if (not static):\n self.__dict__[name] = value\n else:\n raise AttributeError(\"You cannot add attributes to %s\" % self)\n\ndef _swig_setattr(self,class_type,name,value):\n return _swig_setattr_nondynamic(self,class_type,name,value,0)\n\ndef _swig_getattr(self,class_type,name):\n if (name == \"thisown\"): return self.this.own()\n method = class_type.__swig_getmethods__.get(name,None)\n if method: return method(self)\n raise AttributeError(name)\n\ndef _swig_repr(self):\n try: strthis = \"proxy of \" + self.this.__repr__()\n except: strthis = \"\"\n return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,)\n\ntry:\n _object = object\n _newclass = 1\nexcept AttributeError:\n class _object : pass\n _newclass = 0\n\n\nclass SwigPyIterator(_object):\n __swig_setmethods__ = {}\n 
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)\n def __init__(self, *args, **kwargs): raise AttributeError(\"No constructor defined - class is abstract\")\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_SwigPyIterator\n __del__ = lambda self : None;\n def value(self): return _SoapySDR.SwigPyIterator_value(self)\n def incr(self, n=1): return _SoapySDR.SwigPyIterator_incr(self, n)\n def decr(self, n=1): return _SoapySDR.SwigPyIterator_decr(self, n)\n def distance(self, *args): return _SoapySDR.SwigPyIterator_distance(self, *args)\n def equal(self, *args): return _SoapySDR.SwigPyIterator_equal(self, *args)\n def copy(self): return _SoapySDR.SwigPyIterator_copy(self)\n def next(self): return _SoapySDR.SwigPyIterator_next(self)\n def __next__(self): return _SoapySDR.SwigPyIterator___next__(self)\n def previous(self): return _SoapySDR.SwigPyIterator_previous(self)\n def advance(self, *args): return _SoapySDR.SwigPyIterator_advance(self, *args)\n def __eq__(self, *args): return _SoapySDR.SwigPyIterator___eq__(self, *args)\n def __ne__(self, *args): return _SoapySDR.SwigPyIterator___ne__(self, *args)\n def __iadd__(self, *args): return _SoapySDR.SwigPyIterator___iadd__(self, *args)\n def __isub__(self, *args): return _SoapySDR.SwigPyIterator___isub__(self, *args)\n def __add__(self, *args): return _SoapySDR.SwigPyIterator___add__(self, *args)\n def __sub__(self, *args): return _SoapySDR.SwigPyIterator___sub__(self, *args)\n def __iter__(self): return self\nSwigPyIterator_swigregister = _SoapySDR.SwigPyIterator_swigregister\nSwigPyIterator_swigregister(SwigPyIterator)\n\n\ndef KwargsFromString(*args):\n return _SoapySDR.KwargsFromString(*args)\nKwargsFromString = _SoapySDR.KwargsFromString\n\ndef KwargsToString(*args):\n return _SoapySDR.KwargsToString(*args)\nKwargsToString = _SoapySDR.KwargsToString\nclass 
Range(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Range, name)\n __repr__ = _swig_repr\n def __init__(self, *args): \n this = _SoapySDR.new_Range(*args)\n try: self.this.append(this)\n except: self.this = this\n def minimum(self): return _SoapySDR.Range_minimum(self)\n def maximum(self): return _SoapySDR.Range_maximum(self)\n def step(self): return _SoapySDR.Range_step(self)\n def __str__(self):\n fields = [self.minimum(), self.maximum()]\n if self.step() != 0.0: fields.append(self.step())\n return ', '.join(['%g'%f for f in fields])\n\n __swig_destroy__ = _SoapySDR.delete_Range\n __del__ = lambda self : None;\nRange_swigregister = _SoapySDR.Range_swigregister\nRange_swigregister(Range)\n\nclass ArgInfo(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)\n __repr__ = _swig_repr\n def __init__(self): \n this = _SoapySDR.new_ArgInfo()\n try: self.this.append(this)\n except: self.this = this\n __swig_setmethods__[\"key\"] = _SoapySDR.ArgInfo_key_set\n __swig_getmethods__[\"key\"] = _SoapySDR.ArgInfo_key_get\n if _newclass:key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.ArgInfo_key_set)\n __swig_setmethods__[\"value\"] = _SoapySDR.ArgInfo_value_set\n __swig_getmethods__[\"value\"] = _SoapySDR.ArgInfo_value_get\n if _newclass:value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.ArgInfo_value_set)\n __swig_setmethods__[\"name\"] = _SoapySDR.ArgInfo_name_set\n __swig_getmethods__[\"name\"] = _SoapySDR.ArgInfo_name_get\n if _newclass:name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.ArgInfo_name_set)\n __swig_setmethods__[\"description\"] = _SoapySDR.ArgInfo_description_set\n __swig_getmethods__[\"description\"] = 
_SoapySDR.ArgInfo_description_get\n if _newclass:description = _swig_property(_SoapySDR.ArgInfo_description_get, _SoapySDR.ArgInfo_description_set)\n __swig_setmethods__[\"units\"] = _SoapySDR.ArgInfo_units_set\n __swig_getmethods__[\"units\"] = _SoapySDR.ArgInfo_units_get\n if _newclass:units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.ArgInfo_units_set)\n BOOL = _SoapySDR.ArgInfo_BOOL\n INT = _SoapySDR.ArgInfo_INT\n FLOAT = _SoapySDR.ArgInfo_FLOAT\n STRING = _SoapySDR.ArgInfo_STRING\n __swig_setmethods__[\"type\"] = _SoapySDR.ArgInfo_type_set\n __swig_getmethods__[\"type\"] = _SoapySDR.ArgInfo_type_get\n if _newclass:type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.ArgInfo_type_set)\n __swig_setmethods__[\"range\"] = _SoapySDR.ArgInfo_range_set\n __swig_getmethods__[\"range\"] = _SoapySDR.ArgInfo_range_get\n if _newclass:range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.ArgInfo_range_set)\n __swig_setmethods__[\"options\"] = _SoapySDR.ArgInfo_options_set\n __swig_getmethods__[\"options\"] = _SoapySDR.ArgInfo_options_get\n if _newclass:options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.ArgInfo_options_set)\n __swig_setmethods__[\"optionNames\"] = _SoapySDR.ArgInfo_optionNames_set\n __swig_getmethods__[\"optionNames\"] = _SoapySDR.ArgInfo_optionNames_get\n if _newclass:optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get, _SoapySDR.ArgInfo_optionNames_set)\n __swig_destroy__ = _SoapySDR.delete_ArgInfo\n __del__ = lambda self : None;\nArgInfo_swigregister = _SoapySDR.ArgInfo_swigregister\nArgInfo_swigregister(ArgInfo)\n\nclass SoapySDRKwargs(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargs, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRKwargs_iterator(self)\n def __iter__(self): return self.iterator()\n 
def __nonzero__(self): return _SoapySDR.SoapySDRKwargs___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRKwargs___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRKwargs___len__(self)\n def __iter__(self): return self.key_iterator()\n def iterkeys(self): return self.key_iterator()\n def itervalues(self): return self.value_iterator()\n def iteritems(self): return self.iterator()\n def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)\n def has_key(self, *args): return _SoapySDR.SoapySDRKwargs_has_key(self, *args)\n def keys(self): return _SoapySDR.SoapySDRKwargs_keys(self)\n def values(self): return _SoapySDR.SoapySDRKwargs_values(self)\n def items(self): return _SoapySDR.SoapySDRKwargs_items(self)\n def __contains__(self, *args): return _SoapySDR.SoapySDRKwargs___contains__(self, *args)\n def key_iterator(self): return _SoapySDR.SoapySDRKwargs_key_iterator(self)\n def value_iterator(self): return _SoapySDR.SoapySDRKwargs_value_iterator(self)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)\n def asdict(self): return _SoapySDR.SoapySDRKwargs_asdict(self)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRKwargs(*args)\n try: self.this.append(this)\n except: self.this = this\n def empty(self): return _SoapySDR.SoapySDRKwargs_empty(self)\n def size(self): return _SoapySDR.SoapySDRKwargs_size(self)\n def clear(self): return _SoapySDR.SoapySDRKwargs_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRKwargs_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRKwargs_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRKwargs_begin(self)\n def end(self): return _SoapySDR.SoapySDRKwargs_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRKwargs_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRKwargs_rend(self)\n def count(self, 
*args): return _SoapySDR.SoapySDRKwargs_count(self, *args)\n def erase(self, *args): return _SoapySDR.SoapySDRKwargs_erase(self, *args)\n def find(self, *args): return _SoapySDR.SoapySDRKwargs_find(self, *args)\n def lower_bound(self, *args): return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)\n def upper_bound(self, *args): return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)\n def __str__(self):\n out = list()\n for k, v in self.iteritems():\n out.append(\"%s=%s\"%(k, v))\n return '{'+(', '.join(out))+'}'\n\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs\n __del__ = lambda self : None;\nSoapySDRKwargs_swigregister = _SoapySDR.SoapySDRKwargs_swigregister\nSoapySDRKwargs_swigregister(SoapySDRKwargs)\n\nclass SoapySDRKwargsList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargsList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRKwargsList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRKwargsList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRKwargsList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRKwargsList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRKwargsList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)\n def 
append(self, *args): return _SoapySDR.SoapySDRKwargsList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRKwargsList_empty(self)\n def size(self): return _SoapySDR.SoapySDRKwargsList_size(self)\n def clear(self): return _SoapySDR.SoapySDRKwargsList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRKwargsList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRKwargsList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRKwargsList_begin(self)\n def end(self): return _SoapySDR.SoapySDRKwargsList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRKwargsList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRKwargsList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRKwargsList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRKwargsList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRKwargsList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRKwargsList_front(self)\n def back(self): return _SoapySDR.SoapySDRKwargsList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRKwargsList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRKwargsList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRKwargsList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRKwargsList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList\n __del__ = lambda self : None;\nSoapySDRKwargsList_swigregister = _SoapySDR.SoapySDRKwargsList_swigregister\nSoapySDRKwargsList_swigregister(SoapySDRKwargsList)\n\nclass SoapySDRArgInfoList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRArgInfoList, name, 
value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRArgInfoList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRArgInfoList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRArgInfoList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRArgInfoList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRArgInfoList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRArgInfoList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRArgInfoList_empty(self)\n def size(self): return _SoapySDR.SoapySDRArgInfoList_size(self)\n def clear(self): return _SoapySDR.SoapySDRArgInfoList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRArgInfoList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRArgInfoList_begin(self)\n def end(self): return _SoapySDR.SoapySDRArgInfoList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRArgInfoList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRArgInfoList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRArgInfoList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)\n def 
__init__(self, *args): \n this = _SoapySDR.new_SoapySDRArgInfoList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRArgInfoList_front(self)\n def back(self): return _SoapySDR.SoapySDRArgInfoList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRArgInfoList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList\n __del__ = lambda self : None;\nSoapySDRArgInfoList_swigregister = _SoapySDR.SoapySDRArgInfoList_swigregister\nSoapySDRArgInfoList_swigregister(SoapySDRArgInfoList)\n\nclass SoapySDRStringList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRStringList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRStringList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRStringList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRStringList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRStringList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRStringList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRStringList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRStringList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRStringList___delslice__(self, *args)\n def __delitem__(self, 
*args): return _SoapySDR.SoapySDRStringList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRStringList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRStringList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRStringList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRStringList_empty(self)\n def size(self): return _SoapySDR.SoapySDRStringList_size(self)\n def clear(self): return _SoapySDR.SoapySDRStringList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRStringList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRStringList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRStringList_begin(self)\n def end(self): return _SoapySDR.SoapySDRStringList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRStringList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRStringList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRStringList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRStringList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRStringList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRStringList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRStringList_front(self)\n def back(self): return _SoapySDR.SoapySDRStringList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRStringList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRStringList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRStringList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRStringList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRStringList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList\n __del__ = lambda self : 
None;\nSoapySDRStringList_swigregister = _SoapySDR.SoapySDRStringList_swigregister\nSoapySDRStringList_swigregister(SoapySDRStringList)\n\nclass SoapySDRRangeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRRangeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRRangeList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRRangeList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRRangeList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRRangeList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRRangeList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRRangeList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRRangeList_empty(self)\n def size(self): return _SoapySDR.SoapySDRRangeList_size(self)\n def clear(self): return _SoapySDR.SoapySDRRangeList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRRangeList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRRangeList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRRangeList_begin(self)\n def end(self): return _SoapySDR.SoapySDRRangeList_end(self)\n def rbegin(self): return 
_SoapySDR.SoapySDRRangeList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRRangeList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRRangeList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRRangeList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRRangeList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRRangeList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRRangeList_front(self)\n def back(self): return _SoapySDR.SoapySDRRangeList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRRangeList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRRangeList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRRangeList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRRangeList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRRangeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList\n __del__ = lambda self : None;\nSoapySDRRangeList_swigregister = _SoapySDR.SoapySDRRangeList_swigregister\nSoapySDRRangeList_swigregister(SoapySDRRangeList)\n\nclass SoapySDRSizeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRSizeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRSizeList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRSizeList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRSizeList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRSizeList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRSizeList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRSizeList___getslice__(self, 
*args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRSizeList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRSizeList_empty(self)\n def size(self): return _SoapySDR.SoapySDRSizeList_size(self)\n def clear(self): return _SoapySDR.SoapySDRSizeList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRSizeList_begin(self)\n def end(self): return _SoapySDR.SoapySDRSizeList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRSizeList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRSizeList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRSizeList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRSizeList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRSizeList_front(self)\n def back(self): return _SoapySDR.SoapySDRSizeList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRSizeList_reserve(self, *args)\n def 
capacity(self): return _SoapySDR.SoapySDRSizeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList\n __del__ = lambda self : None;\nSoapySDRSizeList_swigregister = _SoapySDR.SoapySDRSizeList_swigregister\nSoapySDRSizeList_swigregister(SoapySDRSizeList)\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRDoubleList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRDoubleList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRDoubleList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRDoubleList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRDoubleList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRDoubleList_empty(self)\n def size(self): return _SoapySDR.SoapySDRDoubleList_size(self)\n def clear(self): return _SoapySDR.SoapySDRDoubleList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n def begin(self): 
return _SoapySDR.SoapySDRDoubleList_begin(self)\n def end(self): return _SoapySDR.SoapySDRDoubleList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRDoubleList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRDoubleList_front(self)\n def back(self): return _SoapySDR.SoapySDRDoubleList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self : None;\nSoapySDRDoubleList_swigregister = _SoapySDR.SoapySDRDoubleList_swigregister\nSoapySDRDoubleList_swigregister(SoapySDRDoubleList)\n\nclass StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n def __init__(self): \n this = _SoapySDR.new_StreamResult()\n try: self.this.append(this)\n except: self.this = this\n __swig_setmethods__[\"ret\"] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__[\"ret\"] = _SoapySDR.StreamResult_ret_get\n if _newclass:ret = _swig_property(_SoapySDR.StreamResult_ret_get, 
_SoapySDR.StreamResult_ret_set)\n __swig_setmethods__[\"flags\"] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__[\"flags\"] = _SoapySDR.StreamResult_flags_get\n if _newclass:flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.StreamResult_flags_set)\n __swig_setmethods__[\"timeNs\"] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__[\"timeNs\"] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get, _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__[\"chanMask\"] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__[\"chanMask\"] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get, _SoapySDR.StreamResult_chanMask_set)\n def __str__(self):\n return \"ret=%s, flags=%s, timeNs=%s\"%(self.ret, self.flags, self.timeNs)\n\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self : None;\nStreamResult_swigregister = _SoapySDR.StreamResult_swigregister\nStreamResult_swigregister(StreamResult)\n\nSOAPY_SDR_TX = _SoapySDR.SOAPY_SDR_TX\nSOAPY_SDR_RX = _SoapySDR.SOAPY_SDR_RX\nSOAPY_SDR_END_BURST = _SoapySDR.SOAPY_SDR_END_BURST\nSOAPY_SDR_HAS_TIME = _SoapySDR.SOAPY_SDR_HAS_TIME\nSOAPY_SDR_END_ABRUPT = _SoapySDR.SOAPY_SDR_END_ABRUPT\nSOAPY_SDR_ONE_PACKET = _SoapySDR.SOAPY_SDR_ONE_PACKET\nSOAPY_SDR_MORE_FRAGMENTS = _SoapySDR.SOAPY_SDR_MORE_FRAGMENTS\nSOAPY_SDR_WAIT_TRIGGER = _SoapySDR.SOAPY_SDR_WAIT_TRIGGER\n\ndef SoapySDR_errToStr(*args):\n return _SoapySDR.SoapySDR_errToStr(*args)\nSoapySDR_errToStr = _SoapySDR.SoapySDR_errToStr\nSOAPY_SDR_TIMEOUT = _SoapySDR.SOAPY_SDR_TIMEOUT\nSOAPY_SDR_STREAM_ERROR = _SoapySDR.SOAPY_SDR_STREAM_ERROR\nSOAPY_SDR_CORRUPTION = _SoapySDR.SOAPY_SDR_CORRUPTION\nSOAPY_SDR_OVERFLOW = _SoapySDR.SOAPY_SDR_OVERFLOW\nSOAPY_SDR_NOT_SUPPORTED = _SoapySDR.SOAPY_SDR_NOT_SUPPORTED\nSOAPY_SDR_TIME_ERROR = _SoapySDR.SOAPY_SDR_TIME_ERROR\nSOAPY_SDR_UNDERFLOW = 
_SoapySDR.SOAPY_SDR_UNDERFLOW\nSOAPY_SDR_API_VERSION = _SoapySDR.SOAPY_SDR_API_VERSION\nSOAPY_SDR_ABI_VERSION = _SoapySDR.SOAPY_SDR_ABI_VERSION\n\ndef SoapySDR_getAPIVersion():\n return _SoapySDR.SoapySDR_getAPIVersion()\nSoapySDR_getAPIVersion = _SoapySDR.SoapySDR_getAPIVersion\n\ndef SoapySDR_getABIVersion():\n return _SoapySDR.SoapySDR_getABIVersion()\nSoapySDR_getABIVersion = _SoapySDR.SoapySDR_getABIVersion\n\ndef SoapySDR_getLibVersion():\n return _SoapySDR.SoapySDR_getLibVersion()\nSoapySDR_getLibVersion = _SoapySDR.SoapySDR_getLibVersion\nSOAPY_SDR_CF64 = _SoapySDR.SOAPY_SDR_CF64\nSOAPY_SDR_CF32 = _SoapySDR.SOAPY_SDR_CF32\nSOAPY_SDR_CS32 = _SoapySDR.SOAPY_SDR_CS32\nSOAPY_SDR_CU32 = _SoapySDR.SOAPY_SDR_CU32\nSOAPY_SDR_CS16 = _SoapySDR.SOAPY_SDR_CS16\nSOAPY_SDR_CU16 = _SoapySDR.SOAPY_SDR_CU16\nSOAPY_SDR_CS12 = _SoapySDR.SOAPY_SDR_CS12\nSOAPY_SDR_CU12 = _SoapySDR.SOAPY_SDR_CU12\nSOAPY_SDR_CS8 = _SoapySDR.SOAPY_SDR_CS8\nSOAPY_SDR_CU8 = _SoapySDR.SOAPY_SDR_CU8\nSOAPY_SDR_CS4 = _SoapySDR.SOAPY_SDR_CS4\nSOAPY_SDR_CU4 = _SoapySDR.SOAPY_SDR_CU4\nSOAPY_SDR_F64 = _SoapySDR.SOAPY_SDR_F64\nSOAPY_SDR_F32 = _SoapySDR.SOAPY_SDR_F32\nSOAPY_SDR_S32 = _SoapySDR.SOAPY_SDR_S32\nSOAPY_SDR_U32 = _SoapySDR.SOAPY_SDR_U32\nSOAPY_SDR_S16 = _SoapySDR.SOAPY_SDR_S16\nSOAPY_SDR_U16 = _SoapySDR.SOAPY_SDR_U16\nSOAPY_SDR_S8 = _SoapySDR.SOAPY_SDR_S8\nSOAPY_SDR_U8 = _SoapySDR.SOAPY_SDR_U8\n\ndef SoapySDR_formatToSize(*args):\n return _SoapySDR.SoapySDR_formatToSize(*args)\nSoapySDR_formatToSize = _SoapySDR.SoapySDR_formatToSize\nSOAPY_SDR_FATAL = _SoapySDR.SOAPY_SDR_FATAL\nSOAPY_SDR_CRITICAL = _SoapySDR.SOAPY_SDR_CRITICAL\nSOAPY_SDR_ERROR = _SoapySDR.SOAPY_SDR_ERROR\nSOAPY_SDR_WARNING = _SoapySDR.SOAPY_SDR_WARNING\nSOAPY_SDR_NOTICE = _SoapySDR.SOAPY_SDR_NOTICE\nSOAPY_SDR_INFO = _SoapySDR.SOAPY_SDR_INFO\nSOAPY_SDR_DEBUG = _SoapySDR.SOAPY_SDR_DEBUG\nSOAPY_SDR_TRACE = _SoapySDR.SOAPY_SDR_TRACE\nSOAPY_SDR_SSI = _SoapySDR.SOAPY_SDR_SSI\n\ndef SoapySDR_log(*args):\n return 
_SoapySDR.SoapySDR_log(*args)\nSoapySDR_log = _SoapySDR.SoapySDR_log\n\ndef SoapySDR_setLogLevel(*args):\n return _SoapySDR.SoapySDR_setLogLevel(*args)\nSoapySDR_setLogLevel = _SoapySDR.SoapySDR_setLogLevel\n\ndef errToStr(*args):\n return _SoapySDR.errToStr(*args)\nerrToStr = _SoapySDR.errToStr\n\ndef getAPIVersion():\n return _SoapySDR.getAPIVersion()\ngetAPIVersion = _SoapySDR.getAPIVersion\n\ndef getABIVersion():\n return _SoapySDR.getABIVersion()\ngetABIVersion = _SoapySDR.getABIVersion\n\ndef getLibVersion():\n return _SoapySDR.getLibVersion()\ngetLibVersion = _SoapySDR.getLibVersion\n\ndef getRootPath():\n return _SoapySDR.getRootPath()\ngetRootPath = _SoapySDR.getRootPath\n\ndef listSearchPaths():\n return _SoapySDR.listSearchPaths()\nlistSearchPaths = _SoapySDR.listSearchPaths\n\ndef listModules(*args):\n return _SoapySDR.listModules(*args)\nlistModules = _SoapySDR.listModules\n\ndef loadModule(*args):\n return _SoapySDR.loadModule(*args)\nloadModule = _SoapySDR.loadModule\n\ndef getLoaderResult(*args):\n return _SoapySDR.getLoaderResult(*args)\ngetLoaderResult = _SoapySDR.getLoaderResult\n\ndef unloadModule(*args):\n return _SoapySDR.unloadModule(*args)\nunloadModule = _SoapySDR.unloadModule\n\ndef loadModules():\n return _SoapySDR.loadModules()\nloadModules = _SoapySDR.loadModules\n\ndef formatToSize(*args):\n return _SoapySDR.formatToSize(*args)\nformatToSize = _SoapySDR.formatToSize\n\ndef ticksToTimeNs(*args):\n return _SoapySDR.ticksToTimeNs(*args)\nticksToTimeNs = _SoapySDR.ticksToTimeNs\n\ndef timeNsToTicks(*args):\n return _SoapySDR.timeNsToTicks(*args)\ntimeNsToTicks = _SoapySDR.timeNsToTicks\n\ndef log(*args):\n return _SoapySDR.log(*args)\nlog = _SoapySDR.log\n\ndef setLogLevel(*args):\n return _SoapySDR.setLogLevel(*args)\nsetLogLevel = _SoapySDR.setLogLevel\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda 
self, name: _swig_getattr(self, Device, name)\n def __init__(self, *args, **kwargs): raise AttributeError(\"No constructor defined\")\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self : None;\n __swig_getmethods__[\"enumerate\"] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__[\"make\"] = lambda x: _SoapySDR.Device_make\n if _newclass:make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__[\"unmake\"] = lambda x: _SoapySDR.Device_unmake\n if _newclass:unmake = staticmethod(_SoapySDR.Device_unmake)\n def getDriverKey(self): return _SoapySDR.Device_getDriverKey(self)\n def getHardwareKey(self): return _SoapySDR.Device_getHardwareKey(self)\n def getHardwareInfo(self): return _SoapySDR.Device_getHardwareInfo(self)\n def setFrontendMapping(self, *args): return _SoapySDR.Device_setFrontendMapping(self, *args)\n def getFrontendMapping(self, *args): return _SoapySDR.Device_getFrontendMapping(self, *args)\n def getNumChannels(self, *args): return _SoapySDR.Device_getNumChannels(self, *args)\n def getChannelInfo(self, *args): return _SoapySDR.Device_getChannelInfo(self, *args)\n def getFullDuplex(self, *args): return _SoapySDR.Device_getFullDuplex(self, *args)\n def getStreamFormats(self, *args): return _SoapySDR.Device_getStreamFormats(self, *args)\n def getNativeStreamFormat(self, *args): return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n def getStreamArgsInfo(self, *args): return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n def setupStream(self, *args): return _SoapySDR.Device_setupStream(self, *args)\n def closeStream(self, *args): return _SoapySDR.Device_closeStream(self, *args)\n def getStreamMTU(self, *args): return _SoapySDR.Device_getStreamMTU(self, *args)\n def activateStream(self, *args): return _SoapySDR.Device_activateStream(self, *args)\n def deactivateStream(self, *args): return 
_SoapySDR.Device_deactivateStream(self, *args)\n def readStream(self, *args): return _SoapySDR.Device_readStream(self, *args)\n def writeStream(self, *args): return _SoapySDR.Device_writeStream(self, *args)\n def readStreamStatus(self, *args): return _SoapySDR.Device_readStreamStatus(self, *args)\n def getNumDirectAccessBuffers(self, *args): return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n def getDirectAccessBufferAddrs(self, *args): return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n def acquireReadBuffer(self, *args): return _SoapySDR.Device_acquireReadBuffer(self, *args)\n def releaseReadBuffer(self, *args): return _SoapySDR.Device_releaseReadBuffer(self, *args)\n def acquireWriteBuffer(self, *args): return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n def releaseWriteBuffer(self, *args): return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n def listAntennas(self, *args): return _SoapySDR.Device_listAntennas(self, *args)\n def setAntenna(self, *args): return _SoapySDR.Device_setAntenna(self, *args)\n def getAntenna(self, *args): return _SoapySDR.Device_getAntenna(self, *args)\n def hasDCOffsetMode(self, *args): return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n def setDCOffsetMode(self, *args): return _SoapySDR.Device_setDCOffsetMode(self, *args)\n def getDCOffsetMode(self, *args): return _SoapySDR.Device_getDCOffsetMode(self, *args)\n def hasDCOffset(self, *args): return _SoapySDR.Device_hasDCOffset(self, *args)\n def setDCOffset(self, *args): return _SoapySDR.Device_setDCOffset(self, *args)\n def getDCOffset(self, *args): return _SoapySDR.Device_getDCOffset(self, *args)\n def hasIQBalance(self, *args): return _SoapySDR.Device_hasIQBalance(self, *args)\n def setIQBalance(self, *args): return _SoapySDR.Device_setIQBalance(self, *args)\n def getIQBalance(self, *args): return _SoapySDR.Device_getIQBalance(self, *args)\n def hasFrequencyCorrection(self, *args): return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n 
def setFrequencyCorrection(self, *args): return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n def getFrequencyCorrection(self, *args): return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n def listGains(self, *args): return _SoapySDR.Device_listGains(self, *args)\n def hasGainMode(self, *args): return _SoapySDR.Device_hasGainMode(self, *args)\n def setGainMode(self, *args): return _SoapySDR.Device_setGainMode(self, *args)\n def getGainMode(self, *args): return _SoapySDR.Device_getGainMode(self, *args)\n def setGain(self, *args): return _SoapySDR.Device_setGain(self, *args)\n def getGain(self, *args): return _SoapySDR.Device_getGain(self, *args)\n def getGainRange(self, *args): return _SoapySDR.Device_getGainRange(self, *args)\n def setFrequency(self, *args): return _SoapySDR.Device_setFrequency(self, *args)\n def getFrequency(self, *args): return _SoapySDR.Device_getFrequency(self, *args)\n def listFrequencies(self, *args): return _SoapySDR.Device_listFrequencies(self, *args)\n def getFrequencyRange(self, *args): return _SoapySDR.Device_getFrequencyRange(self, *args)\n def getFrequencyArgsInfo(self, *args): return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n def setSampleRate(self, *args): return _SoapySDR.Device_setSampleRate(self, *args)\n def getSampleRate(self, *args): return _SoapySDR.Device_getSampleRate(self, *args)\n def listSampleRates(self, *args): return _SoapySDR.Device_listSampleRates(self, *args)\n def getSampleRateRange(self, *args): return _SoapySDR.Device_getSampleRateRange(self, *args)\n def setBandwidth(self, *args): return _SoapySDR.Device_setBandwidth(self, *args)\n def getBandwidth(self, *args): return _SoapySDR.Device_getBandwidth(self, *args)\n def listBandwidths(self, *args): return _SoapySDR.Device_listBandwidths(self, *args)\n def getBandwidthRange(self, *args): return _SoapySDR.Device_getBandwidthRange(self, *args)\n def setMasterClockRate(self, *args): return _SoapySDR.Device_setMasterClockRate(self, *args)\n 
def getMasterClockRate(self): return _SoapySDR.Device_getMasterClockRate(self)\n def getMasterClockRates(self): return _SoapySDR.Device_getMasterClockRates(self)\n def listClockSources(self): return _SoapySDR.Device_listClockSources(self)\n def setClockSource(self, *args): return _SoapySDR.Device_setClockSource(self, *args)\n def getClockSource(self): return _SoapySDR.Device_getClockSource(self)\n def listTimeSources(self): return _SoapySDR.Device_listTimeSources(self)\n def setTimeSource(self, *args): return _SoapySDR.Device_setTimeSource(self, *args)\n def getTimeSource(self): return _SoapySDR.Device_getTimeSource(self)\n def hasHardwareTime(self, what=\"\"): return _SoapySDR.Device_hasHardwareTime(self, what)\n def getHardwareTime(self, what=\"\"): return _SoapySDR.Device_getHardwareTime(self, what)\n def setHardwareTime(self, *args): return _SoapySDR.Device_setHardwareTime(self, *args)\n def setCommandTime(self, *args): return _SoapySDR.Device_setCommandTime(self, *args)\n def listSensors(self, *args): return _SoapySDR.Device_listSensors(self, *args)\n def getSensorInfo(self, *args): return _SoapySDR.Device_getSensorInfo(self, *args)\n def readSensor(self, *args): return _SoapySDR.Device_readSensor(self, *args)\n def listRegisterInterfaces(self): return _SoapySDR.Device_listRegisterInterfaces(self)\n def writeRegister(self, *args): return _SoapySDR.Device_writeRegister(self, *args)\n def readRegister(self, *args): return _SoapySDR.Device_readRegister(self, *args)\n def writeRegisters(self, *args): return _SoapySDR.Device_writeRegisters(self, *args)\n def readRegisters(self, *args): return _SoapySDR.Device_readRegisters(self, *args)\n def getSettingInfo(self, *args): return _SoapySDR.Device_getSettingInfo(self, *args)\n def writeSetting(self, *args): return _SoapySDR.Device_writeSetting(self, *args)\n def readSetting(self, *args): return _SoapySDR.Device_readSetting(self, *args)\n def listGPIOBanks(self): return _SoapySDR.Device_listGPIOBanks(self)\n def 
writeGPIO(self, *args): return _SoapySDR.Device_writeGPIO(self, *args)\n def readGPIO(self, *args): return _SoapySDR.Device_readGPIO(self, *args)\n def writeGPIODir(self, *args): return _SoapySDR.Device_writeGPIODir(self, *args)\n def readGPIODir(self, *args): return _SoapySDR.Device_readGPIODir(self, *args)\n def writeI2C(self, *args): return _SoapySDR.Device_writeI2C(self, *args)\n def readI2C(self, *args): return _SoapySDR.Device_readI2C(self, *args)\n def transactSPI(self, *args): return _SoapySDR.Device_transactSPI(self, *args)\n def listUARTs(self): return _SoapySDR.Device_listUARTs(self)\n def writeUART(self, *args): return _SoapySDR.Device_writeUART(self, *args)\n def readUART(self, *args): return _SoapySDR.Device_readUART(self, *args)\n def readStream__(self, *args): return _SoapySDR.Device_readStream__(self, *args)\n def writeStream__(self, *args): return _SoapySDR.Device_writeStream__(self, *args)\n def readStreamStatus__(self, *args): return _SoapySDR.Device_readStreamStatus__(self, *args)\n #call unmake from custom deleter\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return \"%s:%s\"%(self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags = 0, timeoutUs = 100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags = 0, timeNs = 0, timeoutUs = 100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs, timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs = 100000):\n return self.readStreamStatus__(stream, timeoutUs)\n\nDevice_swigregister = _SoapySDR.Device_swigregister\nDevice_swigregister(Device)\n\ndef Device_enumerate(*args):\n return _SoapySDR.Device_enumerate(*args)\nDevice_enumerate = _SoapySDR.Device_enumerate\n\ndef Device_make(*args):\n return _SoapySDR.Device_make(*args)\nDevice_make = 
_SoapySDR.Device_make\n\ndef Device_unmake(*args):\n return _SoapySDR.Device_unmake(*args)\nDevice_unmake = _SoapySDR.Device_unmake\n\n__all__ = list()\nfor key in sorted(globals().keys()):\n if key.startswith('SOAPY_SDR_'):\n __all__.append(key)\n\n_Device = Device\nclass Device(Device):\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\ndef extractBuffPointer(buff):\n if hasattr(buff, '__array_interface__'): return buff.__array_interface__['data'][0]\n if hasattr(buff, '__long__'): return long(buff)\n if hasattr(buff, '__int__'): return int(buff)\n raise Exception(\"Unrecognized data format: \" + str(type(buff)))\n\n# This file is compatible with both classic and new-style classes.\n\n\n",
"step-ids": [
177,
316,
400,
419,
427
]
}
|
[
177,
316,
400,
419,
427
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import urllib2
# Es importante agregar la variable de ambiente:
# export PYTHONIOENCODING='UTF-8'
# para redireccionar la salida std a un archivo.
def call(url):
try:
request = urllib2.Request(url)
response = urllib2.urlopen(request)
output = response.read()
jout = json.loads(output, 'utf-8')
return jout
except Exception as e:
print "Error en el llamado a la api con los parámetros:\n> url: %s"%(url)
sys.exit()
def call_api(id, attr=""):
url = '%s%s'%('https://api.mercadolibre.com/categories/', id)
response = call(url)
response['attribute'] = call(url+'/attributes')
return response
def process(jout):
categories = jout.get('children_categories', '')
if categories:
list = [ process(call_api(category.get('id'))) for category in categories]
jout['children_categories'] = list
return jout
else:
return jout
if __name__ == '__main__':
response = call_api(sys.argv[1])
jout = process(response)
print json.dumps(jout, sort_keys=False, indent=4, separators=(',', ': '), encoding="utf-8", ensure_ascii=False)
|
normal
|
{
"blob_id": "c81fde7fb5d63233c633b8e5353fe04477fef2af",
"index": 4770,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport json\nimport urllib2\n\n# Es importante agregar la variable de ambiente:\n# export PYTHONIOENCODING='UTF-8'\n# para redireccionar la salida std a un archivo.\n\ndef call(url):\n try:\n request = urllib2.Request(url)\n response = urllib2.urlopen(request)\n output = response.read()\n jout = json.loads(output, 'utf-8')\n return jout\n except Exception as e:\n print \"Error en el llamado a la api con los parámetros:\\n> url: %s\"%(url)\n sys.exit()\n\ndef call_api(id, attr=\"\"):\n url = '%s%s'%('https://api.mercadolibre.com/categories/', id)\n response = call(url)\n response['attribute'] = call(url+'/attributes')\n return response\n\n\ndef process(jout):\n categories = jout.get('children_categories', '')\n if categories:\n list = [ process(call_api(category.get('id'))) for category in categories]\n jout['children_categories'] = list\n return jout\n else:\n return jout\n\nif __name__ == '__main__':\n response = call_api(sys.argv[1])\n jout = process(response)\n print json.dumps(jout, sort_keys=False, indent=4, separators=(',', ': '), encoding=\"utf-8\", ensure_ascii=False)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import re
rule_regex = re.compile(r'([\.#]{5}) => ([\.#])')
grid_regex = re.compile(r'initial state: ([\.#]+)')
class Rule:
def __init__(self, template, alive):
self.template = template
self.alive = alive
def parse(string):
match = rule_regex.match(string)
if match:
template = match.group(1)
alive = match.group(2)
return Rule(template, alive)
return None
def read_input(path):
init_grid = ''
rules = []
with open(path) as infile:
cnt = 0
for line in infile:
if cnt == 0:
init_grid = grid_regex.match(line).group(1)
elif cnt > 1:
rules.append(Rule.parse(line))
cnt = cnt + 1
return init_grid, rules
def apply_rule(segment, rule):
if segment == rule.template:
return rule.alive
return None
def advance(grid, rules):
augmented_grid = "....." + grid + "....."
grid = ['.' for x in range(0, len(augmented_grid))]
for pos in range(2, len(augmented_grid)-2):
for rule in rules:
result = apply_rule(augmented_grid[pos-2:pos+3], rule)
if result:
grid[pos] = result
first_hash = grid.index('#')
last_hash = len(grid) - 1 - grid[::-1].index('#')
offset_delta = first_hash-5
return ''.join(grid[first_hash:last_hash+1]), offset_delta
def find_sum(grid, offset):
sum = 0
for i in range(0,len(grid)):
if grid[i] == '#':
sum = sum + i+offset
return sum
def main():
grid, rules = read_input('./input/input.dat')
offset = 0
sum = find_sum(grid, offset)
print(grid)
for i in range(1, 1000):
new_grid, offset_delta = advance(grid, rules)
offset = offset + offset_delta
new_sum = find_sum(new_grid, offset)
sum_diff = new_sum - sum
print(i, ": grid length = ", len(new_grid), " offset = ", offset, " sum = ", new_sum)
if new_grid == grid:
print("found repeated grids:")
break
grid = new_grid
sum = new_sum
target_year = 50000000000
print("sum at {} = {}".format(target_year, new_sum + sum_diff*(target_year-i)))
if __name__== "__main__":
main()
|
normal
|
{
"blob_id": "8c683c109aba69f296b8989915b1f3b3eecd9745",
"index": 4274,
"step-1": "<mask token>\n\n\nclass Rule:\n\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\n<mask token>\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = '.....' + grid + '.....'\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid) - 2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)\n if result:\n grid[pos] = result\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash - 5\n return ''.join(grid[first_hash:last_hash + 1]), offset_delta\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Rule:\n\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\n<mask token>\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = '.....' + grid + '.....'\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid) - 2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)\n if result:\n grid[pos] = result\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash - 5\n return ''.join(grid[first_hash:last_hash + 1]), offset_delta\n\n\ndef find_sum(grid, offset):\n sum = 0\n for i in range(0, len(grid)):\n if grid[i] == '#':\n sum = sum + i + offset\n return sum\n\n\ndef main():\n grid, rules = read_input('./input/input.dat')\n offset = 0\n sum = find_sum(grid, offset)\n print(grid)\n for i in range(1, 1000):\n new_grid, offset_delta = advance(grid, rules)\n offset = offset + offset_delta\n new_sum = find_sum(new_grid, offset)\n sum_diff = new_sum - sum\n print(i, ': grid length = ', len(new_grid), ' offset = ', offset,\n ' sum = ', new_sum)\n if new_grid == grid:\n print('found repeated grids:')\n break\n grid = new_grid\n sum = new_sum\n target_year = 50000000000\n print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (\n target_year - i)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Rule:\n\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\ndef read_input(path):\n init_grid = ''\n rules = []\n with open(path) as infile:\n cnt = 0\n for line in infile:\n if cnt == 0:\n init_grid = grid_regex.match(line).group(1)\n elif cnt > 1:\n rules.append(Rule.parse(line))\n cnt = cnt + 1\n return init_grid, rules\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = '.....' + grid + '.....'\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid) - 2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)\n if result:\n grid[pos] = result\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash - 5\n return ''.join(grid[first_hash:last_hash + 1]), offset_delta\n\n\ndef find_sum(grid, offset):\n sum = 0\n for i in range(0, len(grid)):\n if grid[i] == '#':\n sum = sum + i + offset\n return sum\n\n\ndef main():\n grid, rules = read_input('./input/input.dat')\n offset = 0\n sum = find_sum(grid, offset)\n print(grid)\n for i in range(1, 1000):\n new_grid, offset_delta = advance(grid, rules)\n offset = offset + offset_delta\n new_sum = find_sum(new_grid, offset)\n sum_diff = new_sum - sum\n print(i, ': grid length = ', len(new_grid), ' offset = ', offset,\n ' sum = ', new_sum)\n if new_grid == grid:\n print('found repeated grids:')\n break\n grid = new_grid\n sum = new_sum\n target_year = 50000000000\n print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (\n target_year - i)))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nrule_regex = re.compile('([\\\\.#]{5}) => ([\\\\.#])')\ngrid_regex = re.compile('initial state: ([\\\\.#]+)')\n\n\nclass Rule:\n\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\ndef read_input(path):\n init_grid = ''\n rules = []\n with open(path) as infile:\n cnt = 0\n for line in infile:\n if cnt == 0:\n init_grid = grid_regex.match(line).group(1)\n elif cnt > 1:\n rules.append(Rule.parse(line))\n cnt = cnt + 1\n return init_grid, rules\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = '.....' + grid + '.....'\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid) - 2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)\n if result:\n grid[pos] = result\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash - 5\n return ''.join(grid[first_hash:last_hash + 1]), offset_delta\n\n\ndef find_sum(grid, offset):\n sum = 0\n for i in range(0, len(grid)):\n if grid[i] == '#':\n sum = sum + i + offset\n return sum\n\n\ndef main():\n grid, rules = read_input('./input/input.dat')\n offset = 0\n sum = find_sum(grid, offset)\n print(grid)\n for i in range(1, 1000):\n new_grid, offset_delta = advance(grid, rules)\n offset = offset + offset_delta\n new_sum = find_sum(new_grid, offset)\n sum_diff = new_sum - sum\n print(i, ': grid length = ', len(new_grid), ' offset = ', offset,\n ' sum = ', new_sum)\n if new_grid == grid:\n print('found repeated grids:')\n break\n grid = new_grid\n sum = new_sum\n target_year = 50000000000\n print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (\n target_year - i)))\n\n\nif 
__name__ == '__main__':\n main()\n",
"step-5": "import re\n\nrule_regex = re.compile(r'([\\.#]{5}) => ([\\.#])')\ngrid_regex = re.compile(r'initial state: ([\\.#]+)')\n\n\nclass Rule:\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\ndef read_input(path):\n init_grid = ''\n rules = []\n with open(path) as infile:\n cnt = 0\n for line in infile:\n if cnt == 0:\n init_grid = grid_regex.match(line).group(1)\n elif cnt > 1:\n rules.append(Rule.parse(line))\n cnt = cnt + 1\n return init_grid, rules\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = \".....\" + grid + \".....\"\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid)-2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos-2:pos+3], rule) \n if result:\n grid[pos] = result\n\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash-5\n\n return ''.join(grid[first_hash:last_hash+1]), offset_delta\n\n\ndef find_sum(grid, offset):\n sum = 0\n for i in range(0,len(grid)):\n if grid[i] == '#':\n sum = sum + i+offset\n return sum\n\n\ndef main():\n grid, rules = read_input('./input/input.dat')\n offset = 0\n sum = find_sum(grid, offset)\n print(grid)\n\n for i in range(1, 1000):\n new_grid, offset_delta = advance(grid, rules)\n offset = offset + offset_delta\n new_sum = find_sum(new_grid, offset)\n sum_diff = new_sum - sum\n print(i, \": grid length = \", len(new_grid), \" offset = \", offset, \" sum = \", new_sum)\n if new_grid == grid:\n print(\"found repeated grids:\")\n break\n grid = new_grid\n sum = new_sum\n\n\n target_year = 50000000000\n\n print(\"sum at {} = {}\".format(target_year, new_sum + sum_diff*(target_year-i)))\n \n \n\nif 
__name__== \"__main__\":\n main()\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ALL_COMMANDS = (agent, clean, config, create, dep, env, meta, release, run,
test, validate)
<|reserved_special_token_1|>
from .agent import agent
from .clean import clean
from .config import config
from .create import create
from .dep import dep
from .env import env
from .meta import meta
from .release import release
from .run import run
from .test import test
from .validate import validate
ALL_COMMANDS = (agent, clean, config, create, dep, env, meta, release, run,
test, validate)
<|reserved_special_token_1|>
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .agent import agent
from .clean import clean
from .config import config
from .create import create
from .dep import dep
from .env import env
from .meta import meta
from .release import release
from .run import run
from .test import test
from .validate import validate
ALL_COMMANDS = (
agent,
clean,
config,
create,
dep,
env,
meta,
release,
run,
test,
validate,
)
|
flexible
|
{
"blob_id": "7a69a9fd6ee5de704a580e4515586a1c1d2b8017",
"index": 5874,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nALL_COMMANDS = (agent, clean, config, create, dep, env, meta, release, run,\n test, validate)\n",
"step-3": "from .agent import agent\nfrom .clean import clean\nfrom .config import config\nfrom .create import create\nfrom .dep import dep\nfrom .env import env\nfrom .meta import meta\nfrom .release import release\nfrom .run import run\nfrom .test import test\nfrom .validate import validate\nALL_COMMANDS = (agent, clean, config, create, dep, env, meta, release, run,\n test, validate)\n",
"step-4": "# (C) Datadog, Inc. 2018\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nfrom .agent import agent\nfrom .clean import clean\nfrom .config import config\nfrom .create import create\nfrom .dep import dep\nfrom .env import env\nfrom .meta import meta\nfrom .release import release\nfrom .run import run\nfrom .test import test\nfrom .validate import validate\n\nALL_COMMANDS = (\n agent,\n clean,\n config,\n create,\n dep,\n env,\n meta,\n release,\n run,\n test,\n validate,\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from vector3 import vec3
class ray:
def __init__(self, *args):
if len(args) == 0:
self.A = vec3(0,0,0)
self.B = vec3(1,0,0)
elif len(args) == 2:
if type(args[0]) != vec3 or type(args[1]) != vec3:
raise ValueError("Expected two vec3s")
else:
self.A = args[0]
self.B = args[1]
else:
raise ValueError("Expected 0 or 2 arguments, got " + len(args))
def origin(self):
return self.A
def direction(self):
return self.B
def point_at_parameter(self, t):
return self.A + t*self.B
if __name__ == "__main__":
r = ray(vec3(3,2,5.5), vec3(1,0,0))
print(r.point_at_parameter(5.0))
|
normal
|
{
"blob_id": "a73e3a07ab0ebb90fa744d3dfc8d9da119f99283",
"index": 2070,
"step-1": "<mask token>\n\n\nclass ray:\n\n def __init__(self, *args):\n if len(args) == 0:\n self.A = vec3(0, 0, 0)\n self.B = vec3(1, 0, 0)\n elif len(args) == 2:\n if type(args[0]) != vec3 or type(args[1]) != vec3:\n raise ValueError('Expected two vec3s')\n else:\n self.A = args[0]\n self.B = args[1]\n else:\n raise ValueError('Expected 0 or 2 arguments, got ' + len(args))\n\n def origin(self):\n return self.A\n <mask token>\n\n def point_at_parameter(self, t):\n return self.A + t * self.B\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ray:\n\n def __init__(self, *args):\n if len(args) == 0:\n self.A = vec3(0, 0, 0)\n self.B = vec3(1, 0, 0)\n elif len(args) == 2:\n if type(args[0]) != vec3 or type(args[1]) != vec3:\n raise ValueError('Expected two vec3s')\n else:\n self.A = args[0]\n self.B = args[1]\n else:\n raise ValueError('Expected 0 or 2 arguments, got ' + len(args))\n\n def origin(self):\n return self.A\n\n def direction(self):\n return self.B\n\n def point_at_parameter(self, t):\n return self.A + t * self.B\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ray:\n\n def __init__(self, *args):\n if len(args) == 0:\n self.A = vec3(0, 0, 0)\n self.B = vec3(1, 0, 0)\n elif len(args) == 2:\n if type(args[0]) != vec3 or type(args[1]) != vec3:\n raise ValueError('Expected two vec3s')\n else:\n self.A = args[0]\n self.B = args[1]\n else:\n raise ValueError('Expected 0 or 2 arguments, got ' + len(args))\n\n def origin(self):\n return self.A\n\n def direction(self):\n return self.B\n\n def point_at_parameter(self, t):\n return self.A + t * self.B\n\n\nif __name__ == '__main__':\n r = ray(vec3(3, 2, 5.5), vec3(1, 0, 0))\n print(r.point_at_parameter(5.0))\n",
"step-4": "from vector3 import vec3\n\n\nclass ray:\n\n def __init__(self, *args):\n if len(args) == 0:\n self.A = vec3(0, 0, 0)\n self.B = vec3(1, 0, 0)\n elif len(args) == 2:\n if type(args[0]) != vec3 or type(args[1]) != vec3:\n raise ValueError('Expected two vec3s')\n else:\n self.A = args[0]\n self.B = args[1]\n else:\n raise ValueError('Expected 0 or 2 arguments, got ' + len(args))\n\n def origin(self):\n return self.A\n\n def direction(self):\n return self.B\n\n def point_at_parameter(self, t):\n return self.A + t * self.B\n\n\nif __name__ == '__main__':\n r = ray(vec3(3, 2, 5.5), vec3(1, 0, 0))\n print(r.point_at_parameter(5.0))\n",
"step-5": "from vector3 import vec3\n\nclass ray:\n\tdef __init__(self, *args):\n\t\tif len(args) == 0:\n\t\t\tself.A = vec3(0,0,0)\n\t\t\tself.B = vec3(1,0,0)\n\t\telif len(args) == 2:\n\t\t\tif type(args[0]) != vec3 or type(args[1]) != vec3:\n\t\t\t\traise ValueError(\"Expected two vec3s\")\n\t\t\telse:\n\t\t\t\tself.A = args[0]\n\t\t\t\tself.B = args[1]\n\t\telse:\n\t\t\traise ValueError(\"Expected 0 or 2 arguments, got \" + len(args))\n\n\tdef origin(self):\n\t\treturn self.A\n\t\n\tdef direction(self):\n\t\treturn self.B\n\n\tdef point_at_parameter(self, t):\n\t\treturn self.A + t*self.B\n\n\nif __name__ == \"__main__\":\n\tr = ray(vec3(3,2,5.5), vec3(1,0,0))\n\tprint(r.point_at_parameter(5.0))\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.test import TestCase
from core.factories import CompanyFactory, EmployeeFactory
from core.pair_matcher import MaximumWeightGraphMatcher
class PairMatcherTestCase(TestCase):
def setUp(self):
self.company = CompanyFactory.create()
def test_simple(self):
employees = EmployeeFactory.create_batch(41, company=self.company)
matcher = MaximumWeightGraphMatcher()
groups = matcher.match(self.company, employees)
print('\n'.join([','.join(e.user.username for e in group) for group in
groups]))
|
normal
|
{
"blob_id": "0c68bd65cac3c8b9fd080900a00991b2d19260ee",
"index": 534,
"step-1": "<mask token>\n\n\nclass PairMatcherTestCase(TestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PairMatcherTestCase(TestCase):\n <mask token>\n\n def test_simple(self):\n employees = EmployeeFactory.create_batch(41, company=self.company)\n matcher = MaximumWeightGraphMatcher()\n groups = matcher.match(self.company, employees)\n print('\\n'.join([','.join(e.user.username for e in group) for group in\n groups]))\n",
"step-3": "<mask token>\n\n\nclass PairMatcherTestCase(TestCase):\n\n def setUp(self):\n self.company = CompanyFactory.create()\n\n def test_simple(self):\n employees = EmployeeFactory.create_batch(41, company=self.company)\n matcher = MaximumWeightGraphMatcher()\n groups = matcher.match(self.company, employees)\n print('\\n'.join([','.join(e.user.username for e in group) for group in\n groups]))\n",
"step-4": "from django.test import TestCase\nfrom core.factories import CompanyFactory, EmployeeFactory\nfrom core.pair_matcher import MaximumWeightGraphMatcher\n\n\nclass PairMatcherTestCase(TestCase):\n\n def setUp(self):\n self.company = CompanyFactory.create()\n\n def test_simple(self):\n employees = EmployeeFactory.create_batch(41, company=self.company)\n matcher = MaximumWeightGraphMatcher()\n groups = matcher.match(self.company, employees)\n print('\\n'.join([','.join(e.user.username for e in group) for group in\n groups]))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import tensorflow as tf
import numpy as np
import tensorflow.contrib.layers as layers
class Model(object):
def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10, keep_prob=0.5, scope="model"):
self._batch_size = batch_size
self._learning_rate = learning_rate
self._num_labels = num_labels
self._scope = scope
self._keep_prob = keep_prob
self._conv_hidden_dims = [192, 192]
with tf.variable_scope(self._scope):
self._build_model()
def _build_net(self, x, reuse=False, trainable=True, scope="inference_net"):
with tf.variable_scope(scope, reuse=reuse):
out = x
for i in range(len(self._conv_hidden_dims)):
out = layers.conv2d(out, num_outputs=self._conv_hidden_dims[i], kernel_size=(5, 5),
activation_fn=tf.nn.relu, trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)
out = layers.max_pool2d(out, kernel_size=(2, 2))
out = layers.flatten(out)
out = layers.fully_connected(out, num_outputs=1000, activation_fn=tf.nn.relu, trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)
logits = layers.fully_connected(out, self._num_labels, trainable=trainable)
return logits
def _build_model(self):
self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_') # data gets loaded as a 32x32 vector
x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x') # CIFAR dataset is shape 32,32,3
self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels], name='y') # 10 labels
# self.keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
self.lr = tf.placeholder(tf.float32, shape=(), name='lr')
self.logits = self._build_net(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)
self.loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9, use_nesterov=True)
self.train_op = optimizer.minimize(loss=self.loss)
self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))
# for eval steps
self.val_logits = self._build_net(x, reuse=True, trainable=False)
self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.acc)
self.merged = tf.summary.merge_all()
|
normal
|
{
"blob_id": "e9a1fd8464f6c1e65aa2c1af60becbfcbf050814",
"index": 7390,
"step-1": "<mask token>\n\n\nclass Model(object):\n\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,\n keep_prob=0.5, scope='model'):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Model(object):\n\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,\n keep_prob=0.5, scope='model'):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n\n def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'\n ):\n with tf.variable_scope(scope, reuse=reuse):\n out = x\n for i in range(len(self._conv_hidden_dims)):\n out = layers.conv2d(out, num_outputs=self._conv_hidden_dims\n [i], kernel_size=(5, 5), activation_fn=tf.nn.relu,\n trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n out = layers.max_pool2d(out, kernel_size=(2, 2))\n out = layers.flatten(out)\n out = layers.fully_connected(out, num_outputs=1000,\n activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n logits = layers.fully_connected(out, self._num_labels,\n trainable=trainable)\n return logits\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Model(object):\n\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,\n keep_prob=0.5, scope='model'):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n\n def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'\n ):\n with tf.variable_scope(scope, reuse=reuse):\n out = x\n for i in range(len(self._conv_hidden_dims)):\n out = layers.conv2d(out, num_outputs=self._conv_hidden_dims\n [i], kernel_size=(5, 5), activation_fn=tf.nn.relu,\n trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n out = layers.max_pool2d(out, kernel_size=(2, 2))\n out = layers.flatten(out)\n out = layers.fully_connected(out, num_outputs=1000,\n activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n logits = layers.fully_connected(out, self._num_labels,\n trainable=trainable)\n return logits\n\n def _build_model(self):\n self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_')\n x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x')\n self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels],\n name='y')\n self.lr = tf.placeholder(tf.float32, shape=(), name='lr')\n self.logits = self._build_net(x)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self\n .logits, labels=self.y)\n self.loss = tf.reduce_mean(cross_entropy)\n optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9,\n use_nesterov=True)\n self.train_op = optimizer.minimize(loss=self.loss)\n self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1\n ), tf.argmax(self.y, 1)), dtype=tf.float32))\n self.val_logits = self._build_net(x, reuse=True, trainable=False)\n self.val_acc = 
tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.\n val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('accuracy', self.acc)\n self.merged = tf.summary.merge_all()\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport tensorflow.contrib.layers as layers\n\n\nclass Model(object):\n\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,\n keep_prob=0.5, scope='model'):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n\n def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'\n ):\n with tf.variable_scope(scope, reuse=reuse):\n out = x\n for i in range(len(self._conv_hidden_dims)):\n out = layers.conv2d(out, num_outputs=self._conv_hidden_dims\n [i], kernel_size=(5, 5), activation_fn=tf.nn.relu,\n trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n out = layers.max_pool2d(out, kernel_size=(2, 2))\n out = layers.flatten(out)\n out = layers.fully_connected(out, num_outputs=1000,\n activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n logits = layers.fully_connected(out, self._num_labels,\n trainable=trainable)\n return logits\n\n def _build_model(self):\n self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_')\n x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x')\n self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels],\n name='y')\n self.lr = tf.placeholder(tf.float32, shape=(), name='lr')\n self.logits = self._build_net(x)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self\n .logits, labels=self.y)\n self.loss = tf.reduce_mean(cross_entropy)\n optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9,\n use_nesterov=True)\n self.train_op = optimizer.minimize(loss=self.loss)\n self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1\n ), tf.argmax(self.y, 1)), dtype=tf.float32))\n self.val_logits = 
self._build_net(x, reuse=True, trainable=False)\n self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.\n val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('accuracy', self.acc)\n self.merged = tf.summary.merge_all()\n",
"step-5": "import tensorflow as tf\nimport numpy as np\nimport tensorflow.contrib.layers as layers\n\nclass Model(object):\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10, keep_prob=0.5, scope=\"model\"):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n\n def _build_net(self, x, reuse=False, trainable=True, scope=\"inference_net\"):\n with tf.variable_scope(scope, reuse=reuse):\n out = x\n for i in range(len(self._conv_hidden_dims)):\n out = layers.conv2d(out, num_outputs=self._conv_hidden_dims[i], kernel_size=(5, 5),\n activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)\n out = layers.max_pool2d(out, kernel_size=(2, 2))\n\n out = layers.flatten(out)\n out = layers.fully_connected(out, num_outputs=1000, activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)\n logits = layers.fully_connected(out, self._num_labels, trainable=trainable)\n\n return logits\n\n def _build_model(self):\n self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_') # data gets loaded as a 32x32 vector\n x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x') # CIFAR dataset is shape 32,32,3\n self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels], name='y') # 10 labels\n # self.keep_prob = tf.placeholder(tf.float32, name='dropout_prob')\n self.lr = tf.placeholder(tf.float32, shape=(), name='lr')\n\n self.logits = self._build_net(x)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)\n self.loss = tf.reduce_mean(cross_entropy)\n optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9, use_nesterov=True)\n self.train_op = optimizer.minimize(loss=self.loss)\n self.acc 
= tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))\n\n # for eval steps\n self.val_logits = self._build_net(x, reuse=True, trainable=False)\n self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))\n\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('accuracy', self.acc)\n self.merged = tf.summary.merge_all()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class PrometeoAPI:
def __init__(self, user, pwd):
self.base_url = 'https://prometeoapi.com'
self.session = requests.Session()
self.__user = user
self.__pwd = pwd
self._login()
def _generate_csrf_token(self, url):
"""
This function gets the csrf token from the login page needed to
do request in order log into the website
"""
response = self.session.get(url)
content = response.content
tree = html.fromstring(content)
csrf_element = tree.xpath("//input[@name='csrfmiddlewaretoken']")[0]
csrf = csrf_element.get('value')
return csrf
<|reserved_special_token_0|>
def get_requests_current_month(self):
current_date = datetime.datetime.now()
request_url = (
f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'
)
response = self.session.get(request_url)
if response.status_code == 200:
json_table = response.json()
return json_table.get('usage_table')
<|reserved_special_token_0|>
def _strip_text(self, element):
return str(element.text_content()).strip()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrometeoAPI:
def __init__(self, user, pwd):
self.base_url = 'https://prometeoapi.com'
self.session = requests.Session()
self.__user = user
self.__pwd = pwd
self._login()
def _generate_csrf_token(self, url):
"""
This function gets the csrf token from the login page needed to
do request in order log into the website
"""
response = self.session.get(url)
content = response.content
tree = html.fromstring(content)
csrf_element = tree.xpath("//input[@name='csrfmiddlewaretoken']")[0]
csrf = csrf_element.get('value')
return csrf
<|reserved_special_token_0|>
def get_requests_current_month(self):
current_date = datetime.datetime.now()
request_url = (
f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'
)
response = self.session.get(request_url)
if response.status_code == 200:
json_table = response.json()
return json_table.get('usage_table')
def refresh_api_key(self):
csrf = self._generate_csrf_token(f'{self.base_url}/dashboard/')
headers = {'X-CSRFToken': csrf}
request_url = f'{self.base_url}/dashboard/reset-key/'
response = self.session.post(request_url, headers=headers)
self.api_key = response.json().get('api_key')
return self.api_key
def _strip_text(self, element):
return str(element.text_content()).strip()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv()
class PrometeoAPI:
def __init__(self, user, pwd):
self.base_url = 'https://prometeoapi.com'
self.session = requests.Session()
self.__user = user
self.__pwd = pwd
self._login()
def _generate_csrf_token(self, url):
"""
This function gets the csrf token from the login page needed to
do request in order log into the website
"""
response = self.session.get(url)
content = response.content
tree = html.fromstring(content)
csrf_element = tree.xpath("//input[@name='csrfmiddlewaretoken']")[0]
csrf = csrf_element.get('value')
return csrf
def _login(self):
"""
This function takes the username and password, logs in and sets api_key, user name, and
ammount of requests of the month, data available from the dashboard recieved after the log in
"""
url = f'{self.base_url}/dashboard/login/'
csrf = self._generate_csrf_token(url)
payload = {'csrfmiddlewaretoken': csrf, 'username': self.__user,
'password': self.__pwd}
response = self.session.request('POST', url, data=payload)
tree = html.fromstring(response.content)
page_title_element = tree.xpath('//title')[0]
page_title = str(page_title_element.text_content()).strip()
if 'Login - Prometeo' in page_title:
error = tree.xpath("//div[contains(@class,'alert')]")[0]
error_msj = self._strip_text(error)
raise Exception(
f'Failed to log into the site, response text: {error_msj}')
username_element = tree.xpath(
"//nav//*[contains(@class,'login-info__data')]/p[contains(@class,'text-white')]"
)[0]
self.username = self._strip_text(username_element)
api_key_element = tree.xpath("//p[contains(@class,'api-key-field')]")[0
]
self.api_key = self._strip_text(api_key_element)
def get_requests_current_month(self):
current_date = datetime.datetime.now()
request_url = (
f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'
)
response = self.session.get(request_url)
if response.status_code == 200:
json_table = response.json()
return json_table.get('usage_table')
def refresh_api_key(self):
csrf = self._generate_csrf_token(f'{self.base_url}/dashboard/')
headers = {'X-CSRFToken': csrf}
request_url = f'{self.base_url}/dashboard/reset-key/'
response = self.session.post(request_url, headers=headers)
self.api_key = response.json().get('api_key')
return self.api_key
def _strip_text(self, element):
return str(element.text_content()).strip()
if __name__ == '__main__':
api = PrometeoAPI(user=os.environ.get('PROMETEO_USERNAME'), pwd=os.
environ.get('PROMETEO_PASSWORD'))
print(api.api_key)
print(api.username)
print(api.refresh_api_key())
pp(api.get_requests_current_month())
<|reserved_special_token_1|>
import os
import requests
from pprint import pprint as pp
from lxml import html
from bs4 import BeautifulSoup
from dotenv import load_dotenv
import datetime
load_dotenv()
class PrometeoAPI:
def __init__(self, user, pwd):
self.base_url = 'https://prometeoapi.com'
self.session = requests.Session()
self.__user = user
self.__pwd = pwd
self._login()
def _generate_csrf_token(self, url):
"""
This function gets the csrf token from the login page needed to
do request in order log into the website
"""
response = self.session.get(url)
content = response.content
tree = html.fromstring(content)
csrf_element = tree.xpath("//input[@name='csrfmiddlewaretoken']")[0]
csrf = csrf_element.get('value')
return csrf
def _login(self):
"""
This function takes the username and password, logs in and sets api_key, user name, and
ammount of requests of the month, data available from the dashboard recieved after the log in
"""
url = f'{self.base_url}/dashboard/login/'
csrf = self._generate_csrf_token(url)
payload = {'csrfmiddlewaretoken': csrf, 'username': self.__user,
'password': self.__pwd}
response = self.session.request('POST', url, data=payload)
tree = html.fromstring(response.content)
page_title_element = tree.xpath('//title')[0]
page_title = str(page_title_element.text_content()).strip()
if 'Login - Prometeo' in page_title:
error = tree.xpath("//div[contains(@class,'alert')]")[0]
error_msj = self._strip_text(error)
raise Exception(
f'Failed to log into the site, response text: {error_msj}')
username_element = tree.xpath(
"//nav//*[contains(@class,'login-info__data')]/p[contains(@class,'text-white')]"
)[0]
self.username = self._strip_text(username_element)
api_key_element = tree.xpath("//p[contains(@class,'api-key-field')]")[0
]
self.api_key = self._strip_text(api_key_element)
def get_requests_current_month(self):
current_date = datetime.datetime.now()
request_url = (
f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'
)
response = self.session.get(request_url)
if response.status_code == 200:
json_table = response.json()
return json_table.get('usage_table')
def refresh_api_key(self):
csrf = self._generate_csrf_token(f'{self.base_url}/dashboard/')
headers = {'X-CSRFToken': csrf}
request_url = f'{self.base_url}/dashboard/reset-key/'
response = self.session.post(request_url, headers=headers)
self.api_key = response.json().get('api_key')
return self.api_key
def _strip_text(self, element):
return str(element.text_content()).strip()
if __name__ == '__main__':
api = PrometeoAPI(user=os.environ.get('PROMETEO_USERNAME'), pwd=os.
environ.get('PROMETEO_PASSWORD'))
print(api.api_key)
print(api.username)
print(api.refresh_api_key())
pp(api.get_requests_current_month())
<|reserved_special_token_1|>
import os
import requests
from pprint import pprint as pp
from lxml import html
from bs4 import BeautifulSoup
from dotenv import load_dotenv
import datetime
load_dotenv()
class PrometeoAPI:
    """Scraping client for the Prometeo dashboard.

    Authenticates with username/password at construction time and keeps the
    logged-in ``requests`` session around for the other dashboard calls.
    """

    def __init__(self, user, pwd):
        self.base_url = 'https://prometeoapi.com'
        self.session = requests.Session()
        self.__user = user
        self.__pwd = pwd
        self._login()

    def _generate_csrf_token(self, url):
        """Fetch *url* and return the CSRF token embedded in its login form.

        Django pages require this token (``csrfmiddlewaretoken``) on every
        state-changing request.
        """
        page = self.session.get(url)
        tree = html.fromstring(page.content)
        token_input = tree.xpath("//input[@name='csrfmiddlewaretoken']")[0]
        return token_input.get('value')

    def _login(self):
        """Log into the dashboard and scrape account data from the response.

        On success sets ``self.username`` and ``self.api_key``; on failure
        raises with the alert message shown on the login page.
        """
        url = f'{self.base_url}/dashboard/login/'
        payload = {
            'csrfmiddlewaretoken': self._generate_csrf_token(url),
            'username': self.__user,
            'password': self.__pwd,
        }
        response = self.session.request('POST', url, data=payload)
        tree = html.fromstring(response.content)
        # Still seeing the login page title means authentication failed.
        page_title = self._strip_text(tree.xpath('//title')[0])
        if 'Login - Prometeo' in page_title:
            error_msj = self._strip_text(
                tree.xpath("//div[contains(@class,'alert')]")[0])
            raise Exception(
                f'Failed to log into the site, response text: {error_msj}')
        self.username = self._strip_text(tree.xpath(
            "//nav//*[contains(@class,'login-info__data')]/p[contains(@class,'text-white')]"
        )[0])
        self.api_key = self._strip_text(
            tree.xpath("//p[contains(@class,'api-key-field')]")[0])

    def get_requests_current_month(self):
        """Return the usage table for the current month, or None on error."""
        now = datetime.datetime.now()
        request_url = (
            f'{self.base_url}/dashboard/filter_requests/?format=json&month={now.month}&user_id=&year={now.year}'
        )
        response = self.session.get(request_url)
        if response.status_code == 200:
            return response.json().get('usage_table')

    def refresh_api_key(self):
        """Ask the dashboard for a new API key; store and return it."""
        headers = {'X-CSRFToken': self._generate_csrf_token(f'{self.base_url}/dashboard/')}
        response = self.session.post(f'{self.base_url}/dashboard/reset-key/',
                                     headers=headers)
        self.api_key = response.json().get('api_key')
        return self.api_key

    def _strip_text(self, element):
        """Return the whitespace-trimmed text content of an lxml element."""
        return str(element.text_content()).strip()
if __name__ == '__main__':
    # Smoke test: authenticate with credentials from the environment and
    # exercise every public call once.
    api = PrometeoAPI(
        user=os.environ.get('PROMETEO_USERNAME'),
        pwd=os.environ.get('PROMETEO_PASSWORD'),
    )
    print(api.api_key)
    print(api.username)
    print(api.refresh_api_key())
    pp(api.get_requests_current_month())
|
flexible
|
{
"blob_id": "f3e654a589cc1c16b36203dd358671d0426556e6",
"index": 2676,
"step-1": "<mask token>\n\n\nclass PrometeoAPI:\n\n def __init__(self, user, pwd):\n self.base_url = 'https://prometeoapi.com'\n self.session = requests.Session()\n self.__user = user\n self.__pwd = pwd\n self._login()\n\n def _generate_csrf_token(self, url):\n \"\"\"\n This function gets the csrf token from the login page needed to\n do request in order log into the website\n\n \"\"\"\n response = self.session.get(url)\n content = response.content\n tree = html.fromstring(content)\n csrf_element = tree.xpath(\"//input[@name='csrfmiddlewaretoken']\")[0]\n csrf = csrf_element.get('value')\n return csrf\n <mask token>\n\n def get_requests_current_month(self):\n current_date = datetime.datetime.now()\n request_url = (\n f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'\n )\n response = self.session.get(request_url)\n if response.status_code == 200:\n json_table = response.json()\n return json_table.get('usage_table')\n <mask token>\n\n def _strip_text(self, element):\n return str(element.text_content()).strip()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PrometeoAPI:\n\n def __init__(self, user, pwd):\n self.base_url = 'https://prometeoapi.com'\n self.session = requests.Session()\n self.__user = user\n self.__pwd = pwd\n self._login()\n\n def _generate_csrf_token(self, url):\n \"\"\"\n This function gets the csrf token from the login page needed to\n do request in order log into the website\n\n \"\"\"\n response = self.session.get(url)\n content = response.content\n tree = html.fromstring(content)\n csrf_element = tree.xpath(\"//input[@name='csrfmiddlewaretoken']\")[0]\n csrf = csrf_element.get('value')\n return csrf\n <mask token>\n\n def get_requests_current_month(self):\n current_date = datetime.datetime.now()\n request_url = (\n f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'\n )\n response = self.session.get(request_url)\n if response.status_code == 200:\n json_table = response.json()\n return json_table.get('usage_table')\n\n def refresh_api_key(self):\n csrf = self._generate_csrf_token(f'{self.base_url}/dashboard/')\n headers = {'X-CSRFToken': csrf}\n request_url = f'{self.base_url}/dashboard/reset-key/'\n response = self.session.post(request_url, headers=headers)\n self.api_key = response.json().get('api_key')\n return self.api_key\n\n def _strip_text(self, element):\n return str(element.text_content()).strip()\n\n\n<mask token>\n",
"step-3": "<mask token>\nload_dotenv()\n\n\nclass PrometeoAPI:\n\n def __init__(self, user, pwd):\n self.base_url = 'https://prometeoapi.com'\n self.session = requests.Session()\n self.__user = user\n self.__pwd = pwd\n self._login()\n\n def _generate_csrf_token(self, url):\n \"\"\"\n This function gets the csrf token from the login page needed to\n do request in order log into the website\n\n \"\"\"\n response = self.session.get(url)\n content = response.content\n tree = html.fromstring(content)\n csrf_element = tree.xpath(\"//input[@name='csrfmiddlewaretoken']\")[0]\n csrf = csrf_element.get('value')\n return csrf\n\n def _login(self):\n \"\"\"\n This function takes the username and password, logs in and sets api_key, user name, and\n ammount of requests of the month, data available from the dashboard recieved after the log in\n \"\"\"\n url = f'{self.base_url}/dashboard/login/'\n csrf = self._generate_csrf_token(url)\n payload = {'csrfmiddlewaretoken': csrf, 'username': self.__user,\n 'password': self.__pwd}\n response = self.session.request('POST', url, data=payload)\n tree = html.fromstring(response.content)\n page_title_element = tree.xpath('//title')[0]\n page_title = str(page_title_element.text_content()).strip()\n if 'Login - Prometeo' in page_title:\n error = tree.xpath(\"//div[contains(@class,'alert')]\")[0]\n error_msj = self._strip_text(error)\n raise Exception(\n f'Failed to log into the site, response text: {error_msj}')\n username_element = tree.xpath(\n \"//nav//*[contains(@class,'login-info__data')]/p[contains(@class,'text-white')]\"\n )[0]\n self.username = self._strip_text(username_element)\n api_key_element = tree.xpath(\"//p[contains(@class,'api-key-field')]\")[0\n ]\n self.api_key = self._strip_text(api_key_element)\n\n def get_requests_current_month(self):\n current_date = datetime.datetime.now()\n request_url = (\n f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'\n )\n 
response = self.session.get(request_url)\n if response.status_code == 200:\n json_table = response.json()\n return json_table.get('usage_table')\n\n def refresh_api_key(self):\n csrf = self._generate_csrf_token(f'{self.base_url}/dashboard/')\n headers = {'X-CSRFToken': csrf}\n request_url = f'{self.base_url}/dashboard/reset-key/'\n response = self.session.post(request_url, headers=headers)\n self.api_key = response.json().get('api_key')\n return self.api_key\n\n def _strip_text(self, element):\n return str(element.text_content()).strip()\n\n\nif __name__ == '__main__':\n api = PrometeoAPI(user=os.environ.get('PROMETEO_USERNAME'), pwd=os.\n environ.get('PROMETEO_PASSWORD'))\n print(api.api_key)\n print(api.username)\n print(api.refresh_api_key())\n pp(api.get_requests_current_month())\n",
"step-4": "import os\nimport requests\nfrom pprint import pprint as pp\nfrom lxml import html\nfrom bs4 import BeautifulSoup\nfrom dotenv import load_dotenv\nimport datetime\nload_dotenv()\n\n\nclass PrometeoAPI:\n\n def __init__(self, user, pwd):\n self.base_url = 'https://prometeoapi.com'\n self.session = requests.Session()\n self.__user = user\n self.__pwd = pwd\n self._login()\n\n def _generate_csrf_token(self, url):\n \"\"\"\n This function gets the csrf token from the login page needed to\n do request in order log into the website\n\n \"\"\"\n response = self.session.get(url)\n content = response.content\n tree = html.fromstring(content)\n csrf_element = tree.xpath(\"//input[@name='csrfmiddlewaretoken']\")[0]\n csrf = csrf_element.get('value')\n return csrf\n\n def _login(self):\n \"\"\"\n This function takes the username and password, logs in and sets api_key, user name, and\n ammount of requests of the month, data available from the dashboard recieved after the log in\n \"\"\"\n url = f'{self.base_url}/dashboard/login/'\n csrf = self._generate_csrf_token(url)\n payload = {'csrfmiddlewaretoken': csrf, 'username': self.__user,\n 'password': self.__pwd}\n response = self.session.request('POST', url, data=payload)\n tree = html.fromstring(response.content)\n page_title_element = tree.xpath('//title')[0]\n page_title = str(page_title_element.text_content()).strip()\n if 'Login - Prometeo' in page_title:\n error = tree.xpath(\"//div[contains(@class,'alert')]\")[0]\n error_msj = self._strip_text(error)\n raise Exception(\n f'Failed to log into the site, response text: {error_msj}')\n username_element = tree.xpath(\n \"//nav//*[contains(@class,'login-info__data')]/p[contains(@class,'text-white')]\"\n )[0]\n self.username = self._strip_text(username_element)\n api_key_element = tree.xpath(\"//p[contains(@class,'api-key-field')]\")[0\n ]\n self.api_key = self._strip_text(api_key_element)\n\n def get_requests_current_month(self):\n current_date = 
datetime.datetime.now()\n request_url = (\n f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'\n )\n response = self.session.get(request_url)\n if response.status_code == 200:\n json_table = response.json()\n return json_table.get('usage_table')\n\n def refresh_api_key(self):\n csrf = self._generate_csrf_token(f'{self.base_url}/dashboard/')\n headers = {'X-CSRFToken': csrf}\n request_url = f'{self.base_url}/dashboard/reset-key/'\n response = self.session.post(request_url, headers=headers)\n self.api_key = response.json().get('api_key')\n return self.api_key\n\n def _strip_text(self, element):\n return str(element.text_content()).strip()\n\n\nif __name__ == '__main__':\n api = PrometeoAPI(user=os.environ.get('PROMETEO_USERNAME'), pwd=os.\n environ.get('PROMETEO_PASSWORD'))\n print(api.api_key)\n print(api.username)\n print(api.refresh_api_key())\n pp(api.get_requests_current_month())\n",
"step-5": "import os\n\nimport requests\nfrom pprint import pprint as pp\nfrom lxml import html\nfrom bs4 import BeautifulSoup\nfrom dotenv import load_dotenv\nimport datetime\n\nload_dotenv()\n\n\nclass PrometeoAPI:\n def __init__(self, user, pwd):\n self.base_url = 'https://prometeoapi.com'\n self.session = requests.Session()\n self.__user = user\n self.__pwd = pwd\n self._login()\n\n def _generate_csrf_token(self, url):\n '''\n This function gets the csrf token from the login page needed to\n do request in order log into the website\n\n '''\n response = self.session.get(url)\n\n content = response.content\n tree = html.fromstring(content)\n\n csrf_element = tree.xpath(\"//input[@name='csrfmiddlewaretoken']\")[0]\n csrf = csrf_element.get('value')\n\n return csrf\n\n def _login(self):\n '''\n This function takes the username and password, logs in and sets api_key, user name, and\n ammount of requests of the month, data available from the dashboard recieved after the log in\n '''\n\n url = f'{self.base_url}/dashboard/login/'\n\n csrf = self._generate_csrf_token(url)\n\n payload = {\n 'csrfmiddlewaretoken': csrf,\n 'username': self.__user,\n 'password': self.__pwd\n }\n\n response = self.session.request('POST', url, data=payload)\n\n tree = html.fromstring(response.content)\n\n page_title_element = tree.xpath(\"//title\")[0]\n page_title = str(page_title_element.text_content()).strip()\n\n if 'Login - Prometeo' in page_title:\n error = tree.xpath(\"//div[contains(@class,'alert')]\")[0]\n error_msj = self._strip_text(error)\n raise Exception(f'Failed to log into the site, response text: {error_msj}')\n\n username_element = tree.xpath(\"//nav//*[contains(@class,'login-info__data')]/p[contains(@class,'text-white')]\")[\n 0]\n self.username = self._strip_text(username_element)\n\n api_key_element = tree.xpath(\"//p[contains(@class,'api-key-field')]\")[0]\n self.api_key = self._strip_text(api_key_element)\n\n # requests_mes_element = 
tree.xpath(\"//p[contains(.,'Requests este mes:')]/b\")[0]\n # self.requests_mes = str(requests_mes_element.text_content()).strip()\n\n def get_requests_current_month(self):\n\n current_date = datetime.datetime.now()\n\n request_url = f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'\n response = self.session.get(request_url)\n\n if response.status_code == 200:\n json_table = response.json()\n return json_table.get('usage_table')\n\n def refresh_api_key(self):\n csrf = self._generate_csrf_token(f'{self.base_url}/dashboard/')\n headers = {'X-CSRFToken': csrf}\n\n request_url = f'{self.base_url}/dashboard/reset-key/'\n response = self.session.post(request_url, headers=headers)\n self.api_key = response.json().get('api_key')\n\n return self.api_key\n\n def _strip_text(self, element):\n return str(element.text_content()).strip()\n\n\nif __name__ == '__main__':\n api = PrometeoAPI(user=os.environ.get('PROMETEO_USERNAME'), pwd=os.environ.get('PROMETEO_PASSWORD'))\n\n print(api.api_key)\n print(api.username)\n print(api.refresh_api_key())\n pp(api.get_requests_current_month())\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
import time
class Block:
    """A single block in a blockchain.

    Holds its position in the chain, the transactions it carries, the hash
    of the preceding block, and a proof-of-work nonce. The block's own hash
    starts out unset (``None``) until it is computed elsewhere.
    """

    def __init__(self, index, transactions, previous_hash, nonce=0):
        # Capture the creation time immediately so the block is stamped at
        # construction rather than at hashing time.
        self.timestamp = time.time()
        self.index = index
        # NOTE: the attribute is named `transaction` (singular) even though
        # the parameter is plural; kept as-is because callers read it.
        self.transaction = transactions
        self.previous_hash = previous_hash
        self.nonce = nonce
        self.hash = None
|
normal
|
{
"blob_id": "43a23958b8c8779e3292f0f523a37b6d712fdbac",
"index": 4448,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Block:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Block:\n\n def __init__(self, index, transactions, previous_hash, nonce=0):\n self.index = index\n self.transaction = transactions\n self.timestamp = time.time()\n self.previous_hash = previous_hash\n self.nonce = nonce\n self.hash = None\n",
"step-4": "import time\n\n\nclass Block:\n\n def __init__(self, index, transactions, previous_hash, nonce=0):\n self.index = index\n self.transaction = transactions\n self.timestamp = time.time()\n self.previous_hash = previous_hash\n self.nonce = nonce\n self.hash = None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sklearn import preprocessing
from random import shuffle
import numpy as np
import collections
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras import backend as K
from gensim.models.keyedvectors import KeyedVectors
from nltk.tokenize import TreebankWordTokenizer
import re
import pickle
import os
import yaml
import pandas
from typing import List
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import losses, optimizers
from early_stopping import EarlyStoppingAtMaxMacroF1
import json
import hashlib
SEED = 7
def read_csv_json(file_name) -> pandas.DataFrame:
    """Load a dataset file into a DataFrame.

    Supports JSON-lines files (names ending in ``json``/``jsonl``, one JSON
    record per line) and CSV files (names ending in ``csv``); the format is
    inferred from the file-name suffix.

    Args:
        file_name: path to the dataset file.

    Returns:
        The parsed ``pandas.DataFrame``.

    Raises:
        NotImplementedError: if the suffix is not a supported format.
    """
    if file_name.endswith(('json', 'jsonl')):
        # lines=True: each line of the file is an independent JSON record.
        return pandas.read_json(file_name, lines=True)
    if file_name.endswith('csv'):
        return pandas.read_csv(file_name)
    raise NotImplementedError(f'unsupported dataset format: {file_name}')
def use_only_alphanumeric(input):
    """Collapse every run of non-word characters in *input* to one space.

    Word characters are ``[a-zA-Z0-9_]`` (regex ``\\w``); punctuation,
    quotes and whitespace are all replaced, and the result is stripped of
    leading/trailing spaces.
    """
    # Raw string avoids the invalid-escape warning that '\W' triggers in a
    # normal literal. The '^' and quote characters are redundant inside the
    # class (they are already non-word chars) but are kept for fidelity
    # with the original pattern.
    return re.sub(r"[\W^'\"]+", ' ', input).strip()
def tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims):
    """Turn raw sentences into per-token embedding-vector lists.

    Each sample is lower-cased, cleaned of punctuation, tokenized, and
    every token looked up in ``embedding_vector``. Tokens missing from the
    vocabulary get a pseudo-random vector seeded from a hash of the token,
    so the same unknown word always maps to the same vector.

    Returns a list (one entry per sample) of lists of
    ``embedding_dims``-long float lists.
    """
    normalized = [use_only_alphanumeric(text.lower()) for text in dataset]
    tokenized = [tokenizer.tokenize(text) for text in normalized]
    vectorized = []
    for tokens in tokenized:
        sentence_vectors = []
        for token in tokens:
            try:
                vec = embedding_vector[token]
            except KeyError:
                # Out-of-vocabulary token: seed NumPy's global RNG from a
                # hash of the token so the "unknown" vector is reproducible
                # across runs and processes.
                seed = int(hashlib.sha1(token.encode()).hexdigest(), 16) % (10 ** 6)
                np.random.seed(seed)
                vec = np.random.rand(embedding_dims)
            sentence_vectors.append(vec.tolist())
        vectorized.append(sentence_vectors)
    return vectorized
def pad_trunc(data, maxlen):
    """
    For a given dataset pad with zero vectors or truncate to maxlen.

    Each sample is a list of equal-length float vectors; samples longer
    than ``maxlen`` are cut, shorter ones are padded with zero vectors of
    the same dimensionality as the first vector in the dataset.

    Args:
        data: list of samples (lists of embedding vectors).
        maxlen: target number of vectors per sample.

    Returns:
        A new list where every sample is a fresh list of exactly ``maxlen``
        vectors. An empty input yields an empty list (the original code
        raised IndexError here).
    """
    if not data:
        return []
    # Infer the embedding width from the first vector of the first sample.
    zero_vector = [0.0] * len(data[0][0])
    new_data = []
    for sample in data:
        padded = list(sample[:maxlen])
        # The same zero_vector object is shared between pad slots; it is
        # never mutated, so the aliasing is safe and memory-friendly.
        padded.extend([zero_vector] * (maxlen - len(padded)))
        new_data.append(padded)
    return new_data
def save(model, le, path, history):
    """Persist a trained model and its label encoder under *path*.

    Writes four artifacts into the directory (created if missing):
    ``structure.json`` (the ``model.to_json()`` architecture string),
    ``weight.h5`` (weights), ``classes.npy`` (label categories) and
    ``log.json`` (the training-history dict).
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
    print(f'saving model to {path}')
    # Architecture as a JSON string.
    with open(os.path.join(path, 'structure.json'), 'w') as structure_out:
        structure_out.write(model.to_json())
    # Layer weights.
    model.save_weights(os.path.join(path, 'weight.h5'))
    # Label classes; np.save appends the .npy suffix automatically.
    np.save(os.path.join(path, 'classes'), le.categories_[0])
    # Per-epoch metrics recorded during training.
    with open(os.path.join(path, 'log.json'), 'w') as log_out:
        json.dump(history.history, log_out)
def load(path):
    """Load a model previously written by :func:`save`.

    Reads the architecture from ``structure.json``, the weights from
    ``weight.h5`` and the label categories from ``classes.npy`` under
    *path*, then refits a one-hot encoder over those categories.

    Returns:
        A ``(model, le)`` tuple: the Keras model and the fitted
        ``OneHotEncoder``.
    """
    print(f'loading model from {path}')
    structure_file = os.path.join(path, 'structure.json')
    weight_file = os.path.join(path, 'weight.h5')
    labels_file = os.path.join(path, 'classes.npy')
    # The with-block closes the file itself; the original code also called
    # json_file.close() afterwards, which was a redundant no-op.
    with open(structure_file, 'r') as json_file:
        json_string = json_file.read()
    model = model_from_json(json_string)
    model.load_weights(weight_file)
    # Build the predict function eagerly — presumably so the model can be
    # called from other threads without lazy-graph races (old-Keras
    # workaround); confirm before removing.
    model._make_predict_function()
    # Recreate the one-hot encoder over the saved categories.
    categories = np.load(labels_file)
    le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
    le.fit([[c] for c in categories])
    return model, le
def predict(session, graph, model, vectorized_input, num_classes):
    """Run inference inside an explicit TF session/graph context.

    Args:
        session: the TensorFlow session the model was loaded in.
        graph: the graph owning the model's operations.
        model: a model exposing ``predict_proba`` / ``predict_classes``.
        vectorized_input: padded embedding batch to classify.
        num_classes: width of the one-hot prediction matrix.

    Returns:
        ``(probs, preds)``: class probabilities and one-hot predictions.

    Raises:
        ValueError: if session, graph or model is ``None``.
    """
    # The original code did `raise ("...")`, which raises TypeError
    # ("exceptions must derive from BaseException") instead of surfacing
    # the intended message; raise a proper exception type instead.
    if session is None:
        raise ValueError('Session is not initialized')
    if graph is None:
        raise ValueError('Graph is not initialized')
    if model is None:
        raise ValueError('Model is not initialized')
    with session.as_default():
        with graph.as_default():
            probs = model.predict_proba(vectorized_input)
            preds = to_categorical(model.predict_classes(vectorized_input),
                                   num_classes=num_classes)
            return probs, preds
class Model:
    """CNN text-intent classifier on top of pretrained word embeddings.

    Wraps a TF1-style graph/session pair so training and inference can run
    outside the process-default graph (e.g. when serving several models).

    Attributes:
        model_cfg: hyperparameter dict read from the 'model' section of the
            YAML config (maxlen, embedding_dims, filters, batch_size, ...).
        tokenizer: NLTK TreebankWordTokenizer used for all text.
        vectors: word -> embedding-vector mapping loaded from a pickle.
        model / session / graph: populated by train() or load(); None before.
        le_encoder: fitted sklearn OneHotEncoder over the intent labels.
        label_smoothing: smoothing factor passed to the crossentropy loss.
    """

    def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):
        # Hyperparameters live under the 'model' key of the YAML config.
        with open(config_path, 'r') as f:
            self.model_cfg = yaml.safe_load(f)['model']
        self.tokenizer = TreebankWordTokenizer()
        # NOTE(review): pickle.load executes arbitrary code if the file is
        # untrusted -- the embeddings pickle must come from a trusted source.
        with open(word2vec_pkl_path, 'rb') as f:
            self.vectors = pickle.load(f)
        self.model = None
        self.session = None
        self.graph = None
        self.le_encoder = None
        self.label_smoothing = label_smoothing

    def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):
        """Train a classifier on a CSV/JSON dataset and save it to disk.

        The dataset must have 'text' and 'intent' columns.

        Args:
            tr_set_path: path to the training data (csv/json/jsonl).
            save_path: directory where weights and labels are saved.
            va_split: fraction of training data used for validation in early
                stopping. Only effective when stratified_split is False.
            stratified_split: if True, validation uses a fixed set drawn from
                a per-intent stratified split of fraction va_split.
            early_stopping: whether to stop on stagnating val_loss.

        Returns:
            The keras training history dict (per-epoch losses/metrics, plus
            a 'train_data' entry recording the dataset path).
        """
        df_tr = read_csv_json(tr_set_path)
        if stratified_split:
            # Sample va_split of each intent group; SEED keeps it repeatable.
            df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))
            # groupby().apply() yields a MultiIndex; level 1 holds the
            # original row index, used to drop the val rows from train.
            df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]
            va_messages, va_labels = list(df_va.text), list(df_va.intent)
            va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]
            tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
            tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]
            (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)
            # Reuse the encoder fitted on the training labels so the
            # validation one-hot columns line up with training.
            (x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)
        else:
            tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
            tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]
            (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)
        # Build the model in a fresh graph/session so repeated train() calls
        # do not accumulate state in the default TF1 graph.
        K.clear_session()
        graph = tf.Graph()
        with graph.as_default():
            session = tf.Session()
            with session.as_default():
                session.run(tf.global_variables_initializer())
                model = self.__build_model(num_classes=len(le_encoder.categories_[0]))
                model.compile(
                    loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),
                    optimizer=self.model_cfg.get('optimizer', 'adam')  # keras default lr (0.001)
                )
                # Early stopping on validation loss; best weights restored.
                callback = tf.keras.callbacks.EarlyStopping(
                    monitor="val_loss",
                    min_delta=0,
                    patience=5,
                    verbose=0,
                    mode="auto",
                    baseline=None,
                    restore_best_weights=True,
                )
                print('start training')
                # epochs=100 is an upper bound; early stopping usually ends
                # training well before that.
                history = model.fit(x_train, y_train,
                                    batch_size=self.model_cfg['batch_size'],
                                    epochs=100,
                                    validation_split=va_split if not stratified_split else 0,
                                    validation_data=(x_va, y_va) if stratified_split else None,
                                    callbacks=[callback] if early_stopping else None)
                # Record the dataset used so the saved log is self-describing.
                history.history['train_data'] = tr_set_path
                print(f'finished training in {len(history.history["loss"])} epochs')
                save(model, le_encoder, save_path, history)
                self.model = model
                self.session = session
                self.graph = graph
                self.le_encoder = le_encoder
        # Return the training history for callers that want to inspect it.
        return history.history

    def __preprocess(self, dataset, le_encoder=None):
        """Shuffle, one-hot encode labels and vectorize/pad the texts.

        Args:
            dataset: list of {'data': text, 'label': intent} dicts. Shuffled
                in place.
            le_encoder: already-fitted OneHotEncoder to reuse (e.g. for a
                validation set); when None a new one is fitted on `dataset`.

        Returns:
            (x, y, le_encoder): x shaped (n, maxlen, embedding_dims),
            y one-hot encoded labels, and the (possibly newly fitted) encoder.
        """
        shuffle(dataset)
        data = [s['data'] for s in dataset]
        # OneHotEncoder expects a 2-D input, hence the single-element lists.
        labels = [[s['label']] for s in dataset]
        if le_encoder is None:
            le_encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
            le_encoder.fit(labels)
        encoded_labels = le_encoder.transform(labels)
        print('%s intents with %s samples' % (len(le_encoder.get_feature_names()), len(data)))
        print(le_encoder.categories_[0])
        vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, data, self.model_cfg['embedding_dims'])
        x_train = vectorized_data
        y_train = encoded_labels
        # Pad/truncate every sample to maxlen so the batch reshapes cleanly.
        x_train = pad_trunc(x_train, self.model_cfg['maxlen'])
        x_train = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))
        y_train = np.array(y_train)
        return x_train, y_train, le_encoder

    def __build_model(self, num_classes=2, type='keras'):
        """Assemble the CNN: N conv/pool/dense stacks + softmax head."""
        print('Build model')
        model = Sequential()
        layers = self.model_cfg.get('layers', 1)
        for l in range(layers):
            self.__addLayers(model, self.model_cfg)
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))
        return model

    def __addLayers(self, model, model_cfg):
        """Append one Conv1D -> GlobalMaxPooling1D -> Dense stack to `model`.

        All sizes fall back to defaults when absent from the config.
        """
        maxlen = model_cfg.get('maxlen', 400)
        strides = model_cfg.get('strides', 1)
        embedding_dims = model_cfg.get('embedding_dims', 300)
        filters = model_cfg.get('filters', 250)
        activation_type = model_cfg.get('activation', 'relu')
        kernel_size = model_cfg.get('kernel_size', 3)
        hidden_dims = model_cfg.get('hidden_dims', 200)
        model.add(Conv1D(
            filters,
            kernel_size,
            padding='valid',
            activation=activation_type,
            strides=strides,
            input_shape=(maxlen, embedding_dims)))
        model.add(GlobalMaxPooling1D())
        model.add(Dense(hidden_dims))
        model.add(Activation(activation_type))

    def load(self, path):
        """Load a saved model into a fresh graph/session pair.

        After this call the instance is ready for predict().
        """
        K.clear_session()
        graph = tf.Graph()
        with graph.as_default():
            session = tf.Session()
            with session.as_default():
                self.session = session
                self.graph = graph
                # Delegates to the module-level load() helper.
                (model, le) = load(path)
                self.model = model
                self.le_encoder = le

    def predict(self, input: List[str]):
        """Classify a batch of raw texts.

        Args:
            input: list of raw text strings.

        Returns:
            One dict per input with keys: 'input', 'embeddings' (padded word
            vectors), 'label' (predicted intent), 'highestProb', and 'prob'
            (per-class probability map).
        """
        vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, input, self.model_cfg['embedding_dims'])
        x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])
        vectorized_input = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))
        # Calls the module-level predict() helper (the method name shadows it
        # only inside the class namespace, not here).
        (probs, preds) = predict(self.session, self.graph, self.model, vectorized_input, len(self.le_encoder.categories_[0]))
        probs = probs.tolist()
        # Map one-hot predictions back to the original intent labels.
        results = self.le_encoder.inverse_transform(preds)
        output = [{'input': input[i],
                   'embeddings': x_train[i],
                   'label': r.item(),
                   'highestProb': max(probs[i]),
                   'prob': dict(zip(self.le_encoder.categories_[0], probs[i]))
                   } for i, r in enumerate(results)]
        return output
|
normal
|
{
"blob_id": "23f491bbf26ede9052ecdab04b8c00cc78db5a7e",
"index": 8831,
"step-1": "<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\n<mask token>\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. 
\n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in 
{len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, 
input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-2": "<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\n<mask token>\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return 
model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. 
\n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in 
{len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, 
input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-3": "<mask token>\ntf.disable_v2_behavior()\n<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\\\W^\\'\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims\n ):\n vectorized_data = []\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(),\n 16) % 10 ** 6)\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n 
json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n 
save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', 
baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in {len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = 
model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-4": "from sklearn import preprocessing\nfrom random import shuffle\nimport numpy as np\nimport collections\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras import backend as K\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom nltk.tokenize import TreebankWordTokenizer\nimport re\nimport pickle\nimport os\nimport yaml\nimport pandas\nfrom typing import List\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import losses, optimizers\nfrom early_stopping import EarlyStoppingAtMaxMacroF1\nimport json\nimport hashlib\nSEED = 7\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\\\W^\\'\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims\n ):\n vectorized_data = []\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(),\n 16) % 10 ** 6)\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for 
sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, 
config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. 
\n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in 
{len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, 
input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-5": "from sklearn import preprocessing\nfrom random import shuffle\nimport numpy as np\nimport collections\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras import backend as K\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom nltk.tokenize import TreebankWordTokenizer\nimport re\nimport pickle\nimport os\n\nimport yaml\nimport pandas\nfrom typing import List\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import losses, optimizers\nfrom early_stopping import EarlyStoppingAtMaxMacroF1\nimport json\nimport hashlib\n\nSEED = 7\n\n\ndef read_csv_json(file_name) -> pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\W^\\'\\\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims):\n vectorized_data = []\n # probably could be optimized further\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n # print('token not found: (%s) in sentence: %s' % (token, ' '.join(tokens)))\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(), 16) % (10 ** 6))\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or 
truncate to maxlen\n \"\"\"\n new_data = []\n # Create a vector of 0s the length of our word vectors\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n # Append the appropriate number 0 vectors to the list\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n '''\n save model based on model, encoder\n '''\n\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, \"w\") as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, \"log.json\"), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, \"r\") as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n #le = preprocessing.LabelEncoder()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise (\"Session is not initialized\")\n if graph is None:\n raise (\"Graph is not initialized\")\n if model is None:\n raise (\"Model is not 
initialized\")\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return (probs, preds)\n\n\nclass Model:\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. 
\n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n (x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.categories_[0]))\n model.compile(\n loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),\n #metrics=['categorical_accuracy'],\n optimizer=self.model_cfg.get('optimizer', 'adam') #default lr at 0.001\n #optimizer=optimizers.Adam(learning_rate=5e-4)\n )\n # early stopping callback using validation loss \n callback = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n min_delta=0,\n patience=5,\n verbose=0,\n mode=\"auto\",\n baseline=None,\n restore_best_weights=True,\n )\n #callback = EarlyStoppingAtMaxMacroF1(\n # patience=100, # record all epochs\n # validation=(x_va, y_va)\n #)\n\n print('start training')\n history = model.fit(x_train, y_train,\n batch_size=self.model_cfg['batch_size'],\n 
epochs=100,\n validation_split=va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else None,\n callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(f'finished training in {len(history.history[\"loss\"])} epochs')\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n # return training history \n return history.history\n \n def __preprocess(self, dataset, le_encoder=None):\n '''\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n '''\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n #labels = [s['label'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n #le_encoder = preprocessing.LabelEncoder()\n if le_encoder is None: \n le_encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.get_feature_names()), len(data)))\n #print('train %s intents with %s samples' % (len(set(labels)), len(data)))\n #print(collections.Counter(labels))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, data, self.model_cfg['embedding_dims'])\n\n # split_point = int(len(vectorized_data) * .9)\n x_train = vectorized_data # vectorized_data[:split_point]\n y_train = encoded_labels # encoded_labels[:split_point]\n\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n 
self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n\n model.add(Conv1D(\n filters,\n kernel_size,\n padding='valid',\n activation=activation_type,\n strides=strides,\n input_shape=(maxlen, embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n (model, le) = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n (probs, preds) = predict(self.session, self.graph, self.model, vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i],\n 'embeddings': x_train[i],\n #'label': r,\n 'label': r.item(),\n 'highestProb': max(probs[i]),\n #'prob': dict(zip(self.le_encoder.classes_, probs[i]))\n 'prob': dict(zip(self.le_encoder.categories_[0], probs[i]))\n } for i, r in enumerate(results)]\n return output\n",
"step-ids": [
9,
13,
16,
18,
19
]
}
|
[
9,
13,
16,
18,
19
] |
class A(object):
_a ='d'
@staticmethod
def func_1():
A._a = 'b'
print A._a
@classmethod
def func_3(cls):
print cls._a
def func_2(self):
# self._a = 'c'
print self._a
# print A._a
#
# class B(object):
# @staticmethod
# def func_1():
# A.___a = 'c'
# print A.___a
# print A.___a
# B.func_1()
print A._a
# A.func_3()
A.func_1()
# A().func_2()
A.func_1()
A.func_3()
# print A().a
print A._a
|
normal
|
{
"blob_id": "2ab3adb4d0ed7e6e48afb2a8dab8f9250d335723",
"index": 2253,
"step-1": "class A(object):\n _a ='d'\n\n\n @staticmethod\n def func_1():\n A._a = 'b'\n print A._a\n\n @classmethod\n def func_3(cls):\n print cls._a\n\n def func_2(self):\n # self._a = 'c'\n print self._a\n\n# print A._a\n\n#\n# class B(object):\n# @staticmethod\n# def func_1():\n# A.___a = 'c'\n# print A.___a\n# print A.___a\n\n\n# B.func_1()\nprint A._a\n# A.func_3()\nA.func_1()\n# A().func_2()\nA.func_1()\nA.func_3()\n# print A().a\nprint A._a\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Author: Yudong Qiu
Functions for solving unrestricted Hartree-Fock
"""
import numpy as np
from qc_python import basis_integrals
from qc_python.common import chemical_elements, calc_nuclear_repulsion
def solve_unrestricted_hartree_fock(elems, coords, basis_set, charge=0, spinmult=1, maxiter=150, enable_DIIS=True, verbose=False):
""" Unrestricted Hartree-Fock """
# Compute the number of electrons in the system
n_electron = sum(chemical_elements.index(e) for e in elems) - charge
if verbose:
print("This system has a total of %d electrons" % n_electron)
n_single_e = spinmult - 1
if (n_electron+n_single_e) % 2 != 0:
raise RuntimeError("The specified charge %d and spinmult %d is impossible!" % (charge, spinmult))
# number of alpha and beta orbitals
n_a = int((n_electron + n_single_e) / 2)
n_b = n_electron - n_a
# compute nuclear repulsion energy
E_nuc = calc_nuclear_repulsion(elems, coords)
# compute one-electron integral matrices
Smat, Tmat, Vmat = basis_integrals.build_one_e_matrices(elems, coords, basis_set)
Hmat = Tmat + Vmat
# check if we have enough basis functions to hold all the electrons
if n_a > len(Smat):
raise RuntimeError("Number of basis functions is smaller than number of alpha orbitals")
# build two-electron integral tensor g = (pq|rs)
G_ao = basis_integrals.build_two_electron_tensor(elems, coords, basis_set)
if verbose:
print("One Electron Integrals Calculated:")
print("\nOverlap matrix S")
print(Smat)
print("\nKinetic energy matrix T")
print(Tmat)
print("\nNuclear attraction matrix V")
print(Vmat)
print("\nCore Hamiltonian matrix H = T + V")
print(Hmat)
print("\nTwo-electron Integrals G")
print(G_ao)
# Solve the FC = ESC equation by converting it to Ft C' = E C'
# Diagonalize overlap matrix and form S^(-1/2) matrix
Seigval, Seigvec = np.linalg.eig(Smat)
Shalf = np.diag(Seigval**-0.5)
Shalf = np.dot(Seigvec, np.dot(Shalf, Seigvec.T))
# intial guess density
Dmat_a = np.zeros_like(Smat)
Dmat_b = np.zeros_like(Smat)
E_hf = 0
converged = False
# DIIS
if enable_DIIS is True:
n_err_mat = 6
diis_start_n = 4
diis_err_mats = []
#diis_err_mats_a = []
#diis_err_mats_b = []
diis_fmats_a = []
diis_fmats_b = []
if verbose:
print(" *** DIIS Enabled ***")
if verbose:
print("\n *** SCF Iterations *** ")
print("Iter HF Energy delta E RMS |D|")
print("-------------------------------------------------------")
for i in range(maxiter):
Fmat = Hmat + np.einsum("rs,pqrs->pq",(Dmat_a+Dmat_b),G_ao)
Fmat_a = Fmat - np.einsum("rs,prqs->pq",Dmat_a,G_ao)
Fmat_b = Fmat - np.einsum("rs,prqs->pq",Dmat_b,G_ao)
if enable_DIIS and i > 0:
## DIIS for alpha spin
#FDS = np.einsum("pi,ij,jq->pq",Fmat_a,Dmat_a,Smat)
#SDF = np.einsum("pi,ij,jq->pq",Smat,Dmat_a,Fmat_a)
#diis_err_mats_a.append(FDS-SDF)
#diis_err_mats_a = diis_err_mats_a[-n_err_mat:]
diis_fmats_a.append(Fmat_a)
diis_fmats_a = diis_fmats_a[-n_err_mat:]
## DIIS for beta spin
#FDS = np.einsum("pi,ij,jq->pq",Fmat_b,Dmat_b,Smat)
#SDF = np.einsum("pi,ij,jq->pq",Smat,Dmat_b,Fmat_b)
#diis_err_mats_b.append(FDS-SDF)
#diis_err_mats_b = diis_err_mats_b[-n_err_mat:]
diis_fmats_b.append(Fmat_b)
diis_fmats_b = diis_fmats_b[-n_err_mat:]
FDS_a = np.einsum("pi,ij,jq->pq",Fmat_a,Dmat_a,Smat)
SDF_a = np.einsum("pi,ij,jq->pq",Smat,Dmat_a,Fmat_a)
FDS_b = np.einsum("pi,ij,jq->pq",Fmat_b,Dmat_b,Smat)
SDF_b = np.einsum("pi,ij,jq->pq",Smat,Dmat_b,Fmat_b)
diis_err_mats.append(FDS_a-SDF_a+FDS_b-SDF_b)
diis_err_mats = diis_err_mats[-n_err_mat:]
# compute Bmat_ij = Err_i . Err_j
n_diis = len(diis_err_mats)
if n_diis >= diis_start_n:
Fmat_a = DIIS_extrapolate_F(diis_err_mats, diis_fmats_a)
Fmat_b = DIIS_extrapolate_F(diis_err_mats, diis_fmats_b)
# solve the alpha HF equation F_a C_a = e_a S C_a
Feigval_a, Cmat_a = solve_FCeSC(Fmat_a, Shalf)
C_occ = Cmat_a[:, :n_a]
Dmat_a_new = np.dot(C_occ, C_occ.T)
# solve the beta HF equation F_b C_b = e_b S C_b
Feigval_b, Cmat_b = solve_FCeSC(Fmat_b, Shalf)
C_occ = Cmat_b[:, :n_b]
Dmat_b_new = np.dot(C_occ, C_occ.T)
E_hf_new = 0.5 * (np.einsum("pq,pq", Dmat_a, Hmat+Fmat_a) + np.einsum("pq,pq", Dmat_b, Hmat+Fmat_b))
dE = E_hf_new - E_hf
D_rms = np.sqrt(np.mean((Dmat_a_new-Dmat_a)**2)) + np.sqrt(np.mean((Dmat_b_new-Dmat_b)**2))
# update E_hf and Dmat
E_hf = E_hf_new
Dmat_a = Dmat_a_new
Dmat_b = Dmat_b_new
# print iteration information
if verbose is True:
print(" %-4d %17.10f %14.4e %14.4e" %(i, E_hf, dE, D_rms))
# check convergence
if abs(dE) < 1.0E-10 and abs(D_rms) < 1.0E-8:
converged = True
break
if converged == False:
print("SCF didn't converge in %d iterations!" % maxiter)
raise RuntimeError
E_total = E_nuc + E_hf
if verbose:
print("\nSCF converged!")
print("\nOrbital Energies (Eh) and coefficients for Alpha electrons")
print('E: '+''.join(["%17.7f"%e for e in Feigval_a]))
print('-' * (17 * len(Feigval_a) + 4))
for i,row in enumerate(Cmat_a):
print('c%-3d'%i + ''.join(["%17.7f"%c for c in row]))
print("\nOrbital Energies (Eh) and coefficients for Beta electrons")
print('E: '+''.join(["%17.7f"%e for e in Feigval_b]))
print('-' * (17 * len(Feigval_b) + 4))
for i,row in enumerate(Cmat_b):
print('c%-3d'%i + ''.join(["%17.7f"%c for c in row]))
print("\nNuclear Repulsion Energy = %17.10f Eh" % E_nuc)
print("Total Electronic Energy = %17.10f Eh" % E_hf)
print("Final Total Energy = %17.10f Eh" % E_total)
return {"E_nuc":E_nuc, "E_hf": E_hf, "E_total":E_total, "E_orbs_a": Feigval_a, "Cmat_a": Cmat_a, "Dmat_a": Dmat_a,
"E_orbs_b": Feigval_b, "Cmat_b": Cmat_b, "Dmat_b": Dmat_b}
def solve_FCeSC(Fmat, Shalf):
Ft = np.einsum("pi,ij,jq->pq",Shalf,Fmat,Shalf)
Feigval, Feigvec = np.linalg.eigh(Ft)
idx = Feigval.argsort()
Feigval = Feigval[idx]
Feigvec = Feigvec[:,idx]
Cmat = np.dot(Shalf, Feigvec)
return Feigval, Cmat
def DIIS_extrapolate_F(diis_err_mats, diis_fmats):
    """Return a DIIS-extrapolated Fock matrix from the stored history.

    Builds the Pulay B matrix of pairwise error-vector overlaps (bordered
    with -1 for the Lagrange-multiplier constraint), solves for the mixing
    coefficients, and returns the corresponding linear combination of the
    stored Fock matrices.
    """
    m = len(diis_err_mats)
    assert m == len(diis_fmats), 'Number of Fock matrices should equal to number of error matrices'
    # B matrix: overlaps of flattened error matrices; border row/column of
    # -1 enforces the sum-to-one constraint, corner element is 0.
    B = np.full((m + 1, m + 1), -1.0)
    for a in range(m):
        e_a = diis_err_mats[a].ravel()
        for b in range(a, m):
            overlap = np.dot(e_a, diis_err_mats[b].ravel())
            B[a, b] = overlap
            B[b, a] = overlap
    B[-1, -1] = 0.0
    # Solve B c = (0, ..., 0, -1)^T for the extrapolation coefficients.
    rhs = np.zeros(m + 1)
    rhs[-1] = -1.0
    coeffs = np.linalg.solve(B, rhs)
    # Mix the stored Fock matrices (last coefficient is the multiplier,
    # which is discarded).
    F_new = np.zeros_like(diis_fmats[-1])
    for c, F in zip(coeffs[:m], diis_fmats):
        F_new = F_new + c * F
    return F_new
|
normal
|
{
"blob_id": "ccc2a976d06e2fa6c91b25c4f95a8f0da32e9b5e",
"index": 7878,
"step-1": "<mask token>\n\n\ndef DIIS_extrapolate_F(diis_err_mats, diis_fmats):\n n_diis = len(diis_err_mats)\n assert n_diis == len(diis_fmats\n ), 'Number of Fock matrices should equal to number of error matrices'\n Bmat = -np.ones([n_diis + 1, n_diis + 1])\n for di in range(n_diis):\n for dj in range(di, n_diis):\n Bmat[di, dj] = Bmat[dj, di] = np.dot(diis_err_mats[di].ravel(),\n diis_err_mats[dj].ravel())\n Bmat[-1, -1] = 0\n right_vec = np.zeros(n_diis + 1)\n right_vec[-1] = -1\n C_array = np.linalg.solve(Bmat, right_vec)\n new_Fmat = np.zeros_like(diis_fmats[-1])\n for di in range(n_diis):\n new_Fmat += C_array[di] * diis_fmats[di]\n return new_Fmat\n",
"step-2": "<mask token>\n\n\ndef solve_FCeSC(Fmat, Shalf):\n Ft = np.einsum('pi,ij,jq->pq', Shalf, Fmat, Shalf)\n Feigval, Feigvec = np.linalg.eigh(Ft)\n idx = Feigval.argsort()\n Feigval = Feigval[idx]\n Feigvec = Feigvec[:, idx]\n Cmat = np.dot(Shalf, Feigvec)\n return Feigval, Cmat\n\n\ndef DIIS_extrapolate_F(diis_err_mats, diis_fmats):\n n_diis = len(diis_err_mats)\n assert n_diis == len(diis_fmats\n ), 'Number of Fock matrices should equal to number of error matrices'\n Bmat = -np.ones([n_diis + 1, n_diis + 1])\n for di in range(n_diis):\n for dj in range(di, n_diis):\n Bmat[di, dj] = Bmat[dj, di] = np.dot(diis_err_mats[di].ravel(),\n diis_err_mats[dj].ravel())\n Bmat[-1, -1] = 0\n right_vec = np.zeros(n_diis + 1)\n right_vec[-1] = -1\n C_array = np.linalg.solve(Bmat, right_vec)\n new_Fmat = np.zeros_like(diis_fmats[-1])\n for di in range(n_diis):\n new_Fmat += C_array[di] * diis_fmats[di]\n return new_Fmat\n",
"step-3": "<mask token>\n\n\ndef solve_unrestricted_hartree_fock(elems, coords, basis_set, charge=0,\n spinmult=1, maxiter=150, enable_DIIS=True, verbose=False):\n \"\"\" Unrestricted Hartree-Fock \"\"\"\n n_electron = sum(chemical_elements.index(e) for e in elems) - charge\n if verbose:\n print('This system has a total of %d electrons' % n_electron)\n n_single_e = spinmult - 1\n if (n_electron + n_single_e) % 2 != 0:\n raise RuntimeError(\n 'The specified charge %d and spinmult %d is impossible!' % (\n charge, spinmult))\n n_a = int((n_electron + n_single_e) / 2)\n n_b = n_electron - n_a\n E_nuc = calc_nuclear_repulsion(elems, coords)\n Smat, Tmat, Vmat = basis_integrals.build_one_e_matrices(elems, coords,\n basis_set)\n Hmat = Tmat + Vmat\n if n_a > len(Smat):\n raise RuntimeError(\n 'Number of basis functions is smaller than number of alpha orbitals'\n )\n G_ao = basis_integrals.build_two_electron_tensor(elems, coords, basis_set)\n if verbose:\n print('One Electron Integrals Calculated:')\n print('\\nOverlap matrix S')\n print(Smat)\n print('\\nKinetic energy matrix T')\n print(Tmat)\n print('\\nNuclear attraction matrix V')\n print(Vmat)\n print('\\nCore Hamiltonian matrix H = T + V')\n print(Hmat)\n print('\\nTwo-electron Integrals G')\n print(G_ao)\n Seigval, Seigvec = np.linalg.eig(Smat)\n Shalf = np.diag(Seigval ** -0.5)\n Shalf = np.dot(Seigvec, np.dot(Shalf, Seigvec.T))\n Dmat_a = np.zeros_like(Smat)\n Dmat_b = np.zeros_like(Smat)\n E_hf = 0\n converged = False\n if enable_DIIS is True:\n n_err_mat = 6\n diis_start_n = 4\n diis_err_mats = []\n diis_fmats_a = []\n diis_fmats_b = []\n if verbose:\n print(' *** DIIS Enabled ***')\n if verbose:\n print('\\n *** SCF Iterations *** ')\n print('Iter HF Energy delta E RMS |D|')\n print('-------------------------------------------------------')\n for i in range(maxiter):\n Fmat = Hmat + np.einsum('rs,pqrs->pq', Dmat_a + Dmat_b, G_ao)\n Fmat_a = Fmat - np.einsum('rs,prqs->pq', Dmat_a, G_ao)\n Fmat_b = Fmat - 
np.einsum('rs,prqs->pq', Dmat_b, G_ao)\n if enable_DIIS and i > 0:\n diis_fmats_a.append(Fmat_a)\n diis_fmats_a = diis_fmats_a[-n_err_mat:]\n diis_fmats_b.append(Fmat_b)\n diis_fmats_b = diis_fmats_b[-n_err_mat:]\n FDS_a = np.einsum('pi,ij,jq->pq', Fmat_a, Dmat_a, Smat)\n SDF_a = np.einsum('pi,ij,jq->pq', Smat, Dmat_a, Fmat_a)\n FDS_b = np.einsum('pi,ij,jq->pq', Fmat_b, Dmat_b, Smat)\n SDF_b = np.einsum('pi,ij,jq->pq', Smat, Dmat_b, Fmat_b)\n diis_err_mats.append(FDS_a - SDF_a + FDS_b - SDF_b)\n diis_err_mats = diis_err_mats[-n_err_mat:]\n n_diis = len(diis_err_mats)\n if n_diis >= diis_start_n:\n Fmat_a = DIIS_extrapolate_F(diis_err_mats, diis_fmats_a)\n Fmat_b = DIIS_extrapolate_F(diis_err_mats, diis_fmats_b)\n Feigval_a, Cmat_a = solve_FCeSC(Fmat_a, Shalf)\n C_occ = Cmat_a[:, :n_a]\n Dmat_a_new = np.dot(C_occ, C_occ.T)\n Feigval_b, Cmat_b = solve_FCeSC(Fmat_b, Shalf)\n C_occ = Cmat_b[:, :n_b]\n Dmat_b_new = np.dot(C_occ, C_occ.T)\n E_hf_new = 0.5 * (np.einsum('pq,pq', Dmat_a, Hmat + Fmat_a) + np.\n einsum('pq,pq', Dmat_b, Hmat + Fmat_b))\n dE = E_hf_new - E_hf\n D_rms = np.sqrt(np.mean((Dmat_a_new - Dmat_a) ** 2)) + np.sqrt(np.\n mean((Dmat_b_new - Dmat_b) ** 2))\n E_hf = E_hf_new\n Dmat_a = Dmat_a_new\n Dmat_b = Dmat_b_new\n if verbose is True:\n print(' %-4d %17.10f %14.4e %14.4e' % (i, E_hf, dE, D_rms))\n if abs(dE) < 1e-10 and abs(D_rms) < 1e-08:\n converged = True\n break\n if converged == False:\n print(\"SCF didn't converge in %d iterations!\" % maxiter)\n raise RuntimeError\n E_total = E_nuc + E_hf\n if verbose:\n print('\\nSCF converged!')\n print('\\nOrbital Energies (Eh) and coefficients for Alpha electrons')\n print('E: ' + ''.join([('%17.7f' % e) for e in Feigval_a]))\n print('-' * (17 * len(Feigval_a) + 4))\n for i, row in enumerate(Cmat_a):\n print('c%-3d' % i + ''.join([('%17.7f' % c) for c in row]))\n print('\\nOrbital Energies (Eh) and coefficients for Beta electrons')\n print('E: ' + ''.join([('%17.7f' % e) for e in Feigval_b]))\n print('-' * 
(17 * len(Feigval_b) + 4))\n for i, row in enumerate(Cmat_b):\n print('c%-3d' % i + ''.join([('%17.7f' % c) for c in row]))\n print('\\nNuclear Repulsion Energy = %17.10f Eh' % E_nuc)\n print('Total Electronic Energy = %17.10f Eh' % E_hf)\n print('Final Total Energy = %17.10f Eh' % E_total)\n return {'E_nuc': E_nuc, 'E_hf': E_hf, 'E_total': E_total, 'E_orbs_a':\n Feigval_a, 'Cmat_a': Cmat_a, 'Dmat_a': Dmat_a, 'E_orbs_b':\n Feigval_b, 'Cmat_b': Cmat_b, 'Dmat_b': Dmat_b}\n\n\ndef solve_FCeSC(Fmat, Shalf):\n Ft = np.einsum('pi,ij,jq->pq', Shalf, Fmat, Shalf)\n Feigval, Feigvec = np.linalg.eigh(Ft)\n idx = Feigval.argsort()\n Feigval = Feigval[idx]\n Feigvec = Feigvec[:, idx]\n Cmat = np.dot(Shalf, Feigvec)\n return Feigval, Cmat\n\n\ndef DIIS_extrapolate_F(diis_err_mats, diis_fmats):\n n_diis = len(diis_err_mats)\n assert n_diis == len(diis_fmats\n ), 'Number of Fock matrices should equal to number of error matrices'\n Bmat = -np.ones([n_diis + 1, n_diis + 1])\n for di in range(n_diis):\n for dj in range(di, n_diis):\n Bmat[di, dj] = Bmat[dj, di] = np.dot(diis_err_mats[di].ravel(),\n diis_err_mats[dj].ravel())\n Bmat[-1, -1] = 0\n right_vec = np.zeros(n_diis + 1)\n right_vec[-1] = -1\n C_array = np.linalg.solve(Bmat, right_vec)\n new_Fmat = np.zeros_like(diis_fmats[-1])\n for di in range(n_diis):\n new_Fmat += C_array[di] * diis_fmats[di]\n return new_Fmat\n",
"step-4": "<mask token>\nimport numpy as np\nfrom qc_python import basis_integrals\nfrom qc_python.common import chemical_elements, calc_nuclear_repulsion\n\n\ndef solve_unrestricted_hartree_fock(elems, coords, basis_set, charge=0,\n spinmult=1, maxiter=150, enable_DIIS=True, verbose=False):\n \"\"\" Unrestricted Hartree-Fock \"\"\"\n n_electron = sum(chemical_elements.index(e) for e in elems) - charge\n if verbose:\n print('This system has a total of %d electrons' % n_electron)\n n_single_e = spinmult - 1\n if (n_electron + n_single_e) % 2 != 0:\n raise RuntimeError(\n 'The specified charge %d and spinmult %d is impossible!' % (\n charge, spinmult))\n n_a = int((n_electron + n_single_e) / 2)\n n_b = n_electron - n_a\n E_nuc = calc_nuclear_repulsion(elems, coords)\n Smat, Tmat, Vmat = basis_integrals.build_one_e_matrices(elems, coords,\n basis_set)\n Hmat = Tmat + Vmat\n if n_a > len(Smat):\n raise RuntimeError(\n 'Number of basis functions is smaller than number of alpha orbitals'\n )\n G_ao = basis_integrals.build_two_electron_tensor(elems, coords, basis_set)\n if verbose:\n print('One Electron Integrals Calculated:')\n print('\\nOverlap matrix S')\n print(Smat)\n print('\\nKinetic energy matrix T')\n print(Tmat)\n print('\\nNuclear attraction matrix V')\n print(Vmat)\n print('\\nCore Hamiltonian matrix H = T + V')\n print(Hmat)\n print('\\nTwo-electron Integrals G')\n print(G_ao)\n Seigval, Seigvec = np.linalg.eig(Smat)\n Shalf = np.diag(Seigval ** -0.5)\n Shalf = np.dot(Seigvec, np.dot(Shalf, Seigvec.T))\n Dmat_a = np.zeros_like(Smat)\n Dmat_b = np.zeros_like(Smat)\n E_hf = 0\n converged = False\n if enable_DIIS is True:\n n_err_mat = 6\n diis_start_n = 4\n diis_err_mats = []\n diis_fmats_a = []\n diis_fmats_b = []\n if verbose:\n print(' *** DIIS Enabled ***')\n if verbose:\n print('\\n *** SCF Iterations *** ')\n print('Iter HF Energy delta E RMS |D|')\n print('-------------------------------------------------------')\n for i in range(maxiter):\n Fmat = Hmat 
+ np.einsum('rs,pqrs->pq', Dmat_a + Dmat_b, G_ao)\n Fmat_a = Fmat - np.einsum('rs,prqs->pq', Dmat_a, G_ao)\n Fmat_b = Fmat - np.einsum('rs,prqs->pq', Dmat_b, G_ao)\n if enable_DIIS and i > 0:\n diis_fmats_a.append(Fmat_a)\n diis_fmats_a = diis_fmats_a[-n_err_mat:]\n diis_fmats_b.append(Fmat_b)\n diis_fmats_b = diis_fmats_b[-n_err_mat:]\n FDS_a = np.einsum('pi,ij,jq->pq', Fmat_a, Dmat_a, Smat)\n SDF_a = np.einsum('pi,ij,jq->pq', Smat, Dmat_a, Fmat_a)\n FDS_b = np.einsum('pi,ij,jq->pq', Fmat_b, Dmat_b, Smat)\n SDF_b = np.einsum('pi,ij,jq->pq', Smat, Dmat_b, Fmat_b)\n diis_err_mats.append(FDS_a - SDF_a + FDS_b - SDF_b)\n diis_err_mats = diis_err_mats[-n_err_mat:]\n n_diis = len(diis_err_mats)\n if n_diis >= diis_start_n:\n Fmat_a = DIIS_extrapolate_F(diis_err_mats, diis_fmats_a)\n Fmat_b = DIIS_extrapolate_F(diis_err_mats, diis_fmats_b)\n Feigval_a, Cmat_a = solve_FCeSC(Fmat_a, Shalf)\n C_occ = Cmat_a[:, :n_a]\n Dmat_a_new = np.dot(C_occ, C_occ.T)\n Feigval_b, Cmat_b = solve_FCeSC(Fmat_b, Shalf)\n C_occ = Cmat_b[:, :n_b]\n Dmat_b_new = np.dot(C_occ, C_occ.T)\n E_hf_new = 0.5 * (np.einsum('pq,pq', Dmat_a, Hmat + Fmat_a) + np.\n einsum('pq,pq', Dmat_b, Hmat + Fmat_b))\n dE = E_hf_new - E_hf\n D_rms = np.sqrt(np.mean((Dmat_a_new - Dmat_a) ** 2)) + np.sqrt(np.\n mean((Dmat_b_new - Dmat_b) ** 2))\n E_hf = E_hf_new\n Dmat_a = Dmat_a_new\n Dmat_b = Dmat_b_new\n if verbose is True:\n print(' %-4d %17.10f %14.4e %14.4e' % (i, E_hf, dE, D_rms))\n if abs(dE) < 1e-10 and abs(D_rms) < 1e-08:\n converged = True\n break\n if converged == False:\n print(\"SCF didn't converge in %d iterations!\" % maxiter)\n raise RuntimeError\n E_total = E_nuc + E_hf\n if verbose:\n print('\\nSCF converged!')\n print('\\nOrbital Energies (Eh) and coefficients for Alpha electrons')\n print('E: ' + ''.join([('%17.7f' % e) for e in Feigval_a]))\n print('-' * (17 * len(Feigval_a) + 4))\n for i, row in enumerate(Cmat_a):\n print('c%-3d' % i + ''.join([('%17.7f' % c) for c in row]))\n print('\\nOrbital 
Energies (Eh) and coefficients for Beta electrons')\n print('E: ' + ''.join([('%17.7f' % e) for e in Feigval_b]))\n print('-' * (17 * len(Feigval_b) + 4))\n for i, row in enumerate(Cmat_b):\n print('c%-3d' % i + ''.join([('%17.7f' % c) for c in row]))\n print('\\nNuclear Repulsion Energy = %17.10f Eh' % E_nuc)\n print('Total Electronic Energy = %17.10f Eh' % E_hf)\n print('Final Total Energy = %17.10f Eh' % E_total)\n return {'E_nuc': E_nuc, 'E_hf': E_hf, 'E_total': E_total, 'E_orbs_a':\n Feigval_a, 'Cmat_a': Cmat_a, 'Dmat_a': Dmat_a, 'E_orbs_b':\n Feigval_b, 'Cmat_b': Cmat_b, 'Dmat_b': Dmat_b}\n\n\ndef solve_FCeSC(Fmat, Shalf):\n Ft = np.einsum('pi,ij,jq->pq', Shalf, Fmat, Shalf)\n Feigval, Feigvec = np.linalg.eigh(Ft)\n idx = Feigval.argsort()\n Feigval = Feigval[idx]\n Feigvec = Feigvec[:, idx]\n Cmat = np.dot(Shalf, Feigvec)\n return Feigval, Cmat\n\n\ndef DIIS_extrapolate_F(diis_err_mats, diis_fmats):\n n_diis = len(diis_err_mats)\n assert n_diis == len(diis_fmats\n ), 'Number of Fock matrices should equal to number of error matrices'\n Bmat = -np.ones([n_diis + 1, n_diis + 1])\n for di in range(n_diis):\n for dj in range(di, n_diis):\n Bmat[di, dj] = Bmat[dj, di] = np.dot(diis_err_mats[di].ravel(),\n diis_err_mats[dj].ravel())\n Bmat[-1, -1] = 0\n right_vec = np.zeros(n_diis + 1)\n right_vec[-1] = -1\n C_array = np.linalg.solve(Bmat, right_vec)\n new_Fmat = np.zeros_like(diis_fmats[-1])\n for di in range(n_diis):\n new_Fmat += C_array[di] * diis_fmats[di]\n return new_Fmat\n",
"step-5": "\n\"\"\"\nAuthor: Yudong Qiu\nFunctions for solving unrestricted Hartree-Fock\n\"\"\"\n\nimport numpy as np\n\nfrom qc_python import basis_integrals\nfrom qc_python.common import chemical_elements, calc_nuclear_repulsion\n\ndef solve_unrestricted_hartree_fock(elems, coords, basis_set, charge=0, spinmult=1, maxiter=150, enable_DIIS=True, verbose=False):\n \"\"\" Unrestricted Hartree-Fock \"\"\"\n # Compute the number of electrons in the system\n n_electron = sum(chemical_elements.index(e) for e in elems) - charge\n if verbose:\n print(\"This system has a total of %d electrons\" % n_electron)\n n_single_e = spinmult - 1\n if (n_electron+n_single_e) % 2 != 0:\n raise RuntimeError(\"The specified charge %d and spinmult %d is impossible!\" % (charge, spinmult))\n # number of alpha and beta orbitals\n n_a = int((n_electron + n_single_e) / 2)\n n_b = n_electron - n_a\n # compute nuclear repulsion energy\n E_nuc = calc_nuclear_repulsion(elems, coords)\n # compute one-electron integral matrices\n Smat, Tmat, Vmat = basis_integrals.build_one_e_matrices(elems, coords, basis_set)\n Hmat = Tmat + Vmat\n # check if we have enough basis functions to hold all the electrons\n if n_a > len(Smat):\n raise RuntimeError(\"Number of basis functions is smaller than number of alpha orbitals\")\n # build two-electron integral tensor g = (pq|rs)\n G_ao = basis_integrals.build_two_electron_tensor(elems, coords, basis_set)\n if verbose:\n print(\"One Electron Integrals Calculated:\")\n print(\"\\nOverlap matrix S\")\n print(Smat)\n print(\"\\nKinetic energy matrix T\")\n print(Tmat)\n print(\"\\nNuclear attraction matrix V\")\n print(Vmat)\n print(\"\\nCore Hamiltonian matrix H = T + V\")\n print(Hmat)\n print(\"\\nTwo-electron Integrals G\")\n print(G_ao)\n # Solve the FC = ESC equation by converting it to Ft C' = E C'\n # Diagonalize overlap matrix and form S^(-1/2) matrix\n Seigval, Seigvec = np.linalg.eig(Smat)\n Shalf = np.diag(Seigval**-0.5)\n Shalf = np.dot(Seigvec, 
np.dot(Shalf, Seigvec.T))\n # intial guess density\n Dmat_a = np.zeros_like(Smat)\n Dmat_b = np.zeros_like(Smat)\n E_hf = 0\n converged = False\n # DIIS\n if enable_DIIS is True:\n n_err_mat = 6\n diis_start_n = 4\n diis_err_mats = []\n #diis_err_mats_a = []\n #diis_err_mats_b = []\n diis_fmats_a = []\n diis_fmats_b = []\n if verbose:\n print(\" *** DIIS Enabled ***\")\n if verbose:\n print(\"\\n *** SCF Iterations *** \")\n print(\"Iter HF Energy delta E RMS |D|\")\n print(\"-------------------------------------------------------\")\n for i in range(maxiter):\n Fmat = Hmat + np.einsum(\"rs,pqrs->pq\",(Dmat_a+Dmat_b),G_ao)\n Fmat_a = Fmat - np.einsum(\"rs,prqs->pq\",Dmat_a,G_ao)\n Fmat_b = Fmat - np.einsum(\"rs,prqs->pq\",Dmat_b,G_ao)\n if enable_DIIS and i > 0:\n ## DIIS for alpha spin\n #FDS = np.einsum(\"pi,ij,jq->pq\",Fmat_a,Dmat_a,Smat)\n #SDF = np.einsum(\"pi,ij,jq->pq\",Smat,Dmat_a,Fmat_a)\n #diis_err_mats_a.append(FDS-SDF)\n #diis_err_mats_a = diis_err_mats_a[-n_err_mat:]\n diis_fmats_a.append(Fmat_a)\n diis_fmats_a = diis_fmats_a[-n_err_mat:]\n ## DIIS for beta spin\n #FDS = np.einsum(\"pi,ij,jq->pq\",Fmat_b,Dmat_b,Smat)\n #SDF = np.einsum(\"pi,ij,jq->pq\",Smat,Dmat_b,Fmat_b)\n #diis_err_mats_b.append(FDS-SDF)\n #diis_err_mats_b = diis_err_mats_b[-n_err_mat:]\n diis_fmats_b.append(Fmat_b)\n diis_fmats_b = diis_fmats_b[-n_err_mat:]\n FDS_a = np.einsum(\"pi,ij,jq->pq\",Fmat_a,Dmat_a,Smat)\n SDF_a = np.einsum(\"pi,ij,jq->pq\",Smat,Dmat_a,Fmat_a)\n FDS_b = np.einsum(\"pi,ij,jq->pq\",Fmat_b,Dmat_b,Smat)\n SDF_b = np.einsum(\"pi,ij,jq->pq\",Smat,Dmat_b,Fmat_b)\n diis_err_mats.append(FDS_a-SDF_a+FDS_b-SDF_b)\n diis_err_mats = diis_err_mats[-n_err_mat:]\n # compute Bmat_ij = Err_i . 
Err_j\n n_diis = len(diis_err_mats)\n if n_diis >= diis_start_n:\n Fmat_a = DIIS_extrapolate_F(diis_err_mats, diis_fmats_a)\n Fmat_b = DIIS_extrapolate_F(diis_err_mats, diis_fmats_b)\n # solve the alpha HF equation F_a C_a = e_a S C_a\n Feigval_a, Cmat_a = solve_FCeSC(Fmat_a, Shalf)\n C_occ = Cmat_a[:, :n_a]\n Dmat_a_new = np.dot(C_occ, C_occ.T)\n # solve the beta HF equation F_b C_b = e_b S C_b\n Feigval_b, Cmat_b = solve_FCeSC(Fmat_b, Shalf)\n C_occ = Cmat_b[:, :n_b]\n Dmat_b_new = np.dot(C_occ, C_occ.T)\n\n E_hf_new = 0.5 * (np.einsum(\"pq,pq\", Dmat_a, Hmat+Fmat_a) + np.einsum(\"pq,pq\", Dmat_b, Hmat+Fmat_b))\n dE = E_hf_new - E_hf\n D_rms = np.sqrt(np.mean((Dmat_a_new-Dmat_a)**2)) + np.sqrt(np.mean((Dmat_b_new-Dmat_b)**2))\n # update E_hf and Dmat\n E_hf = E_hf_new\n Dmat_a = Dmat_a_new\n Dmat_b = Dmat_b_new\n # print iteration information\n if verbose is True:\n print(\" %-4d %17.10f %14.4e %14.4e\" %(i, E_hf, dE, D_rms))\n # check convergence\n if abs(dE) < 1.0E-10 and abs(D_rms) < 1.0E-8:\n converged = True\n break\n if converged == False:\n print(\"SCF didn't converge in %d iterations!\" % maxiter)\n raise RuntimeError\n E_total = E_nuc + E_hf\n if verbose:\n print(\"\\nSCF converged!\")\n print(\"\\nOrbital Energies (Eh) and coefficients for Alpha electrons\")\n print('E: '+''.join([\"%17.7f\"%e for e in Feigval_a]))\n print('-' * (17 * len(Feigval_a) + 4))\n for i,row in enumerate(Cmat_a):\n print('c%-3d'%i + ''.join([\"%17.7f\"%c for c in row]))\n print(\"\\nOrbital Energies (Eh) and coefficients for Beta electrons\")\n print('E: '+''.join([\"%17.7f\"%e for e in Feigval_b]))\n print('-' * (17 * len(Feigval_b) + 4))\n for i,row in enumerate(Cmat_b):\n print('c%-3d'%i + ''.join([\"%17.7f\"%c for c in row]))\n print(\"\\nNuclear Repulsion Energy = %17.10f Eh\" % E_nuc)\n print(\"Total Electronic Energy = %17.10f Eh\" % E_hf)\n print(\"Final Total Energy = %17.10f Eh\" % E_total)\n return {\"E_nuc\":E_nuc, \"E_hf\": E_hf, \"E_total\":E_total, \"E_orbs_a\": 
Feigval_a, \"Cmat_a\": Cmat_a, \"Dmat_a\": Dmat_a,\n \"E_orbs_b\": Feigval_b, \"Cmat_b\": Cmat_b, \"Dmat_b\": Dmat_b}\n\ndef solve_FCeSC(Fmat, Shalf):\n Ft = np.einsum(\"pi,ij,jq->pq\",Shalf,Fmat,Shalf)\n Feigval, Feigvec = np.linalg.eigh(Ft)\n idx = Feigval.argsort()\n Feigval = Feigval[idx]\n Feigvec = Feigvec[:,idx]\n Cmat = np.dot(Shalf, Feigvec)\n return Feigval, Cmat\n\ndef DIIS_extrapolate_F(diis_err_mats, diis_fmats):\n n_diis = len(diis_err_mats)\n assert n_diis == len(diis_fmats), 'Number of Fock matrices should equal to number of error matrices'\n Bmat = -np.ones([n_diis+1, n_diis+1])\n for di in range(n_diis):\n for dj in range(di, n_diis):\n Bmat[di,dj] = Bmat[dj,di] = np.dot(diis_err_mats[di].ravel(), diis_err_mats[dj].ravel())\n Bmat[-1,-1] = 0\n # Solve the equation Bmat * C = [0,0,..,-1]\n right_vec = np.zeros(n_diis+1)\n right_vec[-1] = -1\n C_array = np.linalg.solve(Bmat, right_vec)\n # Form the new guess Fmat\n new_Fmat = np.zeros_like(diis_fmats[-1])\n for di in range(n_diis):\n new_Fmat += C_array[di] * diis_fmats[di]\n return new_Fmat",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def possibleWords(a, N, index=0, s=''):
    """Enumerate every keypad word for the digit sequence `a` of length `N`:
    each completed word is appended to the module-level `final` list and
    printed space-separated."""
    # NOTE(review): relies on module globals `final` (accumulator list) and
    # `refer` (digit -> letters map) defined elsewhere in this file.
    if index == N:
        final.append(s)
        print(s, end=' ')
        return
    for ch in refer[a[0]]:
        # Pass the extended prefix down instead of mutate-then-undo.
        possibleWords(a[1:], N, index + 1, s + ch)
<|reserved_special_token_1|>
# Accumulates every word produced by possibleWords (module-level, shared).
final = []
# Classic phone-keypad mapping of digits 2-9 to their letters.
refer = {2: 'abc', 3: 'def', 4: 'ghi', 5: 'jkl',
         6: 'mno', 7: 'pqrs', 8: 'tuv', 9: 'wxyz'}


def possibleWords(a, N, index=0, s=''):
    """Print (space-separated) and collect into `final` every letter
    combination for the digit sequence `a` of length `N`."""
    if index == N:
        # A full word has been formed: record and echo it.
        final.append(s)
        print(s, end=' ')
        return
    letters = refer[a[0]]
    for ch in letters:
        # Recurse with the prefix extended by one letter.
        possibleWords(a[1:], N, index + 1, s + ch)
<|reserved_special_token_1|>
final = []  # collects every generated word across calls
# Phone-keypad digit -> letters table (digits 2-9 only).
refer = {2: 'abc', 3: 'def', 4: 'ghi', 5: 'jkl',
         6: 'mno', 7: 'pqrs', 8: 'tuv', 9: 'wxyz'}


def possibleWords(a, N, index=0, s=''):
    """Recursively enumerate all keypad words for the digit list `a` of
    length `N`; each finished word is printed (space-separated) and
    appended to `final`."""
    if index == N:
        final.append(s)
        print(s, end=' ')
        return
    for letter in refer[a[0]]:
        # Extend the prefix by one letter and recurse on the remaining digits.
        possibleWords(a[1:], N, index + 1, s + letter)
|
flexible
|
{
"blob_id": "5f237a820832181395de845cc25b661878c334e4",
"index": 9965,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef possibleWords(a, N, index=0, s=''):\n if index == N:\n final.append(s)\n print(s, end=' ')\n return\n possible_chars = refer[a[0]]\n for i in possible_chars:\n s += i\n possibleWords(a[1:], N, index + 1, s)\n s = s[:-1]\n",
"step-3": "final = []\nrefer = {(2): 'abc', (3): 'def', (4): 'ghi', (5): 'jkl', (6): 'mno', (7):\n 'pqrs', (8): 'tuv', (9): 'wxyz'}\n\n\ndef possibleWords(a, N, index=0, s=''):\n if index == N:\n final.append(s)\n print(s, end=' ')\n return\n possible_chars = refer[a[0]]\n for i in possible_chars:\n s += i\n possibleWords(a[1:], N, index + 1, s)\n s = s[:-1]\n",
"step-4": "final=[]\nrefer={2:'abc',3:'def',4:'ghi',5:'jkl',6:'mno',7:'pqrs',8:'tuv',9:'wxyz'}\n##Complete this function\ndef possibleWords(a,N,index=0,s=''):\n ##Your code here\n \n if index==N:\n final.append(s)\n print(s, end=' ')\n return\n \n possible_chars=refer[a[0]]\n for i in possible_chars:\n \n s+= i\n possibleWords(a[1:],N,index+1,s)\n s=s[:-1]\n \n \n \n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.