code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
# Display multiple images side by side using matplotlib.
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt


def main():
    """Load two images with OpenCV and show them in a 1x2 subplot grid."""
    # Raw strings keep backslashes literal; plain "C:\..." only works by
    # accident because these particular escapes happen to be invalid.
    imgpath1 = r"C:\Shreyas\OpenCv\DIP_OpenCV\lena.png"
    imgpath2 = r"C:\Shreyas\OpenCv\DIP_OpenCV\lena.png"

    img1 = cv2.imread(imgpath1, 1)
    img2 = cv2.imread(imgpath2, 1)
    # cv2.imread returns None on failure instead of raising -- fail loudly
    # here rather than with a confusing matplotlib error later.
    if img1 is None or img2 is None:
        raise FileNotFoundError("could not read one of the input images")

    # cv2.imread(..., 1) loads channels in BGR order; matplotlib expects
    # RGB, so convert or the colors render swapped.
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)

    titles = ['Pepper Gray', 'Peppers Color']
    images = [img1, img2]

    for i in range(2):
        plt.subplot(1, 2, i + 1)
        plt.imshow(images[i])
        plt.title(titles[i])
        # Hide axis ticks; they carry no information for image display.
        plt.xticks([])
        plt.yticks([])

    plt.show()


if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "2867a7b24b4911b2936cb34653fa57431c14d6a3",
"index": 7319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n imgpath1 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n imgpath2 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n img1 = cv2.imread(imgpath1, 1)\n img2 = cv2.imread(imgpath2, 1)\n titles = ['Pepper Gray', 'Peppers Color']\n images = [img1, img2]\n for i in range(2):\n plt.subplot(1, 2, i + 1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n imgpath1 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n imgpath2 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n img1 = cv2.imread(imgpath1, 1)\n img2 = cv2.imread(imgpath2, 1)\n titles = ['Pepper Gray', 'Peppers Color']\n images = [img1, img2]\n for i in range(2):\n plt.subplot(1, 2, i + 1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef main():\n imgpath1 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n imgpath2 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n img1 = cv2.imread(imgpath1, 1)\n img2 = cv2.imread(imgpath2, 1)\n titles = ['Pepper Gray', 'Peppers Color']\n images = [img1, img2]\n for i in range(2):\n plt.subplot(1, 2, i + 1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#Displaying multiple images using matplotlib\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\ndef main():\n \n imgpath1=\"C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png\"\n imgpath2=\"C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png\"\n \n img1=cv2.imread(imgpath1,1) \n img2=cv2.imread(imgpath2,1)\n \n titles = ['Pepper Gray', 'Peppers Color']\n images = [img1, img2]\n \n for i in range(2):\n plt.subplot(1, 2, i+1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n\n plt.show() \n \nif __name__ == \"__main__\":\n main()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class UpdateDbDataView(View):
<|reserved_special_token_0|>
def get(self, request, testupdatadb_id):
if request.user.username == 'check':
return render(request, 'canNotAddupdatedbdata.html', {
'django_server_yuming': DJANGO_SERVER_YUMING})
elif request.user.is_active:
updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))
updatedbdata_all = UpdateDbData.objects.all().order_by('-id')
return render(request, 'updatedbdata/updatedbdata.html', {
'updatedbdata': updatedbdata, 'updatedbdata_all':
updatedbdata_all, 'django_server_yuming': DJANGO_SERVER_YUMING}
)
else:
return render(request, 'addContentError.html', {
'django_server_yuming': DJANGO_SERVER_YUMING})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UpdateDbDataView(View):
<|reserved_special_token_0|>
def get(self, request, testupdatadb_id):
if request.user.username == 'check':
return render(request, 'canNotAddupdatedbdata.html', {
'django_server_yuming': DJANGO_SERVER_YUMING})
elif request.user.is_active:
updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))
updatedbdata_all = UpdateDbData.objects.all().order_by('-id')
return render(request, 'updatedbdata/updatedbdata.html', {
'updatedbdata': updatedbdata, 'updatedbdata_all':
updatedbdata_all, 'django_server_yuming': DJANGO_SERVER_YUMING}
)
else:
return render(request, 'addContentError.html', {
'django_server_yuming': DJANGO_SERVER_YUMING})
def post(self, request, testupdatadb_id):
username = request.user.username
updatedbdata_all = UpdateDbData.objects.all().order_by('-id')
updatedbdata_form = UpdateDbDataForm(request.POST)
updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))
if updatedbdata_form.is_valid():
updatedbdata_form.save(commit=True)
zj = UpdateDbData.objects.all().order_by('-add_time')[:1][0]
user = User.objects.get(username=username)
zj.write_user_id = user.id
zj.save()
updatedbdataid = zj.id
updatedbdataadd = UpdateDbData.objects.get(id=int(updatedbdataid))
return render(request, 'updatedbdata/updatedbdata.html', {
'updatedbdata': updatedbdataadd, 'updatedbdata_all':
updatedbdata_all, 'sumsg': u'添加测试用例---【{}】---成功,请继续添加'.
format(updatedbdataadd.test_case_title),
'django_server_yuming': DJANGO_SERVER_YUMING})
else:
return render(request, 'updatedbdata/updatedbdataForm.html', {
'updatedbdata': updatedbdata, 'updatedbdata_all':
updatedbdata_all, 'updatedbdataform': updatedbdata_form,
'errmsg': u'添加失败,请重新添加,添加时请检查各个字段是否填写',
'django_server_yuming': DJANGO_SERVER_YUMING})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UpdateDbDataView(View):
    """
    View for the test-data copy/authoring page.

    GET renders the page for an existing UpdateDbData record; POST
    validates and saves a new record submitted via UpdateDbDataForm.
    """

    def get(self, request, testupdatadb_id):
        # The special 'check' account is read-only: it may not add records.
        if request.user.username == 'check':
            return render(request, 'canNotAddupdatedbdata.html', {
                'django_server_yuming': DJANGO_SERVER_YUMING})
        elif request.user.is_active:
            # Fetch the requested record plus the full list, newest first.
            updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))
            updatedbdata_all = UpdateDbData.objects.all().order_by('-id')
            return render(request, 'updatedbdata/updatedbdata.html', {
                'updatedbdata': updatedbdata, 'updatedbdata_all':
                updatedbdata_all, 'django_server_yuming': DJANGO_SERVER_YUMING}
                )
        else:
            # Inactive (or anonymous) users get a generic error page.
            return render(request, 'addContentError.html', {
                'django_server_yuming': DJANGO_SERVER_YUMING})

    def post(self, request, testupdatadb_id):
        username = request.user.username
        updatedbdata_all = UpdateDbData.objects.all().order_by('-id')
        updatedbdata_form = UpdateDbDataForm(request.POST)
        updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))
        if updatedbdata_form.is_valid():
            updatedbdata_form.save(commit=True)
            # NOTE(review): re-querying the newest row by add_time to locate
            # the record just saved is race-prone under concurrent posts;
            # form.save() already returns the instance -- confirm before
            # relying on this ordering.
            zj = UpdateDbData.objects.all().order_by('-add_time')[:1][0]
            user = User.objects.get(username=username)
            # Record which user authored the new entry.
            zj.write_user_id = user.id
            zj.save()
            updatedbdataid = zj.id
            updatedbdataadd = UpdateDbData.objects.get(id=int(updatedbdataid))
            return render(request, 'updatedbdata/updatedbdata.html', {
                'updatedbdata': updatedbdataadd, 'updatedbdata_all':
                updatedbdata_all, 'sumsg': u'添加测试用例---【{}】---成功,请继续添加'.
                format(updatedbdataadd.test_case_title),
                'django_server_yuming': DJANGO_SERVER_YUMING})
        else:
            # Validation failed: re-render the form with an error message.
            return render(request, 'updatedbdata/updatedbdataForm.html', {
                'updatedbdata': updatedbdata, 'updatedbdata_all':
                updatedbdata_all, 'updatedbdataform': updatedbdata_form,
                'errmsg': u'添加失败,请重新添加,添加时请检查各个字段是否填写',
                'django_server_yuming': DJANGO_SERVER_YUMING})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.views.generic import View
from .models import UpdateDbData, User
from wanwenyc.settings import DJANGO_SERVER_YUMING
from .forms import UpdateDbDataForm
class UpdateDbDataView(View):
    """
    View for the test-data copy/authoring page.

    GET renders the page for an existing UpdateDbData record; POST
    validates and saves a new record submitted via UpdateDbDataForm.
    """

    def get(self, request, testupdatadb_id):
        # The special 'check' account is read-only: it may not add records.
        if request.user.username == 'check':
            return render(request, 'canNotAddupdatedbdata.html', {
                'django_server_yuming': DJANGO_SERVER_YUMING})
        elif request.user.is_active:
            # Fetch the requested record plus the full list, newest first.
            updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))
            updatedbdata_all = UpdateDbData.objects.all().order_by('-id')
            return render(request, 'updatedbdata/updatedbdata.html', {
                'updatedbdata': updatedbdata, 'updatedbdata_all':
                updatedbdata_all, 'django_server_yuming': DJANGO_SERVER_YUMING}
                )
        else:
            # Inactive (or anonymous) users get a generic error page.
            return render(request, 'addContentError.html', {
                'django_server_yuming': DJANGO_SERVER_YUMING})

    def post(self, request, testupdatadb_id):
        username = request.user.username
        updatedbdata_all = UpdateDbData.objects.all().order_by('-id')
        updatedbdata_form = UpdateDbDataForm(request.POST)
        updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))
        if updatedbdata_form.is_valid():
            updatedbdata_form.save(commit=True)
            # NOTE(review): re-querying the newest row by add_time to locate
            # the record just saved is race-prone under concurrent posts;
            # form.save() already returns the instance -- confirm before
            # relying on this ordering.
            zj = UpdateDbData.objects.all().order_by('-add_time')[:1][0]
            user = User.objects.get(username=username)
            # Record which user authored the new entry.
            zj.write_user_id = user.id
            zj.save()
            updatedbdataid = zj.id
            updatedbdataadd = UpdateDbData.objects.get(id=int(updatedbdataid))
            return render(request, 'updatedbdata/updatedbdata.html', {
                'updatedbdata': updatedbdataadd, 'updatedbdata_all':
                updatedbdata_all, 'sumsg': u'添加测试用例---【{}】---成功,请继续添加'.
                format(updatedbdataadd.test_case_title),
                'django_server_yuming': DJANGO_SERVER_YUMING})
        else:
            # Validation failed: re-render the form with an error message.
            return render(request, 'updatedbdata/updatedbdataForm.html', {
                'updatedbdata': updatedbdata, 'updatedbdata_all':
                updatedbdata_all, 'updatedbdataform': updatedbdata_form,
                'errmsg': u'添加失败,请重新添加,添加时请检查各个字段是否填写',
                'django_server_yuming': DJANGO_SERVER_YUMING})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.views.generic import View #导入View
from .models import UpdateDbData,User
from wanwenyc.settings import DJANGO_SERVER_YUMING
from .forms import UpdateDbDataForm
# Create your views here.
#添加场景的view
class UpdateDbDataView(View): # inherits from django.views.generic.View
    """
    View for the test-data copy/authoring page.
    """
    def get(self,request,testupdatadb_id):
        # The special 'check' account is read-only: it may not add records.
        if request.user.username == 'check':
            return render(request, "canNotAddupdatedbdata.html",{
                "django_server_yuming":DJANGO_SERVER_YUMING
            })
        elif request.user.is_active:
            updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id)) # fetch the test case
            updatedbdata_all = UpdateDbData.objects.all().order_by("-id")
            return render(request,"updatedbdata/updatedbdata.html",
                          {"updatedbdata":updatedbdata,
                           "updatedbdata_all":updatedbdata_all,
                           "django_server_yuming": DJANGO_SERVER_YUMING,
                           })
        else:
            # Inactive (or anonymous) users get a generic error page.
            return render(request,"addContentError.html",{
                "django_server_yuming": DJANGO_SERVER_YUMING
            })

    def post(self, request,testupdatadb_id):
        username = request.user.username
        updatedbdata_all = UpdateDbData.objects.all().order_by("-id")
        updatedbdata_form = UpdateDbDataForm(request.POST) # instantiate UpdateDbDataForm from POST data
        updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id)) # fetch the test case

        if updatedbdata_form.is_valid(): # is_valid() checks the form for errors

            updatedbdata_form.save(commit=True) # persist the submitted data

            # NOTE(review): looking up the just-saved row as "newest by
            # add_time" is race-prone under concurrent posts; form.save()
            # already returns the instance -- confirm before relying on this.
            zj = UpdateDbData.objects.all().order_by('-add_time')[:1][0] # newest record by add time
            user = User.objects.get(username=username)
            zj.write_user_id = user.id  # record which user authored the entry
            zj.save()

            updatedbdataid = zj.id
            updatedbdataadd = UpdateDbData.objects.get(id=int(updatedbdataid)) # re-fetch the saved test case
            return render(request, "updatedbdata/updatedbdata.html", {
                "updatedbdata": updatedbdataadd,
                "updatedbdata_all": updatedbdata_all,
                "sumsg":u"添加测试用例---【{}】---成功,请继续添加".format(updatedbdataadd.test_case_title),
                "django_server_yuming": DJANGO_SERVER_YUMING,
            })
        else:
            return render(request, 'updatedbdata/updatedbdataForm.html', {
                "updatedbdata": updatedbdata,
                "updatedbdata_all": updatedbdata_all,
                "updatedbdataform": updatedbdata_form,
                "errmsg":u"添加失败,请重新添加,添加时请检查各个字段是否填写",
                "django_server_yuming": DJANGO_SERVER_YUMING,
            }) # re-render the form page with the submitted values filled in
|
flexible
|
{
"blob_id": "129c7f349e2723d9555da44ae62f7cfb7227b9ae",
"index": 5618,
"step-1": "<mask token>\n\n\nclass UpdateDbDataView(View):\n <mask token>\n\n def get(self, request, testupdatadb_id):\n if request.user.username == 'check':\n return render(request, 'canNotAddupdatedbdata.html', {\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n elif request.user.is_active:\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))\n updatedbdata_all = UpdateDbData.objects.all().order_by('-id')\n return render(request, 'updatedbdata/updatedbdata.html', {\n 'updatedbdata': updatedbdata, 'updatedbdata_all':\n updatedbdata_all, 'django_server_yuming': DJANGO_SERVER_YUMING}\n )\n else:\n return render(request, 'addContentError.html', {\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass UpdateDbDataView(View):\n <mask token>\n\n def get(self, request, testupdatadb_id):\n if request.user.username == 'check':\n return render(request, 'canNotAddupdatedbdata.html', {\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n elif request.user.is_active:\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))\n updatedbdata_all = UpdateDbData.objects.all().order_by('-id')\n return render(request, 'updatedbdata/updatedbdata.html', {\n 'updatedbdata': updatedbdata, 'updatedbdata_all':\n updatedbdata_all, 'django_server_yuming': DJANGO_SERVER_YUMING}\n )\n else:\n return render(request, 'addContentError.html', {\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n\n def post(self, request, testupdatadb_id):\n username = request.user.username\n updatedbdata_all = UpdateDbData.objects.all().order_by('-id')\n updatedbdata_form = UpdateDbDataForm(request.POST)\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))\n if updatedbdata_form.is_valid():\n updatedbdata_form.save(commit=True)\n zj = UpdateDbData.objects.all().order_by('-add_time')[:1][0]\n user = User.objects.get(username=username)\n zj.write_user_id = user.id\n zj.save()\n updatedbdataid = zj.id\n updatedbdataadd = UpdateDbData.objects.get(id=int(updatedbdataid))\n return render(request, 'updatedbdata/updatedbdata.html', {\n 'updatedbdata': updatedbdataadd, 'updatedbdata_all':\n updatedbdata_all, 'sumsg': u'添加测试用例---【{}】---成功,请继续添加'.\n format(updatedbdataadd.test_case_title),\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n else:\n return render(request, 'updatedbdata/updatedbdataForm.html', {\n 'updatedbdata': updatedbdata, 'updatedbdata_all':\n updatedbdata_all, 'updatedbdataform': updatedbdata_form,\n 'errmsg': u'添加失败,请重新添加,添加时请检查各个字段是否填写',\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n",
"step-3": "<mask token>\n\n\nclass UpdateDbDataView(View):\n \"\"\"\n 测试数据复制编写页面处理\n \"\"\"\n\n def get(self, request, testupdatadb_id):\n if request.user.username == 'check':\n return render(request, 'canNotAddupdatedbdata.html', {\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n elif request.user.is_active:\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))\n updatedbdata_all = UpdateDbData.objects.all().order_by('-id')\n return render(request, 'updatedbdata/updatedbdata.html', {\n 'updatedbdata': updatedbdata, 'updatedbdata_all':\n updatedbdata_all, 'django_server_yuming': DJANGO_SERVER_YUMING}\n )\n else:\n return render(request, 'addContentError.html', {\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n\n def post(self, request, testupdatadb_id):\n username = request.user.username\n updatedbdata_all = UpdateDbData.objects.all().order_by('-id')\n updatedbdata_form = UpdateDbDataForm(request.POST)\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))\n if updatedbdata_form.is_valid():\n updatedbdata_form.save(commit=True)\n zj = UpdateDbData.objects.all().order_by('-add_time')[:1][0]\n user = User.objects.get(username=username)\n zj.write_user_id = user.id\n zj.save()\n updatedbdataid = zj.id\n updatedbdataadd = UpdateDbData.objects.get(id=int(updatedbdataid))\n return render(request, 'updatedbdata/updatedbdata.html', {\n 'updatedbdata': updatedbdataadd, 'updatedbdata_all':\n updatedbdata_all, 'sumsg': u'添加测试用例---【{}】---成功,请继续添加'.\n format(updatedbdataadd.test_case_title),\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n else:\n return render(request, 'updatedbdata/updatedbdataForm.html', {\n 'updatedbdata': updatedbdata, 'updatedbdata_all':\n updatedbdata_all, 'updatedbdataform': updatedbdata_form,\n 'errmsg': u'添加失败,请重新添加,添加时请检查各个字段是否填写',\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n",
"step-4": "from django.shortcuts import render\nfrom django.views.generic import View\nfrom .models import UpdateDbData, User\nfrom wanwenyc.settings import DJANGO_SERVER_YUMING\nfrom .forms import UpdateDbDataForm\n\n\nclass UpdateDbDataView(View):\n \"\"\"\n 测试数据复制编写页面处理\n \"\"\"\n\n def get(self, request, testupdatadb_id):\n if request.user.username == 'check':\n return render(request, 'canNotAddupdatedbdata.html', {\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n elif request.user.is_active:\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))\n updatedbdata_all = UpdateDbData.objects.all().order_by('-id')\n return render(request, 'updatedbdata/updatedbdata.html', {\n 'updatedbdata': updatedbdata, 'updatedbdata_all':\n updatedbdata_all, 'django_server_yuming': DJANGO_SERVER_YUMING}\n )\n else:\n return render(request, 'addContentError.html', {\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n\n def post(self, request, testupdatadb_id):\n username = request.user.username\n updatedbdata_all = UpdateDbData.objects.all().order_by('-id')\n updatedbdata_form = UpdateDbDataForm(request.POST)\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id))\n if updatedbdata_form.is_valid():\n updatedbdata_form.save(commit=True)\n zj = UpdateDbData.objects.all().order_by('-add_time')[:1][0]\n user = User.objects.get(username=username)\n zj.write_user_id = user.id\n zj.save()\n updatedbdataid = zj.id\n updatedbdataadd = UpdateDbData.objects.get(id=int(updatedbdataid))\n return render(request, 'updatedbdata/updatedbdata.html', {\n 'updatedbdata': updatedbdataadd, 'updatedbdata_all':\n updatedbdata_all, 'sumsg': u'添加测试用例---【{}】---成功,请继续添加'.\n format(updatedbdataadd.test_case_title),\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n else:\n return render(request, 'updatedbdata/updatedbdataForm.html', {\n 'updatedbdata': updatedbdata, 'updatedbdata_all':\n updatedbdata_all, 'updatedbdataform': updatedbdata_form,\n 'errmsg': 
u'添加失败,请重新添加,添加时请检查各个字段是否填写',\n 'django_server_yuming': DJANGO_SERVER_YUMING})\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic import View #导入View\n\n\nfrom .models import UpdateDbData,User\nfrom wanwenyc.settings import DJANGO_SERVER_YUMING\n\nfrom .forms import UpdateDbDataForm\n\n\n\n# Create your views here.\n#添加场景的view\nclass UpdateDbDataView(View): #继承View\n \"\"\"\n 测试数据复制编写页面处理\n \"\"\"\n def get(self,request,testupdatadb_id):\n if request.user.username == 'check':\n return render(request, \"canNotAddupdatedbdata.html\",{\n \"django_server_yuming\":DJANGO_SERVER_YUMING\n })\n elif request.user.is_active:\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id)) #获取用例\n updatedbdata_all = UpdateDbData.objects.all().order_by(\"-id\")\n return render(request,\"updatedbdata/updatedbdata.html\",\n {\"updatedbdata\":updatedbdata,\n \"updatedbdata_all\":updatedbdata_all,\n \"django_server_yuming\": DJANGO_SERVER_YUMING,\n })\n else:\n return render(request,\"addContentError.html\",{\n \"django_server_yuming\": DJANGO_SERVER_YUMING\n })\n\n def post(self, request,testupdatadb_id):\n username = request.user.username\n updatedbdata_all = UpdateDbData.objects.all().order_by(\"-id\")\n updatedbdata_form = UpdateDbDataForm(request.POST) # 实例化updatedbdataForm()\n updatedbdata = UpdateDbData.objects.get(id=int(testupdatadb_id)) # 获取用例\n\n if updatedbdata_form.is_valid(): # is_valid()判断是否有错\n\n updatedbdata_form.save(commit=True) # 将信息保存到数据库中\n\n zj = UpdateDbData.objects.all().order_by('-add_time')[:1][0] # 根据添加时间查询最新的\n user = User.objects.get(username=username)\n zj.write_user_id = user.id\n zj.save()\n\n updatedbdataid = zj.id\n updatedbdataadd = UpdateDbData.objects.get(id=int(updatedbdataid)) # 获取用例\n return render(request, \"updatedbdata/updatedbdata.html\", {\n \"updatedbdata\": updatedbdataadd,\n \"updatedbdata_all\": updatedbdata_all,\n \"sumsg\":u\"添加测试用例---【{}】---成功,请继续添加\".format(updatedbdataadd.test_case_title),\n \"django_server_yuming\": DJANGO_SERVER_YUMING,\n })\n else:\n return render(request, 
'updatedbdata/updatedbdataForm.html', {\n \"updatedbdata\": updatedbdata,\n \"updatedbdata_all\": updatedbdata_all,\n \"updatedbdataform\": updatedbdata_form,\n \"errmsg\":u\"添加失败,请重新添加,添加时请检查各个字段是否填写\",\n \"django_server_yuming\": DJANGO_SERVER_YUMING,\n }) # 返回页面,回填信息\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('chr01.txt') as a:
while 1:
seq = a.read(2)
seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a'
).replace('11', 't')
seq2 += seq
if not seq:
break
print(len(seq2))
print(seq2)
<|reserved_special_token_1|>
# Decode a 2-bit-per-base text file back into its base sequence:
# each aligned two-character code maps 00->c, 01->g, 10->a, 11->t.
seq2 = ''
with open('chr01.txt') as a:
    bits = a.read()

# Decode aligned pairs; pairs that match no code (e.g. a trailing odd
# character or newline) pass through unchanged, matching the old per-pair
# replace() behavior. Joining once avoids the quadratic `seq2 += seq` loop.
decode = {'00': 'c', '01': 'g', '10': 'a', '11': 't'}
seq2 = ''.join(decode.get(bits[i:i + 2], bits[i:i + 2])
               for i in range(0, len(bits), 2))

print(len(seq2))
print(seq2)
<|reserved_special_token_1|>
# Open a base-to-bits converted file and turn it back into base letters.
seq2 = ''
with open('chr01.txt') as a:
    while True:
        pair = a.read(2)
        if not pair:
            # End of file: nothing left to decode.
            break
        # Map each aligned two-character code back to its base letter.
        pair = pair.replace('00', 'c').replace('01', 'g').replace('10', 'a').replace('11', 't')
        seq2 += pair

print(len(seq2))
print(seq2)
|
flexible
|
{
"blob_id": "c2f859e0ed0e812768dec04b2b1f9ddd349350f6",
"index": 9780,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('chr01.txt') as a:\n while 1:\n seq = a.read(2)\n seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a'\n ).replace('11', 't')\n seq2 += seq\n if not seq:\n break\nprint(len(seq2))\nprint(seq2)\n",
"step-3": "seq2 = ''\nwith open('chr01.txt') as a:\n while 1:\n seq = a.read(2)\n seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a'\n ).replace('11', 't')\n seq2 += seq\n if not seq:\n break\nprint(len(seq2))\nprint(seq2)\n",
"step-4": "# open a converted base to bits file and convert it back to the base sequences\n\nseq2 = ''\nwith open('chr01.txt') as a:\n while 1:\n seq = a.read(2)\n # print(seq)\n seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a').replace('11', 't')\n seq2 += seq\n if not seq:\n break\n\nprint(len(seq2))\nprint(seq2)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Please Note:
Note: It is intended for some problems to be ambiguous. You should gather all requirements up front before implementing one.
Please think of all the corner cases and clarifications yourself.
Validate if a given string is numeric.
Examples:
1."0" => true
2." 0.1 " => true
3."abc" => false
4."1 a" => false
5."2e10" => true
Return 0 / 1 ( 0 for false, 1 for true ) for this problem
Clarify the question using “See Expected Output”
1.Is 1u ( which may be a representation for unsigned integers valid?
For this problem, no.
2.Is 0.1e10 valid?
Yes
3.-01.1e-10?
Yes
4.Hexadecimal numbers like 0xFF?
Not for the purpose of this problem
5. 3. (. not followed by a digit)?
No
6.Can exponent have decimal numbers? 3e0.1?
Not for this problem.
7.Is 1f ( floating point number with f as prefix ) valid?
Not for this problem.
8.How about 1000LL or 1000L ( C++ representation for long and long long numbers )?
Not for this problem.
9.How about integers preceded by 00 or 0? like 008?
Yes for this problem
'''
class Solution:
    # @param A : string
    # @return an integer
    def isNumber(self, A):
        """Return 1 if A is a valid numeric string, else 0.

        Accepts optional surrounding spaces, an optional sign, at most one
        decimal point, and an optional integer exponent introduced by 'e'
        (e.g. "-01.1e-10"). Rejects "3." (dot not followed by a digit).
        """
        # Trim surrounding spaces only (other whitespace stays invalid).
        A = A.strip(' ')
        if len(A) == 0:
            return 0
        # Only digits, '.', 'e' and signs may appear anywhere in the string.
        for c in A:
            if c not in [str(i) for i in range(10)] + ['.', 'e', '-', '+']:
                return 0
        if 'e' in A:
            parts = A.split('e')
            if len(parts) != 2:
                # More than one 'e' (or a stray one) is invalid.
                return 0
            # Mantissa may be a decimal; exponent must be a signed integer.
            return int(self.isnum(parts[0], 0) and self.isnum(parts[1], 1))
        return int(self.isnum(A, 0))

    def isnum(self, A, i):
        """Return True if A is a valid number component.

        i == 1: A must be a (optionally signed) integer.
        i == 0: A may additionally contain one decimal point.
        """
        if A == '':
            return False
        if i == 1 or (i == 0 and '.' not in A):
            # Signed-integer case: strip one leading sign, then digits only.
            if A[0] in ['+', '-']:
                A = A[1:]
                if A == '':
                    return False
            return A.isdigit()
        parts = A.split('.')
        if len(parts) != 2:
            # Bug fix: more than one decimal point (e.g. "1.2.3") was
            # previously accepted because only parts[0]/parts[1] were checked.
            return False
        whole, frac = parts
        # Bug fix: the fractional part must be bare digits -- no sign
        # ("1.-5" was previously accepted) and non-empty ("3." is invalid).
        if not frac.isdigit():
            return False
        # The whole part may be empty (".5") or a signed integer.
        return whole == '' or self.isnum(whole, 1)
|
normal
|
{
"blob_id": "50be2cbdaec6ed76e5d9367c6a83222f9153db82",
"index": 7426,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def isNumber(self, A):\n while len(A) > 0 and A[0] == ' ':\n A = A[1:]\n A = A[::-1]\n while len(A) > 0 and A[0] == ' ':\n A = A[1:]\n A = A[::-1]\n if len(A) == 0:\n return 0\n for c in A:\n if c not in [str(i) for i in range(10)] + ['.', 'e', '-', '+']:\n return 0\n if 'e' in A:\n A = A.split('e')\n if len(A) != 2:\n return 0\n return int(self.isnum(A[0], 0) and self.isnum(A[1], 1))\n return int(self.isnum(A, 0))\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def isNumber(self, A):\n while len(A) > 0 and A[0] == ' ':\n A = A[1:]\n A = A[::-1]\n while len(A) > 0 and A[0] == ' ':\n A = A[1:]\n A = A[::-1]\n if len(A) == 0:\n return 0\n for c in A:\n if c not in [str(i) for i in range(10)] + ['.', 'e', '-', '+']:\n return 0\n if 'e' in A:\n A = A.split('e')\n if len(A) != 2:\n return 0\n return int(self.isnum(A[0], 0) and self.isnum(A[1], 1))\n return int(self.isnum(A, 0))\n\n def isnum(self, A, i):\n if A == '':\n return False\n if i == 1 or i == 0 and '.' not in A:\n if A[0] in ['+', '-']:\n A = A[1:]\n if A == '':\n return False\n for c in A:\n if c not in [str(i) for i in range(10)]:\n return False\n return True\n A = A.split('.')\n return (self.isnum(A[0], 1) or A[0] == '') and self.isnum(A[1], 1)\n",
"step-5": "'''\nPlease Note:\nNote: It is intended for some problems to be ambiguous. You should gather all requirements up front before implementing one.\n\nPlease think of all the corner cases and clarifications yourself.\n\nValidate if a given string is numeric.\n\nExamples:\n\n1.\"0\" => true\n2.\" 0.1 \" => true\n3.\"abc\" => false\n4.\"1 a\" => false\n5.\"2e10\" => true\nReturn 0 / 1 ( 0 for false, 1 for true ) for this problem\n\nClarify the question using “See Expected Output”\n\n1.Is 1u ( which may be a representation for unsigned integers valid?\nFor this problem, no.\n2.Is 0.1e10 valid?\nYes\n3.-01.1e-10?\nYes\n4.Hexadecimal numbers like 0xFF?\nNot for the purpose of this problem\n5. 3. (. not followed by a digit)?\nNo\n6.Can exponent have decimal numbers? 3e0.1?\nNot for this problem.\n7.Is 1f ( floating point number with f as prefix ) valid?\nNot for this problem.\n8.How about 1000LL or 1000L ( C++ representation for long and long long numbers )?\nNot for this problem.\n9.How about integers preceded by 00 or 0? like 008?\nYes for this problem\n'''\nclass Solution:\n # @param A : string\n # @return an integer\n def isNumber(self, A):\n while len(A)>0 and A[0]==' ':\n A = A[1:]\n A=A[::-1]\n while len(A)>0 and A[0]==' ':\n A = A[1:]\n A=A[::-1]\n if len(A)==0:\n return 0\n for c in A:\n if c not in [str(i) for i in range(10)] + ['.', 'e', '-', '+']:\n return 0\n if 'e' in A:\n A = A.split('e')\n if len(A)!=2:\n return 0\n return int(self.isnum(A[0], 0) and self.isnum(A[1], 1))\n return int(self.isnum(A, 0))\n \n def isnum(self, A, i):\n #print(A,i)\n if A=='':\n return False\n if i == 1 or (i == 0 and '.' not in A):\n if A[0] in ['+', '-']:\n A = A[1:]\n if A == '':\n return False\n for c in A:\n if c not in [str(i) for i in range(10)]:\n return False\n return True\n A = A.split('.')\n return (self.isnum(A[0], 1) or A[0]=='') and self.isnum(A[1], 1)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/oasis/scratch/csd181/mdburns/python/bin/python
"""Hadoop-streaming mapper: decode base64/pickled records from stdin, run
cross-validated processing on each, and emit key<TAB>base64(pickle) lines."""
import sys
import pickle
import base64
from process import process
import multiprocessing as mp

# Epoch window parameters and cross-validation fold count passed to process().
EPOCH_LENGTH = .875
EPOCH_OFFSET = .125
NUM_FOLDS = 5


def main():
    """Read key<TAB>value lines from stdin and print key<TAB>result lines."""
    # Bug fix: the pool and the work loop previously ran at module import
    # time, outside the __main__ guard, which breaks multiprocessing's
    # spawn/freeze start-up (child processes re-import the module and would
    # re-run the whole loop). Everything now runs under the guard.
    p = mp.Pool(2)
    for instr in sys.stdin:
        this_key = ''
        sys.stderr.write('mapper: begin receiving data\n')
        instr = instr.strip()
        keystr, valstr = instr.split('\t', 1)
        sys.stderr.write('mapper: key_string ' + keystr + '\n')

        # Keys look like "<group>.<record id>".
        this_key, this_id = keystr.split('.', 1)
        sys.stderr.write('mapper: key is ' + keystr + '\n')
        sys.stderr.write('mapper: this_key is ' + this_key + '\n')
        sys.stderr.write('mapper: this_id is ' + this_id + '\n')

        # Payload is (labels, pickled EEG) pickled and base64-encoded.
        v = pickle.loads(base64.decodestring(valstr))
        y = v[0].reshape((-1, 1))
        eeg = pickle.loads(v[1])

        try:
            rov = process(y, eeg, EPOCH_LENGTH, EPOCH_OFFSET, NUM_FOLDS, p)
            result = {'id': this_id, 'rov': rov}
        except Exception:
            # Best-effort: skip records that fail and keep the stream going.
            # (Narrowed from a bare except so Ctrl-C still works.)
            sys.stderr.write('mapper: process failed\n')
            continue

        this_val = base64.b64encode(pickle.dumps(result, protocol=2))

        if this_key != '':
            # Parenthesized print works under both Python 2 and 3.
            print('%s\t%s' % (this_key, this_val))

    p.close()
    sys.stderr.write('mapper: good job\n')


if __name__ == "__main__":
    mp.freeze_support()
    main()
|
normal
|
{
"blob_id": "e477a59e86cfeb3f26db1442a05d0052a45c42ff",
"index": 6397,
"step-1": "#!/oasis/scratch/csd181/mdburns/python/bin/python\nimport sys\nimport pickle\nimport base64\nfrom process import process\nimport multiprocessing as mp\n\nEPOCH_LENGTH=.875\nEPOCH_OFFSET=.125\nNUM_FOLDS=5\n\nif __name__ == \"__main__\":\n mp.freeze_support()\n\np= mp.Pool(2)\n\nfor instr in sys.stdin:\n this_key=''\n sys.stderr.write('mapper: begin receiving data\\n')\n instr = instr.strip()\n keystr, valstr = instr.split('\\t', 1)\n sys.stderr.write('mapper: key_string ' + keystr + '\\n')\n\n this_key, this_id = keystr.split('.', 1)\n sys.stderr.write('mapper: key is ' + keystr +'\\n')\n sys.stderr.write('mapper: this_key is ' + this_key +'\\n')\n sys.stderr.write('mapper: this_id is ' + this_id +'\\n')\n\n v = pickle.loads(base64.decodestring(valstr))\n y = v[0].reshape((-1,1))\n eeg = pickle.loads(v[1])\n\n try:\n rov = process(y, eeg, EPOCH_LENGTH, EPOCH_OFFSET, NUM_FOLDS, p)\n result = {'id':this_id, 'rov':rov }\n\n except:\n sys.stderr.write('mapper: process failed\\n')\n continue\n\n this_val = base64.b64encode(pickle.dumps(result, protocol=2))\n\n if this_key != '':\n print '%s\\t%s' % (this_key, this_val)\n\np.close()\nsys.stderr.write('mapper: good job\\n')",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def create_backup(ServerName=None, Description=None):
    """
    Create an application-level backup of an OpsWorks for Chef Automate server.

    While the server is BACKING_UP it cannot be modified and no further
    backup can be started. Backups may be taken for servers in the RUNNING,
    HEALTHY, or UNHEALTHY states. This operation is asynchronous, and by
    default at most 50 manual backups can exist.

    Service-side errors: LimitExceededException when the manual-backup limit
    is reached; InvalidStateException when the server is not in RUNNING,
    HEALTHY, or UNHEALTHY; ResourceNotFoundException when the server is not
    found; ValidationException when request parameters are invalid.

    See also: AWS API Documentation

    :example: response = client.create_backup(
        ServerName='string',
        Description='string'
    )

    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server to back up.

    :type Description: string
    :param Description: A user-defined description of the backup.

    :rtype: dict
    :return: A dict with a single 'Backup' key describing the new backup,
        containing: BackupArn, BackupId, BackupType ('AUTOMATED'|'MANUAL'),
        CreatedAt, Description, Engine, EngineModel, EngineVersion,
        InstanceProfileArn, InstanceType, KeyPair, PreferredBackupWindow,
        PreferredMaintenanceWindow, S3DataSize, S3DataUrl, S3LogUrl,
        SecurityGroupIds (list of string), ServerName, ServiceRoleArn,
        Status ('IN_PROGRESS'|'OK'|'FAILED'|'DELETING'), StatusDescription,
        SubnetIds (list of string), ToolsVersion, UserArn.
    """
    pass
def create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=
None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=
None, ServerName=None, InstanceProfileArn=None, InstanceType=None,
KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=
None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None,
BackupId=None):
"""
Creates and immedately starts a new Server. The server can be used once it has reached the HEALTHY state.
This operation is asnychronous.
A LimitExceededException is thrown then the maximum number of server backup is reached. A ResourceAlreadyExistsException is raise when a server with the same name already exists in the account. A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.
By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.
When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.
By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.
See also: AWS API Documentation
:example: response = client.create_server(
DisableAutomatedBackup=True|False,
Engine='string',
EngineModel='string',
EngineVersion='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
],
BackupRetentionCount=123,
ServerName='string',
InstanceProfileArn='string',
InstanceType='string',
KeyPair='string',
PreferredMaintenanceWindow='string',
PreferredBackupWindow='string',
SecurityGroupIds=[
'string',
],
ServiceRoleArn='string',
SubnetIds=[
'string',
],
BackupId='string'
)
:type DisableAutomatedBackup: boolean
:param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .
:type Engine: string
:param Engine: The configuration management engine to use. Valid values include Chef .
:type EngineModel: string
:param EngineModel: The engine model, or option. Valid values include Single .
:type EngineVersion: string
:param EngineVersion: The major release version of the engine that you want to use. Values depend on the engine that you choose.
:type EngineAttributes: list
:param EngineAttributes: Engine attributes on a specified server.
Attributes accepted in a createServer request:
CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
(dict) --A name/value pair that is specific to the engine of the server.
Name (string) --The name of the engine attribute.
Value (string) --The value of the engine attribute.
:type BackupRetentionCount: integer
:param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 .
:type ServerName: string
:param ServerName: [REQUIRED]
The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.
:type InstanceProfileArn: string
:param InstanceProfileArn: [REQUIRED]
The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.
:type InstanceType: string
:param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .
:type KeyPair: string
:param KeyPair: The Amazon EC2 key pair to set for the instance. You may specify this parameter to connect to your instances by using SSH.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.
Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type PreferredBackupWindow: string
:param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. Valid values must be specified in one of the following formats:
HH:MM for daily backups
DDD:HH:MM for weekly backups
The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.
Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type SecurityGroupIds: list
:param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .
If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).
(string) --
:type ServiceRoleArn: string
:param ServiceRoleArn: [REQUIRED]
The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.
:type SubnetIds: list
:param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.
Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.
EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.
For more information about supported Amazon EC2 platforms, see Supported Platforms .
(string) --
:type BackupId: string
:param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.
:rtype: dict
:return: {
'Server': {
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
}
}
:returns:
CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
"""
pass
def delete_backup(BackupId=None):
"""
Deletes a backup. You can delete both manual and automated backups.
This operation is asynchronous.
A InvalidStateException is thrown then a backup is already deleting. A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.
See also: AWS API Documentation
:example: response = client.delete_backup(
BackupId='string'
)
:type BackupId: string
:param BackupId: [REQUIRED]
The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .
:rtype: dict
:return: {}
"""
pass
def delete_server(ServerName=None):
"""
Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.
This operation is asynchronous.
A InvalidStateException is thrown then a server is already deleting. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
See also: AWS API Documentation
:example: response = client.delete_server(
ServerName='string'
)
:type ServerName: string
:param ServerName: [REQUIRED]
The ID of the server to delete.
:rtype: dict
:return: {}
"""
pass
def describe_account_attributes():
"""
Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.
This operation is synchronous.
See also: AWS API Documentation
:example: response = client.describe_account_attributes()
:rtype: dict
:return: {
'Attributes': [
{
'Name': 'string',
'Maximum': 123,
'Used': 123
},
]
}
"""
pass
def describe_backups(BackupId=None, ServerName=None, NextToken=None,
MaxResults=None):
"""
Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.
This operation is synchronous.
A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.
See also: AWS API Documentation
:example: response = client.describe_backups(
BackupId='string',
ServerName='string',
NextToken='string',
MaxResults=123
)
:type BackupId: string
:param BackupId: Describes a single backup.
:type ServerName: string
:param ServerName: Returns backups for the server with the specified ServerName.
:type NextToken: string
:param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.
:type MaxResults: integer
:param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.
:rtype: dict
:return: {
'Backups': [
{
'BackupArn': 'string',
'BackupId': 'string',
'BackupType': 'AUTOMATED'|'MANUAL',
'CreatedAt': datetime(2015, 1, 1),
'Description': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'S3DataSize': 123,
'S3DataUrl': 'string',
'S3LogUrl': 'string',
'SecurityGroupIds': [
'string',
],
'ServerName': 'string',
'ServiceRoleArn': 'string',
'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
'StatusDescription': 'string',
'SubnetIds': [
'string',
],
'ToolsVersion': 'string',
'UserArn': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
<|reserved_special_token_0|>
def describe_node_association_status(NodeAssociationStatusToken=None,
ServerName=None):
"""
See also: AWS API Documentation
:example: response = client.describe_node_association_status(
NodeAssociationStatusToken='string',
ServerName='string'
)
:type NodeAssociationStatusToken: string
:param NodeAssociationStatusToken: [REQUIRED]
:type ServerName: string
:param ServerName: [REQUIRED]
:rtype: dict
:return: {
'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'
}
:returns:
(dict) --
NodeAssociationStatus (string) --
"""
pass
<|reserved_special_token_0|>
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,
HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
"""
pass
def get_waiter():
"""
"""
pass
def restore_server(BackupId=None, ServerName=None, InstanceType=None,
KeyPair=None):
"""
Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.
This operation is asynchronous.
A InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
See also: AWS API Documentation
:example: response = client.restore_server(
BackupId='string',
ServerName='string',
InstanceType='string',
KeyPair='string'
)
:type BackupId: string
:param BackupId: [REQUIRED]
The ID of the backup that you want to use to restore a server.
:type ServerName: string
:param ServerName: [REQUIRED]
The name of the server that you want to restore.
:type InstanceType: string
:param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.
:type KeyPair: string
:param KeyPair: The name of the key pair to set on the new EC2 instance. This can be helpful if any of the administrators who manage the server no longer have the SSH key.
:rtype: dict
:return: {}
:returns:
(dict) --
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def associate_node(ServerName=None, NodeName=None, EngineAttributes=None):
"""
See also: AWS API Documentation
:example: response = client.associate_node(
ServerName='string',
NodeName='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type ServerName: string
:param ServerName: [REQUIRED]
:type NodeName: string
:param NodeName: [REQUIRED]
:type EngineAttributes: list
:param EngineAttributes:
(dict) --A name/value pair that is specific to the engine of the server.
Name (string) --The name of the engine attribute.
Value (string) --The value of the engine attribute.
:rtype: dict
:return: {
'NodeAssociationStatusToken': 'string'
}
:returns:
(dict) --
NodeAssociationStatusToken (string) --
"""
pass
<|reserved_special_token_0|>
def create_backup(ServerName=None, Description=None):
"""
Creates an application-level backup of a server. While the server is BACKING_UP , the server can not be modified and no additional backup can be created.
Backups can be created for RUNNING , HEALTHY and UNHEALTHY servers.
This operation is asnychronous.
By default 50 manual backups can be created.
A LimitExceededException is thrown then the maximum number of manual backup is reached. A InvalidStateException is thrown when the server is not in any of RUNNING, HEALTHY, UNHEALTHY. A ResourceNotFoundException is thrown when the server is not found. A ValidationException is thrown when parameters of the request are not valid.
See also: AWS API Documentation
:example: response = client.create_backup(
ServerName='string',
Description='string'
)
:type ServerName: string
:param ServerName: [REQUIRED]
The name of the server that you want to back up.
:type Description: string
:param Description: A user-defined description of the backup.
:rtype: dict
:return: {
'Backup': {
'BackupArn': 'string',
'BackupId': 'string',
'BackupType': 'AUTOMATED'|'MANUAL',
'CreatedAt': datetime(2015, 1, 1),
'Description': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'S3DataSize': 123,
'S3DataUrl': 'string',
'S3LogUrl': 'string',
'SecurityGroupIds': [
'string',
],
'ServerName': 'string',
'ServiceRoleArn': 'string',
'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
'StatusDescription': 'string',
'SubnetIds': [
'string',
],
'ToolsVersion': 'string',
'UserArn': 'string'
}
}
:returns:
(string) --
"""
pass
def create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=
None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=
None, ServerName=None, InstanceProfileArn=None, InstanceType=None,
KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=
None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None,
BackupId=None):
"""
Creates and immedately starts a new Server. The server can be used once it has reached the HEALTHY state.
This operation is asnychronous.
A LimitExceededException is thrown then the maximum number of server backup is reached. A ResourceAlreadyExistsException is raise when a server with the same name already exists in the account. A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.
By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.
When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.
By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.
See also: AWS API Documentation
:example: response = client.create_server(
DisableAutomatedBackup=True|False,
Engine='string',
EngineModel='string',
EngineVersion='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
],
BackupRetentionCount=123,
ServerName='string',
InstanceProfileArn='string',
InstanceType='string',
KeyPair='string',
PreferredMaintenanceWindow='string',
PreferredBackupWindow='string',
SecurityGroupIds=[
'string',
],
ServiceRoleArn='string',
SubnetIds=[
'string',
],
BackupId='string'
)
:type DisableAutomatedBackup: boolean
:param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .
:type Engine: string
:param Engine: The configuration management engine to use. Valid values include Chef .
:type EngineModel: string
:param EngineModel: The engine model, or option. Valid values include Single .
:type EngineVersion: string
:param EngineVersion: The major release version of the engine that you want to use. Values depend on the engine that you choose.
:type EngineAttributes: list
:param EngineAttributes: Engine attributes on a specified server.
Attributes accepted in a createServer request:
CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
(dict) --A name/value pair that is specific to the engine of the server.
Name (string) --The name of the engine attribute.
Value (string) --The value of the engine attribute.
:type BackupRetentionCount: integer
:param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 .
:type ServerName: string
:param ServerName: [REQUIRED]
The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.
:type InstanceProfileArn: string
:param InstanceProfileArn: [REQUIRED]
The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.
:type InstanceType: string
:param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .
:type KeyPair: string
:param KeyPair: The Amazon EC2 key pair to set for the instance. You may specify this parameter to connect to your instances by using SSH.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.
Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type PreferredBackupWindow: string
:param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. Valid values must be specified in one of the following formats:
HH:MM for daily backups
DDD:HH:MM for weekly backups
The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.
Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type SecurityGroupIds: list
:param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .
If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).
(string) --
:type ServiceRoleArn: string
:param ServiceRoleArn: [REQUIRED]
The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.
:type SubnetIds: list
:param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.
Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.
EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.
For more information about supported Amazon EC2 platforms, see Supported Platforms .
(string) --
:type BackupId: string
:param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.
:rtype: dict
:return: {
'Server': {
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
}
}
:returns:
CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
"""
pass
def delete_backup(BackupId=None):
"""
Deletes a backup. You can delete both manual and automated backups.
This operation is asynchronous.
A InvalidStateException is thrown then a backup is already deleting. A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.
See also: AWS API Documentation
:example: response = client.delete_backup(
BackupId='string'
)
:type BackupId: string
:param BackupId: [REQUIRED]
The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .
:rtype: dict
:return: {}
"""
pass
def delete_server(ServerName=None):
"""
Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.
This operation is asynchronous.
A InvalidStateException is thrown then a server is already deleting. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
See also: AWS API Documentation
:example: response = client.delete_server(
ServerName='string'
)
:type ServerName: string
:param ServerName: [REQUIRED]
The ID of the server to delete.
:rtype: dict
:return: {}
"""
pass
def describe_account_attributes():
"""
Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.
This operation is synchronous.
See also: AWS API Documentation
:example: response = client.describe_account_attributes()
:rtype: dict
:return: {
'Attributes': [
{
'Name': 'string',
'Maximum': 123,
'Used': 123
},
]
}
"""
pass
def describe_backups(BackupId=None, ServerName=None, NextToken=None,
MaxResults=None):
"""
Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.
This operation is synchronous.
A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.
See also: AWS API Documentation
:example: response = client.describe_backups(
BackupId='string',
ServerName='string',
NextToken='string',
MaxResults=123
)
:type BackupId: string
:param BackupId: Describes a single backup.
:type ServerName: string
:param ServerName: Returns backups for the server with the specified ServerName.
:type NextToken: string
:param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.
:type MaxResults: integer
:param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.
:rtype: dict
:return: {
'Backups': [
{
'BackupArn': 'string',
'BackupId': 'string',
'BackupType': 'AUTOMATED'|'MANUAL',
'CreatedAt': datetime(2015, 1, 1),
'Description': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'S3DataSize': 123,
'S3DataUrl': 'string',
'S3LogUrl': 'string',
'SecurityGroupIds': [
'string',
],
'ServerName': 'string',
'ServiceRoleArn': 'string',
'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
'StatusDescription': 'string',
'SubnetIds': [
'string',
],
'ToolsVersion': 'string',
'UserArn': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
<|reserved_special_token_0|>
def describe_node_association_status(NodeAssociationStatusToken=None,
        ServerName=None):
    """
    Return the status of a node association request.

    See also: AWS API Documentation

    :type NodeAssociationStatusToken: string
    :param NodeAssociationStatusToken: [REQUIRED] Token returned when the
        association request was submitted.
    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server.
    :rtype: dict
    :return: {'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'}
    """
    pass
<|reserved_special_token_0|>
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,
        HttpMethod=None):
    """
    Generate a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL stays valid;
        defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method to use on the generated URL; defaults
        to whatever the operation's model uses.
    """
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, identical to the client
        method name: for client.create_foo(**kwargs), use
        client.get_paginator('create_foo') if that operation can be
        paginated.
    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_waiter():
    """
    Return a waiter object for this client (no documentation is provided
    by the service model for this stub).
    """
    pass
def restore_server(BackupId=None, ServerName=None, InstanceType=None,
        KeyPair=None):
    """
    Restore a backup to a server in a RUNNING, FAILED, or HEALTHY state.

    The server's EC2 instance is deleted and a new one is configured; the
    existing server endpoint is kept, so configuration management of the
    server's client devices keeps working. This operation is asynchronous.

    Raises InvalidStateException when the server is not in a valid state,
    ResourceNotFoundException when the server does not exist, and
    ValidationException when request parameters are invalid.

    See also: AWS API Documentation

    :type BackupId: string
    :param BackupId: [REQUIRED] The ID of the backup to restore from.
    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server to restore.
    :type InstanceType: string
    :param InstanceType: Instance type to create, in the format
        ^([cm][34]|t2).* (e.g. c3.large); defaults to the instance type
        recorded in the backup.
    :type KeyPair: string
    :param KeyPair: Key pair to set on the new EC2 instance; helpful if
        administrators no longer have the SSH key.
    :rtype: dict
    :return: {}
    """
    pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def associate_node(ServerName=None, NodeName=None, EngineAttributes=None):
    """
    Associate a node with a server.

    See also: AWS API Documentation

    :type ServerName: string
    :param ServerName: [REQUIRED]
    :type NodeName: string
    :param NodeName: [REQUIRED]
    :type EngineAttributes: list
    :param EngineAttributes: Name/value pairs specific to the server's
        engine, as [{'Name': 'string', 'Value': 'string'}, ...].
    :rtype: dict
    :return: {'NodeAssociationStatusToken': 'string'}
    """
    pass
def can_paginate(operation_name=None):
    """
    Check whether an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, identical to the client
        method name (e.g. 'create_foo' for client.create_foo(**kwargs)).
    """
    pass
def create_backup(ServerName=None, Description=None):
    """
    Create an application-level backup of a server.

    While the server is BACKING_UP it cannot be modified and no additional
    backup can be created. Backups can be created for RUNNING, HEALTHY,
    and UNHEALTHY servers. This operation is asynchronous; by default up
    to 50 manual backups can be created.

    Raises LimitExceededException when the maximum number of manual
    backups is reached, InvalidStateException when the server is not
    RUNNING, HEALTHY, or UNHEALTHY, ResourceNotFoundException when the
    server is not found, and ValidationException when request parameters
    are invalid.

    See also: AWS API Documentation

    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server to back up.
    :type Description: string
    :param Description: A user-defined description of the backup.
    :rtype: dict
    :return: {'Backup': {...}} -- metadata for the new backup, including
        BackupArn, BackupId, BackupType ('AUTOMATED'|'MANUAL'), CreatedAt,
        engine and instance details, S3 data/log URLs, Status
        ('IN_PROGRESS'|'OK'|'FAILED'|'DELETING'), and network settings
        (SecurityGroupIds, SubnetIds).
    """
    pass
def create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=None,
        EngineVersion=None, EngineAttributes=None, BackupRetentionCount=None,
        ServerName=None, InstanceProfileArn=None, InstanceType=None,
        KeyPair=None, PreferredMaintenanceWindow=None,
        PreferredBackupWindow=None, SecurityGroupIds=None,
        ServiceRoleArn=None, SubnetIds=None, BackupId=None):
    """
    Create and immediately start a new server.

    The server can be used once it reaches the HEALTHY state. This
    operation is asynchronous; by default 10 servers can be created.

    Raises LimitExceededException when the server limit is exceeded,
    ResourceAlreadyExistsException when a server with the same name
    already exists in the account, ResourceNotFoundException when a
    BackupId is passed but the backup does not exist, and
    ValidationException when request parameters are invalid.

    When no SecurityGroupIds are provided, AWS OpsWorks creates a new
    security group that opens the Chef server to the world on TCP port 443
    (and, if KeyPair is present, SSH on TCP port 22). Consider tightening
    those rules to known address ranges in the EC2 console.

    See also: AWS API Documentation

    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Enable or disable scheduled backups.
        Valid values are true or false.
    :type Engine: string
    :param Engine: The configuration management engine to use
        (valid values include Chef).
    :type EngineModel: string
    :param EngineModel: The engine model, or option
        (valid values include Single).
    :type EngineVersion: string
    :param EngineVersion: The major release version of the engine; values
        depend on the chosen engine.
    :type EngineAttributes: list
    :param EngineAttributes: Engine attributes for the server, as
        [{'Name': 'string', 'Value': 'string'}, ...]. Accepted on create:
        CHEF_PIVOTAL_KEY -- a base64-encoded RSA private key (not stored
        by AWS OpsWorks for Chef Automate) required to access the Chef
        API.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: Number of automated backups to keep; the
        oldest backups are deleted when the number is exceeded
        (default: 1).
    :type ServerName: string
    :param ServerName: [REQUIRED] Server name, unique within the account
        and region: a letter first, then letters, digits, or hyphens, up
        to 32 characters.
    :type InstanceProfileArn: string
    :param InstanceProfileArn: [REQUIRED] ARN of the instance profile used
        by the EC2 instances. In this release, run the
        service-role-creation.yaml CloudFormation template (at
        https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml)
        to create the stack that includes it.
    :type InstanceType: string
    :param InstanceType: EC2 instance type, in the format ^([cm][34]|t2).*
        (e.g. c3.large).
    :type KeyPair: string
    :param KeyPair: EC2 key pair to set on the instance, for SSH access.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: Weekly one-hour maintenance start
        time, DDD:HH:MM in UTC (e.g. Mon:08:00); defaults to a random
        one-hour period on Tuesday, Wednesday, or Friday.
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: Backup start time, HH:MM (daily) or
        DDD:HH:MM (weekly) in UTC; defaults to a random daily start time.
    :type SecurityGroupIds: list
    :param SecurityGroupIds: Security group IDs to attach to the instance;
        they must be within the VPC given by SubnetIds. If omitted, one
        new group open on TCP 22 and 443 to 0.0.0.0/0 is created.
    :type ServiceRoleArn: string
    :param ServiceRoleArn: [REQUIRED] Service role used by the backend to
        work with your account. In this release, run the
        service-role-creation.yaml CloudFormation template (see
        InstanceProfileArn) to create the stack that includes it.
    :type SubnetIds: list
    :param SubnetIds: IDs of subnets to launch the server EC2 instance in.
        Required for EC2-Classic accounts; optional for EC2-VPC (a default
        subnet is selected if omitted). The VPC must have 'Auto Assign
        Public IP' enabled.
    :type BackupId: string
    :param BackupId: If present, the server is created from this backup.
    :rtype: dict
    :return: {'Server': {...}} -- the new server, including ServerName,
        Endpoint, engine details, EngineAttributes (CHEF_PIVOTAL_KEY, a
        generated RSA private key required to access the Chef API, and
        CHEF_STARTER_KIT, a base64-encoded ZIP file with a starter kit,
        README, config file, and the required private key), Status,
        network settings, and ServerArn.
    """
    pass
def delete_backup(BackupId=None):
    """
    Delete a backup (manual or automated). This operation is asynchronous.

    Raises InvalidStateException when the backup is already deleting,
    ResourceNotFoundException when the backup does not exist, and
    ValidationException when request parameters are invalid.

    See also: AWS API Documentation

    :type BackupId: string
    :param BackupId: [REQUIRED] The ID of the backup to delete, in the
        format ServerName-yyyyMMddHHmmssSSS; run the DescribeBackups
        command to list backup IDs.
    :rtype: dict
    :return: {}
    """
    pass
def delete_server(ServerName=None):
    """
    Delete a server and its underlying AWS CloudFormation stack, including
    the server's EC2 instance. This operation is asynchronous.

    The server status is updated to DELETING; once deletion succeeds, the
    server is no longer returned by DescribeServer requests. If the
    CloudFormation stack cannot be deleted, the server cannot be deleted.

    Raises InvalidStateException when the server is already deleting,
    ResourceNotFoundException when the server does not exist, and
    ValidationException when request parameters are invalid.

    See also: AWS API Documentation

    :type ServerName: string
    :param ServerName: [REQUIRED] The ID of the server to delete.
    :rtype: dict
    :return: {}
    """
    pass
def describe_account_attributes():
    """
    Describe your account attributes, and create requests to increase
    limits before they are reached or exceeded. This operation is
    synchronous.

    See also: AWS API Documentation

    :rtype: dict
    :return: {'Attributes': [{'Name': 'string', 'Maximum': 123,
        'Used': 123}, ...]}
    """
    pass
def describe_backups(BackupId=None, ServerName=None, NextToken=None,
        MaxResults=None):
    """
    Describe backups, ordered by time with the newest first. Without a
    BackupId or ServerName, all backups are returned. This operation is
    synchronous.

    Raises ResourceNotFoundException when the backup does not exist and
    ValidationException when request parameters are invalid.

    See also: AWS API Documentation

    :type BackupId: string
    :param BackupId: Describes a single backup.
    :type ServerName: string
    :param ServerName: Returns backups for the server with this name.
    :type NextToken: string
    :param NextToken: Pagination token from a previous response; pass it
        back to retrieve the remaining items. A null token in a response
        means no more results. Passing a token that was not returned by a
        previous call causes an InvalidNextTokenException.
    :type MaxResults: integer
    :param MaxResults: Maximum number of results per call; when more
        results are available, the response includes a NextToken value.
    :rtype: dict
    :return: {'Backups': [{...}, ...], 'NextToken': 'string'} -- each
        backup includes BackupArn, BackupId, BackupType
        ('AUTOMATED'|'MANUAL'), CreatedAt, engine and instance details,
        S3 data/log URLs, Status ('IN_PROGRESS'|'OK'|'FAILED'|'DELETING'),
        and network settings (SecurityGroupIds, SubnetIds).
    """
    pass
<|reserved_special_token_0|>
def describe_node_association_status(NodeAssociationStatusToken=None,
        ServerName=None):
    """
    Return the status of a node association request.

    See also: AWS API Documentation

    :type NodeAssociationStatusToken: string
    :param NodeAssociationStatusToken: [REQUIRED] Token returned when the
        association request was submitted.
    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server.
    :rtype: dict
    :return: {'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'}
    """
    pass
<|reserved_special_token_0|>
def disassociate_node(ServerName=None, NodeName=None, EngineAttributes=None):
    """
    Disassociate a node from a server.

    See also: AWS API Documentation

    :type ServerName: string
    :param ServerName: [REQUIRED]
    :type NodeName: string
    :param NodeName: [REQUIRED]
    :type EngineAttributes: list
    :param EngineAttributes: Name/value pairs specific to the server's
        engine, as [{'Name': 'string', 'Value': 'string'}, ...].
    :rtype: dict
    :return: {'NodeAssociationStatusToken': 'string'}
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,
        HttpMethod=None):
    """
    Generate a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL stays valid;
        defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method to use on the generated URL; defaults
        to whatever the operation's model uses.
    """
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, identical to the client
        method name: for client.create_foo(**kwargs), use
        client.get_paginator('create_foo') if that operation can be
        paginated.
    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_waiter():
    """
    Return a waiter object for this client (no documentation is provided
    by the service model for this stub).
    """
    pass
def restore_server(BackupId=None, ServerName=None, InstanceType=None,
        KeyPair=None):
    """
    Restore a backup to a server in a RUNNING, FAILED, or HEALTHY state.

    The server's EC2 instance is deleted and a new one is configured; the
    existing server endpoint is kept, so configuration management of the
    server's client devices keeps working. This operation is asynchronous.

    Raises InvalidStateException when the server is not in a valid state,
    ResourceNotFoundException when the server does not exist, and
    ValidationException when request parameters are invalid.

    See also: AWS API Documentation

    :type BackupId: string
    :param BackupId: [REQUIRED] The ID of the backup to restore from.
    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server to restore.
    :type InstanceType: string
    :param InstanceType: Instance type to create, in the format
        ^([cm][34]|t2).* (e.g. c3.large); defaults to the instance type
        recorded in the backup.
    :type KeyPair: string
    :param KeyPair: Key pair to set on the new EC2 instance; helpful if
        administrators no longer have the SSH key.
    :rtype: dict
    :return: {}
    """
    pass
<|reserved_special_token_0|>
def update_server(DisableAutomatedBackup=None, BackupRetentionCount=None,
        ServerName=None, PreferredMaintenanceWindow=None,
        PreferredBackupWindow=None):
    """
    Update settings for a server. This operation is synchronous.

    See also: AWS API Documentation

    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Setting this to True disables automated
        or scheduled backups; automated backups are enabled by default.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: Number of automated backups to keep.
    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server to update.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: DDD:HH:MM (weekly start time) or
        HH:MM (daily start time) in UTC; valid day-of-week strings are
        Mon, Tue, Wed, Thr, Fri, Sat, Sun.
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: DDD:HH:MM (weekly start time) or HH:MM
        (daily start time) in UTC; valid day-of-week strings are Mon, Tue,
        Wed, Thr, Fri, Sat, Sun.
    :rtype: dict
    :return: {'Server': {...}} -- the updated server, including engine
        details, EngineAttributes (CHEF_PIVOTAL_KEY, a generated RSA
        private key required to access the Chef API, and
        CHEF_STARTER_KIT, a base64-encoded ZIP with a starter kit),
        Status, network settings, and ServerArn.
    """
    pass
def update_server_engine_attributes(ServerName=None, AttributeName=None,
        AttributeValue=None):
    """
    Update an engine-specific attribute on a server; can be used to reset
    the Chef server's main API key (CHEF_PIVOTAL_KEY).

    The server enters the MODIFYING state while the update is in progress,
    and only one update can take place at a time. This operation is
    asynchronous and may only be called for HEALTHY and UNHEALTHY servers.

    Raises InvalidStateException when the server is not HEALTHY or
    UNHEALTHY, ResourceNotFoundException when the server does not exist,
    and ValidationException when request parameters are invalid.

    See also: AWS API Documentation

    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server to update.
    :type AttributeName: string
    :param AttributeName: [REQUIRED] The name of the engine attribute to
        update.
    :type AttributeValue: string
    :param AttributeValue: The value to set for the attribute.
    :rtype: dict
    :return: {'Server': {...}} -- the updated server, including engine
        details, EngineAttributes (CHEF_PIVOTAL_KEY, a generated RSA
        private key required to access the Chef API, and
        CHEF_STARTER_KIT, a base64-encoded ZIP with a starter kit),
        Status, network settings, and ServerArn.
    """
    pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def associate_node(ServerName=None, NodeName=None, EngineAttributes=None):
    """
    Associate a node with a server.

    See also: AWS API Documentation

    :type ServerName: string
    :param ServerName: [REQUIRED]
    :type NodeName: string
    :param NodeName: [REQUIRED]
    :type EngineAttributes: list
    :param EngineAttributes: Name/value pairs specific to the server's
        engine, as [{'Name': 'string', 'Value': 'string'}, ...].
    :rtype: dict
    :return: {'NodeAssociationStatusToken': 'string'}
    """
    pass
def can_paginate(operation_name=None):
    """
    Check whether an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, identical to the client
        method name (e.g. 'create_foo' for client.create_foo(**kwargs)).
    """
    pass
def create_backup(ServerName=None, Description=None):
    """
    Create an application-level backup of a server.

    While the server is BACKING_UP it cannot be modified and no additional
    backup can be created. Backups can be created for RUNNING, HEALTHY,
    and UNHEALTHY servers. This operation is asynchronous; by default up
    to 50 manual backups can be created.

    Raises LimitExceededException when the maximum number of manual
    backups is reached, InvalidStateException when the server is not
    RUNNING, HEALTHY, or UNHEALTHY, ResourceNotFoundException when the
    server is not found, and ValidationException when request parameters
    are invalid.

    See also: AWS API Documentation

    :type ServerName: string
    :param ServerName: [REQUIRED] The name of the server to back up.
    :type Description: string
    :param Description: A user-defined description of the backup.
    :rtype: dict
    :return: {'Backup': {...}} -- metadata for the new backup, including
        BackupArn, BackupId, BackupType ('AUTOMATED'|'MANUAL'), CreatedAt,
        engine and instance details, S3 data/log URLs, Status
        ('IN_PROGRESS'|'OK'|'FAILED'|'DELETING'), and network settings
        (SecurityGroupIds, SubnetIds).
    """
    pass
def create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=None, ServerName=None, InstanceProfileArn=None, InstanceType=None, KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None, BackupId=None):
    """
    Creates and immediately starts a new Server. The server can be used once it has reached the HEALTHY state.
    This operation is asynchronous.
    A LimitExceededException is thrown when the maximum number of servers is reached. A ResourceAlreadyExistsException is raised when a server with the same name already exists in the account. A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.
    By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.
    When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.
    By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.
    See also: AWS API Documentation
    :example: response = client.create_server(
        DisableAutomatedBackup=True|False,
        Engine='string',
        EngineModel='string',
        EngineVersion='string',
        EngineAttributes=[
            {
                'Name': 'string',
                'Value': 'string'
            },
        ],
        BackupRetentionCount=123,
        ServerName='string',
        InstanceProfileArn='string',
        InstanceType='string',
        KeyPair='string',
        PreferredMaintenanceWindow='string',
        PreferredBackupWindow='string',
        SecurityGroupIds=[
            'string',
        ],
        ServiceRoleArn='string',
        SubnetIds=[
            'string',
        ],
        BackupId='string'
    )
    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .
    :type Engine: string
    :param Engine: The configuration management engine to use. Valid values include Chef .
    :type EngineModel: string
    :param EngineModel: The engine model, or option. Valid values include Single .
    :type EngineVersion: string
    :param EngineVersion: The major release version of the engine that you want to use. Values depend on the engine that you choose.
    :type EngineAttributes: list
    :param EngineAttributes: Engine attributes on a specified server.
    Attributes accepted in a createServer request:
    CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    (dict) --A name/value pair that is specific to the engine of the server.
    Name (string) --The name of the engine attribute.
    Value (string) --The value of the engine attribute.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 .
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.
    :type InstanceProfileArn: string
    :param InstanceProfileArn: [REQUIRED]
    The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.
    :type InstanceType: string
    :param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .
    :type KeyPair: string
    :param KeyPair: The Amazon EC2 key pair to set for the instance. You may specify this parameter to connect to your instances by using SSH.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.
    Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. Valid values must be specified in one of the following formats:
    HH:MM for daily backups
    DDD:HH:MM for weekly backups
    The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.
    Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
    :type SecurityGroupIds: list
    :param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .
    If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).
    (string) --
    :type ServiceRoleArn: string
    :param ServiceRoleArn: [REQUIRED]
    The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.
    :type SubnetIds: list
    :param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.
    Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.
    EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.
    For more information about supported Amazon EC2 platforms, see Supported Platforms .
    (string) --
    :type BackupId: string
    :param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.
    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }
    :returns:
    CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def delete_backup(BackupId=None):
    """
    Deletes a backup. You can delete both manual and automated backups.
    This operation is asynchronous.
    An InvalidStateException is thrown when a backup is already deleting. A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.
    See also: AWS API Documentation
    :example: response = client.delete_backup(
        BackupId='string'
    )
    :type BackupId: string
    :param BackupId: [REQUIRED]
    The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .
    :rtype: dict
    :return: {}
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def delete_server(ServerName=None):
    """
    Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.
    This operation is asynchronous.
    An InvalidStateException is thrown when a server is already deleting. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.delete_server(
        ServerName='string'
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The ID of the server to delete.
    :rtype: dict
    :return: {}
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def describe_account_attributes():
    """
    Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.
    This operation is synchronous.
    See also: AWS API Documentation
    :example: response = client.describe_account_attributes()
    :rtype: dict
    :return: {
        'Attributes': [
            {
                'Name': 'string',
                'Maximum': 123,
                'Used': 123
            },
        ]
    }
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def describe_backups(BackupId=None, ServerName=None, NextToken=None, MaxResults=None):
    """
    Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.
    This operation is synchronous.
    A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.describe_backups(
        BackupId='string',
        ServerName='string',
        NextToken='string',
        MaxResults=123
    )
    :type BackupId: string
    :param BackupId: Describes a single backup.
    :type ServerName: string
    :param ServerName: Returns backups for the server with the specified ServerName.
    :type NextToken: string
    :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.
    :type MaxResults: integer
    :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.
    :rtype: dict
    :return: {
        'Backups': [
            {
                'BackupArn': 'string',
                'BackupId': 'string',
                'BackupType': 'AUTOMATED'|'MANUAL',
                'CreatedAt': datetime(2015, 1, 1),
                'Description': 'string',
                'Engine': 'string',
                'EngineModel': 'string',
                'EngineVersion': 'string',
                'InstanceProfileArn': 'string',
                'InstanceType': 'string',
                'KeyPair': 'string',
                'PreferredBackupWindow': 'string',
                'PreferredMaintenanceWindow': 'string',
                'S3DataSize': 123,
                'S3DataUrl': 'string',
                'S3LogUrl': 'string',
                'SecurityGroupIds': [
                    'string',
                ],
                'ServerName': 'string',
                'ServiceRoleArn': 'string',
                'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
                'StatusDescription': 'string',
                'SubnetIds': [
                    'string',
                ],
                'ToolsVersion': 'string',
                'UserArn': 'string'
            },
        ],
        'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def describe_events(ServerName=None, NextToken=None, MaxResults=None):
    """
    Describes events for a specified server. Results are ordered by time, with newest events first.
    This operation is synchronous.
    A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.describe_events(
        ServerName='string',
        NextToken='string',
        MaxResults=123
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server for which you want to view events.
    :type NextToken: string
    :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeEvents again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.
    :type MaxResults: integer
    :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.
    :rtype: dict
    :return: {
        'ServerEvents': [
            {
                'CreatedAt': datetime(2015, 1, 1),
                'ServerName': 'string',
                'Message': 'string',
                'LogUrl': 'string'
            },
        ],
        'NextToken': 'string'
    }
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def describe_node_association_status(NodeAssociationStatusToken=None, ServerName=None):
    """
    See also: AWS API Documentation
    :example: response = client.describe_node_association_status(
        NodeAssociationStatusToken='string',
        ServerName='string'
    )
    :type NodeAssociationStatusToken: string
    :param NodeAssociationStatusToken: [REQUIRED]
    :type ServerName: string
    :param ServerName: [REQUIRED]
    :rtype: dict
    :return: {
        'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'
    }
    :returns:
    (dict) --
    NodeAssociationStatus (string) --
    """
    # NOTE(review): upstream generator emitted no prose description for this
    # operation; presumably returns the status of an associate/disassociate
    # node request identified by the token -- confirm against the AWS API docs.
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime.
    pass
def describe_servers(ServerName=None, NextToken=None, MaxResults=None):
    """
    Lists all configuration management servers that are identified with your account. Only the stored results from Amazon DynamoDB are returned. AWS OpsWorks for Chef Automate does not query other services.
    This operation is synchronous.
    A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.describe_servers(
        ServerName='string',
        NextToken='string',
        MaxResults=123
    )
    :type ServerName: string
    :param ServerName: Describes the server with the specified ServerName.
    :type NextToken: string
    :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeServers again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.
    :type MaxResults: integer
    :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.
    :rtype: dict
    :return: {
        'Servers': [
            {
                'BackupRetentionCount': 123,
                'ServerName': 'string',
                'CreatedAt': datetime(2015, 1, 1),
                'DisableAutomatedBackup': True|False,
                'Endpoint': 'string',
                'Engine': 'string',
                'EngineModel': 'string',
                'EngineAttributes': [
                    {
                        'Name': 'string',
                        'Value': 'string'
                    },
                ],
                'EngineVersion': 'string',
                'InstanceProfileArn': 'string',
                'InstanceType': 'string',
                'KeyPair': 'string',
                'MaintenanceStatus': 'SUCCESS'|'FAILED',
                'PreferredMaintenanceWindow': 'string',
                'PreferredBackupWindow': 'string',
                'SecurityGroupIds': [
                    'string',
                ],
                'ServiceRoleArn': 'string',
                'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
                'StatusReason': 'string',
                'SubnetIds': [
                    'string',
                ],
                'ServerArn': 'string'
            },
        ],
        'NextToken': 'string'
    }
    :returns:
    CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def disassociate_node(ServerName=None, NodeName=None, EngineAttributes=None):
    """
    See also: AWS API Documentation
    :example: response = client.disassociate_node(
        ServerName='string',
        NodeName='string',
        EngineAttributes=[
            {
                'Name': 'string',
                'Value': 'string'
            },
        ]
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    :type NodeName: string
    :param NodeName: [REQUIRED]
    :type EngineAttributes: list
    :param EngineAttributes:
    (dict) --A name/value pair that is specific to the engine of the server.
    Name (string) --The name of the engine attribute.
    Value (string) --The value of the engine attribute.
    :rtype: dict
    :return: {
        'NodeAssociationStatusToken': 'string'
    }
    :returns:
    (dict) --
    NodeAssociationStatusToken (string) --
    """
    # NOTE(review): upstream generator emitted no prose description; presumably
    # disassociates a managed node from the server and returns a token for
    # describe_node_association_status -- confirm against the AWS API docs.
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime.
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned url given a client, its method, and arguments
    :type ClientMethod: string
    :param ClientMethod: The client method to presign for
    :type Params: dict
    :param Params: The parameters normally passed to
    ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid
    for. By default it expires in an hour (3600 seconds)
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By
    default, the http method is whatever is used in the method's model.
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
    as the method name on the client. For example, if the
    method name is create_foo, and you'd normally invoke the
    operation as client.create_foo(**kwargs), if the
    create_foo operation can be paginated, you can use the
    call client.get_paginator('create_foo').
    :rtype: L{botocore.paginate.Paginator}
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def get_waiter():
    """
    Return a waiter for this client.

    NOTE(review): the upstream stub generator emitted no documentation for
    this operation; it presumably mirrors botocore's Client.get_waiter,
    which takes a waiter name and returns a waiter object -- confirm
    against the botocore documentation before relying on this signature.
    """
    pass
def restore_server(BackupId=None, ServerName=None, InstanceType=None, KeyPair=None):
    """
    Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.
    This operation is asynchronous.
    An InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.restore_server(
        BackupId='string',
        ServerName='string',
        InstanceType='string',
        KeyPair='string'
    )
    :type BackupId: string
    :param BackupId: [REQUIRED]
    The ID of the backup that you want to use to restore a server.
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server that you want to restore.
    :type InstanceType: string
    :param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.
    :type KeyPair: string
    :param KeyPair: The name of the key pair to set on the new EC2 instance. This can be helpful if any of the administrators who manage the server no longer have the SSH key.
    :rtype: dict
    :return: {}
    :returns:
    (dict) --
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def start_maintenance(ServerName=None):
    """
    Manually starts server maintenance. This command can be useful if an earlier maintenance attempt failed, and the underlying cause of maintenance failure has been resolved. The server will switch to UNDER_MAINTENANCE state, while maintenance is in progress.
    Maintenance can only be started for HEALTHY and UNHEALTHY servers. An InvalidStateException is thrown otherwise. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.start_maintenance(
        ServerName='string'
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server on which to run maintenance.
    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }
    :returns:
    (string) --
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def update_server(DisableAutomatedBackup=None, BackupRetentionCount=None, ServerName=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None):
    """
    Updates settings for a server.
    This operation is synchronous.
    See also: AWS API Documentation
    :example: response = client.update_server(
        DisableAutomatedBackup=True|False,
        BackupRetentionCount=123,
        ServerName='string',
        PreferredMaintenanceWindow='string',
        PreferredBackupWindow='string'
    )
    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Setting DisableAutomatedBackup to true disables automated or scheduled backups. Automated backups are enabled by default.
    :type BackupRetentionCount: integer
    :param BackupRetentionCount: Sets the number of automated backups that you want to keep.
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server to update.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow:
    DDD:HH:MM (weekly start time) or HH:MM (daily start time).
    Time windows always use coordinated universal time (UTC).
    Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.
    :type PreferredBackupWindow: string
    :param PreferredBackupWindow:
    DDD:HH:MM (weekly start time) or HH:MM (daily start time).
    Time windows always use coordinated universal time (UTC).
    Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.
    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }
    :returns:
    CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    """
    # NOTE(review): the day-of-week value 'Thr' above is copied from the
    # generated upstream docs -- verify against the current AWS API reference.
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime.
    pass
def update_server_engine_attributes(ServerName=None, AttributeName=None, AttributeValue=None):
    """
    Updates engine specific attributes on a specified server. Server will enter the MODIFYING state when this operation is in progress. Only one update can take place at a time.
    This operation can be used to reset the Chef Server main API key (CHEF_PIVOTAL_KEY ).
    This operation is asynchronous.
    This operation can only be called for HEALTHY and UNHEALTHY servers. Otherwise an InvalidStateException is raised. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    :example: response = client.update_server_engine_attributes(
        ServerName='string',
        AttributeName='string',
        AttributeValue='string'
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server to update.
    :type AttributeName: string
    :param AttributeName: [REQUIRED]
    The name of the engine attribute to update.
    :type AttributeValue: string
    :param AttributeValue: The value to set for the attribute.
    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }
    :returns:
    CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
# --- NOTE(review): stray tokenizer/extraction artifact replaced; a duplicated copy of this generated module follows below ---
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def associate_node(ServerName=None, NodeName=None, EngineAttributes=None):
    """
    See also: AWS API Documentation
    :example: response = client.associate_node(
        ServerName='string',
        NodeName='string',
        EngineAttributes=[
            {
                'Name': 'string',
                'Value': 'string'
            },
        ]
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    :type NodeName: string
    :param NodeName: [REQUIRED]
    :type EngineAttributes: list
    :param EngineAttributes:
    (dict) --A name/value pair that is specific to the engine of the server.
    Name (string) --The name of the engine attribute.
    Value (string) --The value of the engine attribute.
    :rtype: dict
    :return: {
        'NodeAssociationStatusToken': 'string'
    }
    :returns:
    (dict) --
    NodeAssociationStatusToken (string) --
    """
    # NOTE(review): upstream generator emitted no prose description; presumably
    # associates a managed node with the server and returns a token for
    # describe_node_association_status -- confirm against the AWS API docs.
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime.
    pass
def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
    as the method name on the client. For example, if the
    method name is create_foo, and you'd normally invoke the
    operation as client.create_foo(**kwargs), if the
    create_foo operation can be paginated, you can use the
    call client.get_paginator('create_foo').
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def create_backup(ServerName=None, Description=None):
    """
    Creates an application-level backup of a server. While the server is BACKING_UP , the server cannot be modified and no additional backup can be created.
    Backups can be created for RUNNING , HEALTHY and UNHEALTHY servers.
    This operation is asynchronous.
    By default 50 manual backups can be created.
    A LimitExceededException is thrown when the maximum number of manual backups is reached. An InvalidStateException is thrown when the server is not in any of RUNNING, HEALTHY, UNHEALTHY. A ResourceNotFoundException is thrown when the server is not found. A ValidationException is thrown when parameters of the request are not valid.
    See also: AWS API Documentation
    :example: response = client.create_backup(
        ServerName='string',
        Description='string'
    )
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server that you want to back up.
    :type Description: string
    :param Description: A user-defined description of the backup.
    :rtype: dict
    :return: {
        'Backup': {
            'BackupArn': 'string',
            'BackupId': 'string',
            'BackupType': 'AUTOMATED'|'MANUAL',
            'CreatedAt': datetime(2015, 1, 1),
            'Description': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'PreferredBackupWindow': 'string',
            'PreferredMaintenanceWindow': 'string',
            'S3DataSize': 123,
            'S3DataUrl': 'string',
            'S3LogUrl': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServerName': 'string',
            'ServiceRoleArn': 'string',
            'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
            'StatusDescription': 'string',
            'SubnetIds': [
                'string',
            ],
            'ToolsVersion': 'string',
            'UserArn': 'string'
        }
    }
    :returns:
    (string) --
    """
    # Documentation stub only; the concrete client method is presumably
    # generated by botocore at runtime -- confirm before calling directly.
    pass
def create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=None, ServerName=None, InstanceProfileArn=None, InstanceType=None, KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None, BackupId=None):
    """
    Creates and immediately starts a new Server. The server can be used once it has reached the HEALTHY state.
    This operation is asynchronous.
    A LimitExceededException is thrown when the maximum number of servers is reached. A ResourceAlreadyExistsException is raised when a server with the same name already exists in the account. A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.
    By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.
    When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.
    By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.
    See also: AWS API Documentation

    :example: response = client.create_server(
        DisableAutomatedBackup=True|False,
        Engine='string',
        EngineModel='string',
        EngineVersion='string',
        EngineAttributes=[
            {
                'Name': 'string',
                'Value': 'string'
            },
        ],
        BackupRetentionCount=123,
        ServerName='string',
        InstanceProfileArn='string',
        InstanceType='string',
        KeyPair='string',
        PreferredMaintenanceWindow='string',
        PreferredBackupWindow='string',
        SecurityGroupIds=[
            'string',
        ],
        ServiceRoleArn='string',
        SubnetIds=[
            'string',
        ],
        BackupId='string'
    )

    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .

    :type Engine: string
    :param Engine: The configuration management engine to use. Valid values include Chef .

    :type EngineModel: string
    :param EngineModel: The engine model, or option. Valid values include Single .

    :type EngineVersion: string
    :param EngineVersion: The major release version of the engine that you want to use. Values depend on the engine that you choose.

    :type EngineAttributes: list
    :param EngineAttributes: Engine attributes on a specified server.
        Attributes accepted in a createServer request:
        CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
        (dict) --A name/value pair that is specific to the engine of the server.
        Name (string) --The name of the engine attribute.
        Value (string) --The value of the engine attribute.

    :type BackupRetentionCount: integer
    :param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 .

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.

    :type InstanceProfileArn: string
    :param InstanceProfileArn: [REQUIRED]
        The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.

    :type InstanceType: string
    :param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .

    :type KeyPair: string
    :param KeyPair: The Amazon EC2 key pair to set for the instance. You may specify this parameter to connect to your instances by using SSH.

    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.
        Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)

    :type PreferredBackupWindow: string
    :param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. Valid values must be specified in one of the following formats:
        HH:MM for daily backups
        DDD:HH:MM for weekly backups
        The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.
        Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)

    :type SecurityGroupIds: list
    :param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .
        If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).
        (string) --

    :type ServiceRoleArn: string
    :param ServiceRoleArn: [REQUIRED]
        The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.

    :type SubnetIds: list
    :param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.
        Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.
        EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.
        For more information about supported Amazon EC2 platforms, see Supported Platforms .
        (string) --

    :type BackupId: string
    :param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.

    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }

    :returns:
        CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
        CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    """
    pass
def delete_backup(BackupId=None):
    """
    Deletes a backup. You can delete both manual and automated backups.
    This operation is asynchronous.
    An InvalidStateException is thrown when a backup is already being deleted. A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.
    See also: AWS API Documentation

    :example: response = client.delete_backup(
        BackupId='string'
    )

    :type BackupId: string
    :param BackupId: [REQUIRED]
        The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .

    :rtype: dict
    :return: {}
    """
    pass
def delete_server(ServerName=None):
    """
    Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status is updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.
    This operation is asynchronous.
    An InvalidStateException is thrown when a server is already being deleted. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation

    :example: response = client.delete_server(
        ServerName='string'
    )

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The ID of the server to delete.

    :rtype: dict
    :return: {}
    """
    pass
def describe_account_attributes():
    """
    Returns attributes of your account for this service, such as resource
    limits and current usage, so that you can request limit increases
    before a limit is reached or exceeded.
    This operation is synchronous.
    See also: AWS API Documentation

    :example: response = client.describe_account_attributes()

    :rtype: dict
    :return: {
        'Attributes': [
            {
                'Name': 'string',
                'Maximum': 123,
                'Used': 123
            },
        ]
    }
    """
    pass
def describe_backups(BackupId=None, ServerName=None, NextToken=None, MaxResults=None):
    """
    Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.
    This operation is synchronous.
    A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation

    :example: response = client.describe_backups(
        BackupId='string',
        ServerName='string',
        NextToken='string',
        MaxResults=123
    )

    :type BackupId: string
    :param BackupId: Describes a single backup.

    :type ServerName: string
    :param ServerName: Returns backups for the server with the specified ServerName.

    :type NextToken: string
    :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.

    :type MaxResults: integer
    :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

    :rtype: dict
    :return: {
        'Backups': [
            {
                'BackupArn': 'string',
                'BackupId': 'string',
                'BackupType': 'AUTOMATED'|'MANUAL',
                'CreatedAt': datetime(2015, 1, 1),
                'Description': 'string',
                'Engine': 'string',
                'EngineModel': 'string',
                'EngineVersion': 'string',
                'InstanceProfileArn': 'string',
                'InstanceType': 'string',
                'KeyPair': 'string',
                'PreferredBackupWindow': 'string',
                'PreferredMaintenanceWindow': 'string',
                'S3DataSize': 123,
                'S3DataUrl': 'string',
                'S3LogUrl': 'string',
                'SecurityGroupIds': [
                    'string',
                ],
                'ServerName': 'string',
                'ServiceRoleArn': 'string',
                'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
                'StatusDescription': 'string',
                'SubnetIds': [
                    'string',
                ],
                'ToolsVersion': 'string',
                'UserArn': 'string'
            },
        ],
        'NextToken': 'string'
    }

    :returns:
        (string) --
    """
    pass
def describe_events(ServerName=None, NextToken=None, MaxResults=None):
    """
    Describes events for a specified server. Results are ordered by time, with newest events first.
    This operation is synchronous.
    A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation

    :example: response = client.describe_events(
        ServerName='string',
        NextToken='string',
        MaxResults=123
    )

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The name of the server for which you want to view events.

    :type NextToken: string
    :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeEvents again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.

    :type MaxResults: integer
    :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

    :rtype: dict
    :return: {
        'ServerEvents': [
            {
                'CreatedAt': datetime(2015, 1, 1),
                'ServerName': 'string',
                'Message': 'string',
                'LogUrl': 'string'
            },
        ],
        'NextToken': 'string'
    }
    """
    pass
def describe_node_association_status(NodeAssociationStatusToken=None, ServerName=None):
    """
    Returns the current status of an existing node association request.
    See also: AWS API Documentation

    :example: response = client.describe_node_association_status(
        NodeAssociationStatusToken='string',
        ServerName='string'
    )

    :type NodeAssociationStatusToken: string
    :param NodeAssociationStatusToken: [REQUIRED]
        NOTE(review): the generated source does not describe this field;
        presumably it is the token returned by an associate/disassociate
        node request -- confirm against the service API reference.

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The name of the server for which to check the association status.

    :rtype: dict
    :return: {
        'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'
    }

    :returns:
        (dict) --
        NodeAssociationStatus (string) --
    """
    pass
def describe_servers(ServerName=None, NextToken=None, MaxResults=None):
    """
    Lists all configuration management servers that are identified with your account. Only the stored results from Amazon DynamoDB are returned. AWS OpsWorks for Chef Automate does not query other services.
    This operation is synchronous.
    A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation

    :example: response = client.describe_servers(
        ServerName='string',
        NextToken='string',
        MaxResults=123
    )

    :type ServerName: string
    :param ServerName: Describes the server with the specified ServerName.

    :type NextToken: string
    :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeServers again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.

    :type MaxResults: integer
    :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

    :rtype: dict
    :return: {
        'Servers': [
            {
                'BackupRetentionCount': 123,
                'ServerName': 'string',
                'CreatedAt': datetime(2015, 1, 1),
                'DisableAutomatedBackup': True|False,
                'Endpoint': 'string',
                'Engine': 'string',
                'EngineModel': 'string',
                'EngineAttributes': [
                    {
                        'Name': 'string',
                        'Value': 'string'
                    },
                ],
                'EngineVersion': 'string',
                'InstanceProfileArn': 'string',
                'InstanceType': 'string',
                'KeyPair': 'string',
                'MaintenanceStatus': 'SUCCESS'|'FAILED',
                'PreferredMaintenanceWindow': 'string',
                'PreferredBackupWindow': 'string',
                'SecurityGroupIds': [
                    'string',
                ],
                'ServiceRoleArn': 'string',
                'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
                'StatusReason': 'string',
                'SubnetIds': [
                    'string',
                ],
                'ServerArn': 'string'
            },
        ],
        'NextToken': 'string'
    }

    :returns:
        CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
        CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    """
    pass
def disassociate_node(ServerName=None, NodeName=None, EngineAttributes=None):
    """
    Disassociates a node from a server.
    NOTE(review): the generated source provides no prose for this
    operation; the description above is inferred from the operation name
    and return value -- confirm against the service API reference.
    See also: AWS API Documentation

    :example: response = client.disassociate_node(
        ServerName='string',
        NodeName='string',
        EngineAttributes=[
            {
                'Name': 'string',
                'Value': 'string'
            },
        ]
    )

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The name of the server from which to disassociate the node.

    :type NodeName: string
    :param NodeName: [REQUIRED]
        The name of the node to disassociate.

    :type EngineAttributes: list
    :param EngineAttributes:
        (dict) --A name/value pair that is specific to the engine of the server.
        Name (string) --The name of the engine attribute.
        Value (string) --The value of the engine attribute.

    :rtype: dict
    :return: {
        'NodeAssociationStatusToken': 'string'
    }

    :returns:
        (dict) --
        NodeAssociationStatusToken (string) --
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Build a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The name of the client method to presign for.

    :type Params: dict
    :param Params: The parameters that would normally be passed to
        ClientMethod.

    :type ExpiresIn: int
    :param ExpiresIn: How many seconds the presigned URL stays valid.
        Defaults to one hour (3600 seconds).

    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, the method from the operation's model is used.
    """
    pass
def get_paginator(operation_name=None):
    """
    Return a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, which matches the client
        method name. For example, if the method name is create_foo and you
        would normally invoke the operation as client.create_foo(**kwargs),
        then -- provided the create_foo operation can be paginated -- you
        can call client.get_paginator('create_foo').

    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_waiter():
    """
    Stub for the client's get_waiter entry point.
    NOTE(review): the generated source provides no documentation or
    parameters for this operation; presumably it returns a waiter object
    by name, as on other boto3 clients -- confirm against botocore.
    """
    pass
def restore_server(BackupId=None, ServerName=None, InstanceType=None, KeyPair=None):
    """
    Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.
    This operation is asynchronous.
    An InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation

    :example: response = client.restore_server(
        BackupId='string',
        ServerName='string',
        InstanceType='string',
        KeyPair='string'
    )

    :type BackupId: string
    :param BackupId: [REQUIRED]
        The ID of the backup that you want to use to restore a server.

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The name of the server that you want to restore.

    :type InstanceType: string
    :param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.

    :type KeyPair: string
    :param KeyPair: The name of the key pair to set on the new EC2 instance. This can be helpful if any of the administrators who manage the server no longer have the SSH key.

    :rtype: dict
    :return: {}

    :returns:
        (dict) --
    """
    pass
def start_maintenance(ServerName=None):
    """
    Manually starts server maintenance. This command can be useful if an earlier maintenance attempt failed, and the underlying cause of maintenance failure has been resolved. The server will switch to UNDER_MAINTENANCE state, while maintenance is in progress.
    Maintenance can only be started for HEALTHY and UNHEALTHY servers. An InvalidStateException is thrown otherwise. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation

    :example: response = client.start_maintenance(
        ServerName='string'
    )

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The name of the server on which to run maintenance.

    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }

    :returns:
        (string) --
    """
    pass
def update_server(DisableAutomatedBackup=None, BackupRetentionCount=None, ServerName=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None):
    """
    Updates settings for a server.
    This operation is synchronous.
    See also: AWS API Documentation

    :example: response = client.update_server(
        DisableAutomatedBackup=True|False,
        BackupRetentionCount=123,
        ServerName='string',
        PreferredMaintenanceWindow='string',
        PreferredBackupWindow='string'
    )

    :type DisableAutomatedBackup: boolean
    :param DisableAutomatedBackup: Setting DisableAutomatedBackup to true disables automated or scheduled backups. Automated backups are enabled by default.

    :type BackupRetentionCount: integer
    :param BackupRetentionCount: Sets the number of automated backups that you want to keep.

    :type ServerName: string
    :param ServerName: [REQUIRED]
        The name of the server to update.

    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow:
        DDD:HH:MM (weekly start time) or HH:MM (daily start time).
        Time windows always use coordinated universal time (UTC).
        Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.

    :type PreferredBackupWindow: string
    :param PreferredBackupWindow:
        DDD:HH:MM (weekly start time) or HH:MM (daily start time).
        Time windows always use coordinated universal time (UTC).
        Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.

    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }

    :returns:
        CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
        CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    """
    pass
def update_server_engine_attributes(ServerName=None, AttributeName=None, AttributeValue=None):
    """
    Updates engine-specific attributes on a specified server. The server enters the MODIFYING state while this operation is in progress. Only one update can take place at a time.
    This operation can be used to reset the Chef server main API key (CHEF_PIVOTAL_KEY ).
    This operation is asynchronous.
    This operation can only be called for HEALTHY and UNHEALTHY servers. Otherwise an InvalidStateException is raised. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.
    See also: AWS API Documentation
    
    
    :example: response = client.update_server_engine_attributes(
        ServerName='string',
        AttributeName='string',
        AttributeValue='string'
    )
    
    
    :type ServerName: string
    :param ServerName: [REQUIRED]
    The name of the server to update.
    

    :type AttributeName: string
    :param AttributeName: [REQUIRED]
    The name of the engine attribute to update.
    

    :type AttributeValue: string
    :param AttributeValue: The value to set for the attribute.

    :rtype: dict
    :return: {
        'Server': {
            'BackupRetentionCount': 123,
            'ServerName': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'DisableAutomatedBackup': True|False,
            'Endpoint': 'string',
            'Engine': 'string',
            'EngineModel': 'string',
            'EngineAttributes': [
                {
                    'Name': 'string',
                    'Value': 'string'
                },
            ],
            'EngineVersion': 'string',
            'InstanceProfileArn': 'string',
            'InstanceType': 'string',
            'KeyPair': 'string',
            'MaintenanceStatus': 'SUCCESS'|'FAILED',
            'PreferredMaintenanceWindow': 'string',
            'PreferredBackupWindow': 'string',
            'SecurityGroupIds': [
                'string',
            ],
            'ServiceRoleArn': 'string',
            'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',
            'StatusReason': 'string',
            'SubnetIds': [
                'string',
            ],
            'ServerArn': 'string'
        }
    }
    
    
    :returns: 
    CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    """
    pass
|
flexible
|
{
"blob_id": "1947bd280234189ed35277c449cd708a204ea7a4",
"index": 6651,
"step-1": "<mask token>\n\n\ndef create_backup(ServerName=None, Description=None):\n \"\"\"\n Creates an application-level backup of a server. While the server is BACKING_UP , the server can not be modified and no additional backup can be created.\n Backups can be created for RUNNING , HEALTHY and UNHEALTHY servers.\n This operation is asnychronous.\n By default 50 manual backups can be created.\n A LimitExceededException is thrown then the maximum number of manual backup is reached. A InvalidStateException is thrown when the server is not in any of RUNNING, HEALTHY, UNHEALTHY. A ResourceNotFoundException is thrown when the server is not found. A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.create_backup(\n ServerName='string',\n Description='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to back up.\n \n\n :type Description: string\n :param Description: A user-defined description of the backup.\n\n :rtype: dict\n :return: {\n 'Backup': {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\ndef create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=\n None, 
EngineVersion=None, EngineAttributes=None, BackupRetentionCount=\n None, ServerName=None, InstanceProfileArn=None, InstanceType=None,\n KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=\n None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None,\n BackupId=None):\n \"\"\"\n Creates and immedately starts a new Server. The server can be used once it has reached the HEALTHY state.\n This operation is asnychronous.\n A LimitExceededException is thrown then the maximum number of server backup is reached. A ResourceAlreadyExistsException is raise when a server with the same name already exists in the account. A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.\n When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.\n By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. 
To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.\n See also: AWS API Documentation\n \n \n :example: response = client.create_server(\n DisableAutomatedBackup=True|False,\n Engine='string',\n EngineModel='string',\n EngineVersion='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n BackupRetentionCount=123,\n ServerName='string',\n InstanceProfileArn='string',\n InstanceType='string',\n KeyPair='string',\n PreferredMaintenanceWindow='string',\n PreferredBackupWindow='string',\n SecurityGroupIds=[\n 'string',\n ],\n ServiceRoleArn='string',\n SubnetIds=[\n 'string',\n ],\n BackupId='string'\n )\n \n \n :type DisableAutomatedBackup: boolean\n :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .\n\n :type Engine: string\n :param Engine: The configuration management engine to use. Valid values include Chef .\n\n :type EngineModel: string\n :param EngineModel: The engine model, or option. Valid values include Single .\n\n :type EngineVersion: string\n :param EngineVersion: The major release version of the engine that you want to use. Values depend on the engine that you choose.\n\n :type EngineAttributes: list\n :param EngineAttributes: Engine attributes on a specified server.\n Attributes accepted in a createServer request:\n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :type BackupRetentionCount: integer\n :param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. 
The default value is 1 .\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.\n \n\n :type InstanceProfileArn: string\n :param InstanceProfileArn: [REQUIRED]\n The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.\n \n\n :type InstanceType: string\n :param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .\n\n :type KeyPair: string\n :param KeyPair: The Amazon EC2 key pair to set for the instance. You may specify this parameter to connect to your instances by using SSH.\n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.\n Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. 
Valid values must be specified in one of the following formats:\n HH:MM for daily backups\n DDD:HH:MM for weekly backups\n The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.\n Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .\n If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).\n (string) --\n \n\n :type ServiceRoleArn: string\n :param ServiceRoleArn: [REQUIRED]\n The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.\n \n\n :type SubnetIds: list\n :param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.\n Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.\n EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. 
If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.\n For more information about supported Amazon EC2 platforms, see Supported Platforms .\n (string) --\n \n\n :type BackupId: string\n :param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.\n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\n\ndef delete_backup(BackupId=None):\n \"\"\"\n Deletes a backup. You can delete both manual and automated backups.\n This operation is asynchronous.\n A InvalidStateException is thrown then a backup is already deleting. 
A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_backup(\n BackupId='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n\ndef delete_server(ServerName=None):\n \"\"\"\n Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.\n This operation is asynchronous.\n A InvalidStateException is thrown then a server is already deleting. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_server(\n ServerName='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The ID of the server to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n\ndef describe_account_attributes():\n \"\"\"\n Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.\n This operation is synchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_account_attributes()\n \n \n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Name': 'string',\n 'Maximum': 123,\n 'Used': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\n\ndef describe_backups(BackupId=None, ServerName=None, NextToken=None,\n MaxResults=None):\n \"\"\"\n Describes backups. 
The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_backups(\n BackupId='string',\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type BackupId: string\n :param BackupId: Describes a single backup.\n\n :type ServerName: string\n :param ServerName: Returns backups for the server with the specified ServerName.\n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. 
If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Backups': [\n {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef describe_node_association_status(NodeAssociationStatusToken=None,\n ServerName=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.describe_node_association_status(\n NodeAssociationStatusToken='string',\n ServerName='string'\n )\n \n \n :type NodeAssociationStatusToken: string\n :param NodeAssociationStatusToken: [REQUIRED]\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatus (string) --\n \n \n \n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,\n HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n 
:param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\n\ndef get_waiter():\n \"\"\"\n \n \"\"\"\n pass\n\n\ndef restore_server(BackupId=None, ServerName=None, InstanceType=None,\n KeyPair=None):\n \"\"\"\n Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.\n This operation is asynchronous.\n A InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.restore_server(\n BackupId='string',\n ServerName='string',\n InstanceType='string',\n KeyPair='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup that you want to use to restore a server.\n \n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to restore.\n \n\n :type InstanceType: string\n :param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.\n\n :type KeyPair: string\n :param KeyPair: The name of the key pair to set on the new EC2 instance. This can be helpful if any of the administrators who manage the server no longer have the SSH key.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef associate_node(ServerName=None, NodeName=None, EngineAttributes=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.associate_node(\n ServerName='string',\n NodeName='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :type NodeName: string\n :param NodeName: [REQUIRED]\n\n :type EngineAttributes: list\n :param EngineAttributes: \n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :rtype: dict\n :return: {\n 'NodeAssociationStatusToken': 'string'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatusToken (string) --\n \n \n \n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef create_backup(ServerName=None, Description=None):\n \"\"\"\n Creates an application-level backup of a server. While the server is BACKING_UP , the server can not be modified and no additional backup can be created.\n Backups can be created for RUNNING , HEALTHY and UNHEALTHY servers.\n This operation is asnychronous.\n By default 50 manual backups can be created.\n A LimitExceededException is thrown then the maximum number of manual backup is reached. A InvalidStateException is thrown when the server is not in any of RUNNING, HEALTHY, UNHEALTHY. A ResourceNotFoundException is thrown when the server is not found. 
A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.create_backup(\n ServerName='string',\n Description='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to back up.\n \n\n :type Description: string\n :param Description: A user-defined description of the backup.\n\n :rtype: dict\n :return: {\n 'Backup': {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\ndef create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=\n None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=\n None, ServerName=None, InstanceProfileArn=None, InstanceType=None,\n KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=\n None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None,\n BackupId=None):\n \"\"\"\n Creates and immedately starts a new Server. The server can be used once it has reached the HEALTHY state.\n This operation is asnychronous.\n A LimitExceededException is thrown then the maximum number of server backup is reached. A ResourceAlreadyExistsException is raise when a server with the same name already exists in the account. 
A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.\n When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.\n By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.\n See also: AWS API Documentation\n \n \n :example: response = client.create_server(\n DisableAutomatedBackup=True|False,\n Engine='string',\n EngineModel='string',\n EngineVersion='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n BackupRetentionCount=123,\n ServerName='string',\n InstanceProfileArn='string',\n InstanceType='string',\n KeyPair='string',\n PreferredMaintenanceWindow='string',\n PreferredBackupWindow='string',\n SecurityGroupIds=[\n 'string',\n ],\n ServiceRoleArn='string',\n SubnetIds=[\n 'string',\n ],\n BackupId='string'\n )\n \n \n :type DisableAutomatedBackup: boolean\n :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .\n\n :type Engine: string\n :param Engine: The configuration management engine to use. Valid values include Chef .\n\n :type EngineModel: string\n :param EngineModel: The engine model, or option. Valid values include Single .\n\n :type EngineVersion: string\n :param EngineVersion: The major release version of the engine that you want to use. 
Values depend on the engine that you choose.\n\n :type EngineAttributes: list\n :param EngineAttributes: Engine attributes on a specified server.\n Attributes accepted in a createServer request:\n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :type BackupRetentionCount: integer\n :param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 .\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.\n \n\n :type InstanceProfileArn: string\n :param InstanceProfileArn: [REQUIRED]\n The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.\n \n\n :type InstanceType: string\n :param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .\n\n :type KeyPair: string\n :param KeyPair: The Amazon EC2 key pair to set for the instance. 
You may specify this parameter to connect to your instances by using SSH.\n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.\n Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. Valid values must be specified in one of the following formats:\n HH:MM for daily backups\n DDD:HH:MM for weekly backups\n The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.\n Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .\n If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).\n (string) --\n \n\n :type ServiceRoleArn: string\n :param ServiceRoleArn: [REQUIRED]\n The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. 
Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.\n \n\n :type SubnetIds: list\n :param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.\n Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.\n EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.\n For more information about supported Amazon EC2 platforms, see Supported Platforms .\n (string) --\n \n\n :type BackupId: string\n :param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.\n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n 
\n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\n\ndef delete_backup(BackupId=None):\n \"\"\"\n Deletes a backup. You can delete both manual and automated backups.\n This operation is asynchronous.\n A InvalidStateException is thrown then a backup is already deleting. A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_backup(\n BackupId='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n\ndef delete_server(ServerName=None):\n \"\"\"\n Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.\n This operation is asynchronous.\n A InvalidStateException is thrown then a server is already deleting. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_server(\n ServerName='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The ID of the server to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n\ndef describe_account_attributes():\n \"\"\"\n Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.\n This operation is synchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_account_attributes()\n \n \n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Name': 'string',\n 'Maximum': 123,\n 'Used': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\n\ndef describe_backups(BackupId=None, ServerName=None, NextToken=None,\n MaxResults=None):\n \"\"\"\n Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_backups(\n BackupId='string',\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type BackupId: string\n :param BackupId: Describes a single backup.\n\n :type ServerName: string\n :param ServerName: Returns backups for the server with the specified ServerName.\n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. 
If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Backups': [\n {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef describe_node_association_status(NodeAssociationStatusToken=None,\n ServerName=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.describe_node_association_status(\n NodeAssociationStatusToken='string',\n ServerName='string'\n )\n \n \n :type NodeAssociationStatusToken: string\n :param NodeAssociationStatusToken: [REQUIRED]\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'\n }\n \n \n :returns: 
\n (dict) --\n NodeAssociationStatus (string) --\n \n \n \n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,\n HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\n\ndef get_waiter():\n \"\"\"\n \n \"\"\"\n pass\n\n\ndef restore_server(BackupId=None, ServerName=None, InstanceType=None,\n KeyPair=None):\n \"\"\"\n Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.\n This operation is asynchronous.\n A InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.restore_server(\n BackupId='string',\n ServerName='string',\n InstanceType='string',\n KeyPair='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup that you want to use to restore a server.\n \n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to restore.\n \n\n :type InstanceType: string\n :param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.\n\n :type KeyPair: string\n :param KeyPair: The name of the key pair to set on the new EC2 instance. This can be helpful if any of the administrators who manage the server no longer have the SSH key.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef associate_node(ServerName=None, NodeName=None, EngineAttributes=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.associate_node(\n ServerName='string',\n NodeName='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :type NodeName: string\n :param NodeName: [REQUIRED]\n\n :type EngineAttributes: list\n :param EngineAttributes: \n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :rtype: dict\n :return: {\n 'NodeAssociationStatusToken': 'string'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatusToken (string) --\n \n \n \n \"\"\"\n pass\n\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\n\ndef create_backup(ServerName=None, Description=None):\n \"\"\"\n Creates an application-level backup of a server. While the server is BACKING_UP , the server can not be modified and no additional backup can be created.\n Backups can be created for RUNNING , HEALTHY and UNHEALTHY servers.\n This operation is asnychronous.\n By default 50 manual backups can be created.\n A LimitExceededException is thrown then the maximum number of manual backup is reached. A InvalidStateException is thrown when the server is not in any of RUNNING, HEALTHY, UNHEALTHY. A ResourceNotFoundException is thrown when the server is not found. 
A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.create_backup(\n ServerName='string',\n Description='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to back up.\n \n\n :type Description: string\n :param Description: A user-defined description of the backup.\n\n :rtype: dict\n :return: {\n 'Backup': {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\ndef create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=\n None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=\n None, ServerName=None, InstanceProfileArn=None, InstanceType=None,\n KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=\n None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None,\n BackupId=None):\n \"\"\"\n Creates and immedately starts a new Server. The server can be used once it has reached the HEALTHY state.\n This operation is asnychronous.\n A LimitExceededException is thrown then the maximum number of server backup is reached. A ResourceAlreadyExistsException is raise when a server with the same name already exists in the account. 
A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.\n When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.\n By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.\n See also: AWS API Documentation\n \n \n :example: response = client.create_server(\n DisableAutomatedBackup=True|False,\n Engine='string',\n EngineModel='string',\n EngineVersion='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n BackupRetentionCount=123,\n ServerName='string',\n InstanceProfileArn='string',\n InstanceType='string',\n KeyPair='string',\n PreferredMaintenanceWindow='string',\n PreferredBackupWindow='string',\n SecurityGroupIds=[\n 'string',\n ],\n ServiceRoleArn='string',\n SubnetIds=[\n 'string',\n ],\n BackupId='string'\n )\n \n \n :type DisableAutomatedBackup: boolean\n :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .\n\n :type Engine: string\n :param Engine: The configuration management engine to use. Valid values include Chef .\n\n :type EngineModel: string\n :param EngineModel: The engine model, or option. Valid values include Single .\n\n :type EngineVersion: string\n :param EngineVersion: The major release version of the engine that you want to use. 
Values depend on the engine that you choose.\n\n :type EngineAttributes: list\n :param EngineAttributes: Engine attributes on a specified server.\n Attributes accepted in a createServer request:\n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :type BackupRetentionCount: integer\n :param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 .\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.\n \n\n :type InstanceProfileArn: string\n :param InstanceProfileArn: [REQUIRED]\n The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.\n \n\n :type InstanceType: string\n :param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .\n\n :type KeyPair: string\n :param KeyPair: The Amazon EC2 key pair to set for the instance. 
You may specify this parameter to connect to your instances by using SSH.\n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.\n Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. Valid values must be specified in one of the following formats:\n HH:MM for daily backups\n DDD:HH:MM for weekly backups\n The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.\n Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .\n If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).\n (string) --\n \n\n :type ServiceRoleArn: string\n :param ServiceRoleArn: [REQUIRED]\n The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. 
Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.\n \n\n :type SubnetIds: list\n :param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.\n Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.\n EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.\n For more information about supported Amazon EC2 platforms, see Supported Platforms .\n (string) --\n \n\n :type BackupId: string\n :param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.\n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n 
\n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\n\ndef delete_backup(BackupId=None):\n \"\"\"\n Deletes a backup. You can delete both manual and automated backups.\n This operation is asynchronous.\n A InvalidStateException is thrown then a backup is already deleting. A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_backup(\n BackupId='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n\ndef delete_server(ServerName=None):\n \"\"\"\n Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.\n This operation is asynchronous.\n A InvalidStateException is thrown then a server is already deleting. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_server(\n ServerName='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The ID of the server to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n\ndef describe_account_attributes():\n \"\"\"\n Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.\n This operation is synchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_account_attributes()\n \n \n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Name': 'string',\n 'Maximum': 123,\n 'Used': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\n\ndef describe_backups(BackupId=None, ServerName=None, NextToken=None,\n MaxResults=None):\n \"\"\"\n Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_backups(\n BackupId='string',\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type BackupId: string\n :param BackupId: Describes a single backup.\n\n :type ServerName: string\n :param ServerName: Returns backups for the server with the specified ServerName.\n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. 
If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Backups': [\n {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef describe_node_association_status(NodeAssociationStatusToken=None,\n ServerName=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.describe_node_association_status(\n NodeAssociationStatusToken='string',\n ServerName='string'\n )\n \n \n :type NodeAssociationStatusToken: string\n :param NodeAssociationStatusToken: [REQUIRED]\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'\n }\n \n \n :returns: 
\n (dict) --\n NodeAssociationStatus (string) --\n \n \n \n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef disassociate_node(ServerName=None, NodeName=None, EngineAttributes=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_node(\n ServerName='string',\n NodeName='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :type NodeName: string\n :param NodeName: [REQUIRED]\n\n :type EngineAttributes: list\n :param EngineAttributes: \n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :rtype: dict\n :return: {\n 'NodeAssociationStatusToken': 'string'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatusToken (string) --\n \n \n \n \"\"\"\n pass\n\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,\n HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\n\ndef get_waiter():\n \"\"\"\n \n \"\"\"\n pass\n\n\ndef restore_server(BackupId=None, ServerName=None, InstanceType=None,\n KeyPair=None):\n \"\"\"\n Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.\n This operation is asynchronous.\n A InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.restore_server(\n BackupId='string',\n ServerName='string',\n InstanceType='string',\n KeyPair='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup that you want to use to restore a server.\n \n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to restore.\n \n\n :type InstanceType: string\n :param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.\n\n :type KeyPair: string\n :param KeyPair: The name of the key pair to set on the new EC2 instance. 
This can be helpful if any of the administrators who manage the server no longer have the SSH key.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef update_server(DisableAutomatedBackup=None, BackupRetentionCount=None,\n ServerName=None, PreferredMaintenanceWindow=None, PreferredBackupWindow\n =None):\n \"\"\"\n Updates settings for a server.\n This operation is synchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.update_server(\n DisableAutomatedBackup=True|False,\n BackupRetentionCount=123,\n ServerName='string',\n PreferredMaintenanceWindow='string',\n PreferredBackupWindow='string'\n )\n \n \n :type DisableAutomatedBackup: boolean\n :param DisableAutomatedBackup: Setting DisableAutomatedBackup to true disables automated or scheduled backups. Automated backups are enabled by default.\n\n :type BackupRetentionCount: integer\n :param BackupRetentionCount: Sets the number of automated backups that you want to keep.\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server to update.\n \n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: \n DDD:HH:MM (weekly start time) or HH:MM (daily start time).\n Time windows always use coordinated universal time (UTC).\n Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: \n DDD:HH:MM (weekly start time) or HH:MM (daily start time).\n Time windows always use coordinated universal time (UTC).\n Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.\n \n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n 
},\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\n\ndef update_server_engine_attributes(ServerName=None, AttributeName=None,\n AttributeValue=None):\n \"\"\"\n Updates engine specific attributes on a specified server. Server will enter the MODIFYING state when this operation is in progress. Only one update can take place at a time.\n This operation can be use to reset Chef Server main API key (CHEF_PIVOTAL_KEY ).\n This operation is asynchronous.\n This operation can only be called for HEALTHY and UNHEALTHY servers. Otherwise a InvalidStateException is raised. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.update_server_engine_attributes(\n ServerName='string',\n AttributeName='string',\n AttributeValue='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server to update.\n \n\n :type AttributeName: string\n :param AttributeName: [REQUIRED]\n The name of the engine attribute to update.\n \n\n :type AttributeValue: string\n :param AttributeValue: The value to set for the attribute.\n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n",
"step-4": "<mask token>\n\n\ndef associate_node(ServerName=None, NodeName=None, EngineAttributes=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.associate_node(\n ServerName='string',\n NodeName='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :type NodeName: string\n :param NodeName: [REQUIRED]\n\n :type EngineAttributes: list\n :param EngineAttributes: \n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :rtype: dict\n :return: {\n 'NodeAssociationStatusToken': 'string'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatusToken (string) --\n \n \n \n \"\"\"\n pass\n\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\n\ndef create_backup(ServerName=None, Description=None):\n \"\"\"\n Creates an application-level backup of a server. While the server is BACKING_UP , the server can not be modified and no additional backup can be created.\n Backups can be created for RUNNING , HEALTHY and UNHEALTHY servers.\n This operation is asnychronous.\n By default 50 manual backups can be created.\n A LimitExceededException is thrown then the maximum number of manual backup is reached. A InvalidStateException is thrown when the server is not in any of RUNNING, HEALTHY, UNHEALTHY. A ResourceNotFoundException is thrown when the server is not found. 
A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.create_backup(\n ServerName='string',\n Description='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to back up.\n \n\n :type Description: string\n :param Description: A user-defined description of the backup.\n\n :rtype: dict\n :return: {\n 'Backup': {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\ndef create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=\n None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=\n None, ServerName=None, InstanceProfileArn=None, InstanceType=None,\n KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=\n None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None,\n BackupId=None):\n \"\"\"\n Creates and immedately starts a new Server. The server can be used once it has reached the HEALTHY state.\n This operation is asnychronous.\n A LimitExceededException is thrown then the maximum number of server backup is reached. A ResourceAlreadyExistsException is raise when a server with the same name already exists in the account. 
A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.\n When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.\n By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.\n See also: AWS API Documentation\n \n \n :example: response = client.create_server(\n DisableAutomatedBackup=True|False,\n Engine='string',\n EngineModel='string',\n EngineVersion='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n BackupRetentionCount=123,\n ServerName='string',\n InstanceProfileArn='string',\n InstanceType='string',\n KeyPair='string',\n PreferredMaintenanceWindow='string',\n PreferredBackupWindow='string',\n SecurityGroupIds=[\n 'string',\n ],\n ServiceRoleArn='string',\n SubnetIds=[\n 'string',\n ],\n BackupId='string'\n )\n \n \n :type DisableAutomatedBackup: boolean\n :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .\n\n :type Engine: string\n :param Engine: The configuration management engine to use. Valid values include Chef .\n\n :type EngineModel: string\n :param EngineModel: The engine model, or option. Valid values include Single .\n\n :type EngineVersion: string\n :param EngineVersion: The major release version of the engine that you want to use. 
Values depend on the engine that you choose.\n\n :type EngineAttributes: list\n :param EngineAttributes: Engine attributes on a specified server.\n Attributes accepted in a createServer request:\n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :type BackupRetentionCount: integer\n :param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 .\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.\n \n\n :type InstanceProfileArn: string\n :param InstanceProfileArn: [REQUIRED]\n The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.\n \n\n :type InstanceType: string\n :param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .\n\n :type KeyPair: string\n :param KeyPair: The Amazon EC2 key pair to set for the instance. 
You may specify this parameter to connect to your instances by using SSH.\n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.\n Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. Valid values must be specified in one of the following formats:\n HH:MM for daily backups\n DDD:HH:MM for weekly backups\n The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.\n Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .\n If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).\n (string) --\n \n\n :type ServiceRoleArn: string\n :param ServiceRoleArn: [REQUIRED]\n The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. 
Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.\n \n\n :type SubnetIds: list\n :param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.\n Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.\n EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.\n For more information about supported Amazon EC2 platforms, see Supported Platforms .\n (string) --\n \n\n :type BackupId: string\n :param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.\n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n 
\n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\n\ndef delete_backup(BackupId=None):\n \"\"\"\n Deletes a backup. You can delete both manual and automated backups.\n This operation is asynchronous.\n A InvalidStateException is thrown then a backup is already deleting. A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_backup(\n BackupId='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n\ndef delete_server(ServerName=None):\n \"\"\"\n Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.\n This operation is asynchronous.\n A InvalidStateException is thrown then a server is already deleting. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_server(\n ServerName='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The ID of the server to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\n\ndef describe_account_attributes():\n \"\"\"\n Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.\n This operation is synchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_account_attributes()\n \n \n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Name': 'string',\n 'Maximum': 123,\n 'Used': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\n\ndef describe_backups(BackupId=None, ServerName=None, NextToken=None,\n MaxResults=None):\n \"\"\"\n Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_backups(\n BackupId='string',\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type BackupId: string\n :param BackupId: Describes a single backup.\n\n :type ServerName: string\n :param ServerName: Returns backups for the server with the specified ServerName.\n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. 
If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Backups': [\n {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\ndef describe_events(ServerName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Describes events for a specified server. Results are ordered by time, with newest events first.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_events(\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server for which you want to view events.\n \n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeEvents again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. 
If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'ServerEvents': [\n {\n 'CreatedAt': datetime(2015, 1, 1),\n 'ServerName': 'string',\n 'Message': 'string',\n 'LogUrl': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\n\ndef describe_node_association_status(NodeAssociationStatusToken=None,\n ServerName=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.describe_node_association_status(\n NodeAssociationStatusToken='string',\n ServerName='string'\n )\n \n \n :type NodeAssociationStatusToken: string\n :param NodeAssociationStatusToken: [REQUIRED]\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatus (string) --\n \n \n \n \"\"\"\n pass\n\n\ndef describe_servers(ServerName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists all configuration management servers that are identified with your account. Only the stored results from Amazon DynamoDB are returned. AWS OpsWorks for Chef Automate does not query other services.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_servers(\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type ServerName: string\n :param ServerName: Describes the server with the specified ServerName.\n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. 
To get remaining results, call DescribeServers again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Servers': [\n {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. 
Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\n\ndef disassociate_node(ServerName=None, NodeName=None, EngineAttributes=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_node(\n ServerName='string',\n NodeName='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :type NodeName: string\n :param NodeName: [REQUIRED]\n\n :type EngineAttributes: list\n :param EngineAttributes: \n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :rtype: dict\n :return: {\n 'NodeAssociationStatusToken': 'string'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatusToken (string) --\n \n \n \n \"\"\"\n pass\n\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,\n HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\n\ndef get_waiter():\n \"\"\"\n \n \"\"\"\n pass\n\n\ndef restore_server(BackupId=None, ServerName=None, InstanceType=None,\n KeyPair=None):\n \"\"\"\n Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.\n This operation is asynchronous.\n A InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.restore_server(\n BackupId='string',\n ServerName='string',\n InstanceType='string',\n KeyPair='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup that you want to use to restore a server.\n \n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to restore.\n \n\n :type InstanceType: string\n :param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.\n\n :type KeyPair: string\n :param KeyPair: The name of the key pair to set on the new EC2 instance. 
This can be helpful if any of the administrators who manage the server no longer have the SSH key.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\n\ndef start_maintenance(ServerName=None):\n \"\"\"\n Manually starts server maintenance. This command can be useful if an earlier maintenance attempt failed, and the underlying cause of maintenance failure has been resolved. The server will switch to UNDER_MAINTENANCE state, while maintenace is in progress.\n Maintenace can only be started for HEALTHY and UNHEALTHY servers. A InvalidStateException is thrown otherwise. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.start_maintenance(\n ServerName='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server on which to run maintenance.\n \n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\n\ndef update_server(DisableAutomatedBackup=None, BackupRetentionCount=None,\n 
ServerName=None, PreferredMaintenanceWindow=None, PreferredBackupWindow\n =None):\n \"\"\"\n Updates settings for a server.\n This operation is synchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.update_server(\n DisableAutomatedBackup=True|False,\n BackupRetentionCount=123,\n ServerName='string',\n PreferredMaintenanceWindow='string',\n PreferredBackupWindow='string'\n )\n \n \n :type DisableAutomatedBackup: boolean\n :param DisableAutomatedBackup: Setting DisableAutomatedBackup to true disables automated or scheduled backups. Automated backups are enabled by default.\n\n :type BackupRetentionCount: integer\n :param BackupRetentionCount: Sets the number of automated backups that you want to keep.\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server to update.\n \n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: \n DDD:HH:MM (weekly start time) or HH:MM (daily start time).\n Time windows always use coordinated universal time (UTC).\n Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: \n DDD:HH:MM (weekly start time) or HH:MM (daily start time).\n Time windows always use coordinated universal time (UTC).\n Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.\n \n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 
'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\n\ndef update_server_engine_attributes(ServerName=None, AttributeName=None,\n AttributeValue=None):\n \"\"\"\n Updates engine specific attributes on a specified server. Server will enter the MODIFYING state when this operation is in progress. Only one update can take place at a time.\n This operation can be use to reset Chef Server main API key (CHEF_PIVOTAL_KEY ).\n This operation is asynchronous.\n This operation can only be called for HEALTHY and UNHEALTHY servers. Otherwise a InvalidStateException is raised. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.update_server_engine_attributes(\n ServerName='string',\n AttributeName='string',\n AttributeValue='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server to update.\n \n\n :type AttributeName: string\n :param AttributeName: [REQUIRED]\n The name of the engine attribute to update.\n \n\n :type AttributeValue: string\n :param AttributeValue: The value to set for the attribute.\n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n",
"step-5": "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef associate_node(ServerName=None, NodeName=None, EngineAttributes=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.associate_node(\n ServerName='string',\n NodeName='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :type NodeName: string\n :param NodeName: [REQUIRED]\n\n :type EngineAttributes: list\n :param EngineAttributes: \n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :rtype: dict\n :return: {\n 'NodeAssociationStatusToken': 'string'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatusToken (string) --\n \n \n \n \"\"\"\n pass\n\ndef 
can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef create_backup(ServerName=None, Description=None):\n \"\"\"\n Creates an application-level backup of a server. While the server is BACKING_UP , the server can not be modified and no additional backup can be created.\n Backups can be created for RUNNING , HEALTHY and UNHEALTHY servers.\n This operation is asnychronous.\n By default 50 manual backups can be created.\n A LimitExceededException is thrown then the maximum number of manual backup is reached. A InvalidStateException is thrown when the server is not in any of RUNNING, HEALTHY, UNHEALTHY. A ResourceNotFoundException is thrown when the server is not found. 
A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.create_backup(\n ServerName='string',\n Description='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to back up.\n \n\n :type Description: string\n :param Description: A user-defined description of the backup.\n\n :rtype: dict\n :return: {\n 'Backup': {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=None, ServerName=None, InstanceProfileArn=None, InstanceType=None, KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None, BackupId=None):\n \"\"\"\n Creates and immedately starts a new Server. The server can be used once it has reached the HEALTHY state.\n This operation is asnychronous.\n A LimitExceededException is thrown then the maximum number of server backup is reached. A ResourceAlreadyExistsException is raise when a server with the same name already exists in the account. 
A ResourceNotFoundException is thrown when a backupId is passed, but the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n By default 10 servers can be created. A LimitExceededException is raised when the limit is exceeded.\n When no security groups are provided by using SecurityGroupIds , AWS OpsWorks creates a new security group. This security group opens the Chef server to the world on TCP port 443. If a KeyName is present, SSH access is enabled. SSH is also open to the world on TCP port 22.\n By default, the Chef Server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.\n See also: AWS API Documentation\n \n \n :example: response = client.create_server(\n DisableAutomatedBackup=True|False,\n Engine='string',\n EngineModel='string',\n EngineVersion='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n BackupRetentionCount=123,\n ServerName='string',\n InstanceProfileArn='string',\n InstanceType='string',\n KeyPair='string',\n PreferredMaintenanceWindow='string',\n PreferredBackupWindow='string',\n SecurityGroupIds=[\n 'string',\n ],\n ServiceRoleArn='string',\n SubnetIds=[\n 'string',\n ],\n BackupId='string'\n )\n \n \n :type DisableAutomatedBackup: boolean\n :param DisableAutomatedBackup: Enable or disable scheduled backups. Valid values are true or false . The default value is true .\n\n :type Engine: string\n :param Engine: The configuration management engine to use. Valid values include Chef .\n\n :type EngineModel: string\n :param EngineModel: The engine model, or option. Valid values include Single .\n\n :type EngineVersion: string\n :param EngineVersion: The major release version of the engine that you want to use. 
Values depend on the engine that you choose.\n\n :type EngineAttributes: list\n :param EngineAttributes: Engine attributes on a specified server.\n Attributes accepted in a createServer request:\n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is not stored by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :type BackupRetentionCount: integer\n :param BackupRetentionCount: The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks for Chef Automate deletes the oldest backups if this number is exceeded. The default value is 1 .\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 32 characters.\n \n\n :type InstanceProfileArn: string\n :param InstanceProfileArn: [REQUIRED]\n The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the instance profile you need.\n \n\n :type InstanceType: string\n :param InstanceType: The Amazon EC2 instance type to use. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large .\n\n :type KeyPair: string\n :param KeyPair: The Amazon EC2 key pair to set for the instance. 
You may specify this parameter to connect to your instances by using SSH.\n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: The start time for a one-hour period each week during which AWS OpsWorks for Chef Automate performs maintenance on the instance. Valid values must be specified in the following format: DDD:HH:MM . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See TimeWindowDefinition for more information.\n Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: The start time for a one-hour period during which AWS OpsWorks for Chef Automate backs up application-level data on your server if backups are enabled. Valid values must be specified in one of the following formats:\n HH:MM for daily backups\n DDD:HH:MM for weekly backups\n The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.\n Example: 08:00 , which represents a daily start time of 08:00 UTC.Example: Mon:08:00 , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)\n \n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by SubnetIds .\n If you do not specify this parameter, AWS OpsWorks for Chef Automate creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).\n (string) --\n \n\n :type ServiceRoleArn: string\n :param ServiceRoleArn: [REQUIRED]\n The service role that the AWS OpsWorks for Chef Automate service backend uses to work with your account. 
Although the AWS OpsWorks console typically creates the service role for you, in this release of AWS OpsWorks for Chef Automate, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-stuff/latest/service-role-creation.yaml. This template creates a stack that includes the service role that you need.\n \n\n :type SubnetIds: list\n :param SubnetIds: The IDs of subnets in which to launch the server EC2 instance.\n Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have 'Auto Assign Public IP' enabled.\n EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have 'Auto Assign Public IP' enabled.\n For more information about supported Amazon EC2 platforms, see Supported Platforms .\n (string) --\n \n\n :type BackupId: string\n :param BackupId: If you specify this field, AWS OpsWorks for Chef Automate creates the server by using the backup represented by BackupId.\n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n 
\n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\ndef delete_backup(BackupId=None):\n \"\"\"\n Deletes a backup. You can delete both manual and automated backups.\n This operation is asynchronous.\n A InvalidStateException is thrown then a backup is already deleting. A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is thrown when parameters of the request are not valid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_backup(\n BackupId='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ServerName-yyyyMMddHHmmssSSS .\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef delete_server(ServerName=None):\n \"\"\"\n Deletes the server and the underlying AWS CloudFormation stack (including the server's EC2 instance). The server status updated to DELETING . Once the server is successfully deleted, it will no longer be returned by DescribeServer requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.\n This operation is asynchronous.\n A InvalidStateException is thrown then a server is already deleting. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.delete_server(\n ServerName='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The ID of the server to delete.\n \n\n :rtype: dict\n :return: {}\n \n \n \"\"\"\n pass\n\ndef describe_account_attributes():\n \"\"\"\n Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.\n This operation is synchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_account_attributes()\n \n \n :rtype: dict\n :return: {\n 'Attributes': [\n {\n 'Name': 'string',\n 'Maximum': 123,\n 'Used': 123\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef describe_backups(BackupId=None, ServerName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the backup does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_backups(\n BackupId='string',\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type BackupId: string\n :param BackupId: Describes a single backup.\n\n :type ServerName: string\n :param ServerName: Returns backups for the server with the specified ServerName.\n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeBackups again, and assign the token from the previous results as the value of the nextToken parameter. 
If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Backups': [\n {\n 'BackupArn': 'string',\n 'BackupId': 'string',\n 'BackupType': 'AUTOMATED'|'MANUAL',\n 'CreatedAt': datetime(2015, 1, 1),\n 'Description': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'PreferredBackupWindow': 'string',\n 'PreferredMaintenanceWindow': 'string',\n 'S3DataSize': 123,\n 'S3DataUrl': 'string',\n 'S3LogUrl': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServerName': 'string',\n 'ServiceRoleArn': 'string',\n 'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',\n 'StatusDescription': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ToolsVersion': 'string',\n 'UserArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef describe_events(ServerName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Describes events for a specified server. Results are ordered by time, with newest events first.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_events(\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server for which you want to view events.\n \n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call DescribeEvents again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. 
If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'ServerEvents': [\n {\n 'CreatedAt': datetime(2015, 1, 1),\n 'ServerName': 'string',\n 'Message': 'string',\n 'LogUrl': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef describe_node_association_status(NodeAssociationStatusToken=None, ServerName=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.describe_node_association_status(\n NodeAssociationStatusToken='string',\n ServerName='string'\n )\n \n \n :type NodeAssociationStatusToken: string\n :param NodeAssociationStatusToken: [REQUIRED]\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :rtype: dict\n :return: {\n 'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatus (string) --\n \n \n \n \"\"\"\n pass\n\ndef describe_servers(ServerName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Lists all configuration management servers that are identified with your account. Only the stored results from Amazon DynamoDB are returned. AWS OpsWorks for Chef Automate does not query other services.\n This operation is synchronous.\n A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.describe_servers(\n ServerName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type ServerName: string\n :param ServerName: Describes the server with the specified ServerName.\n\n :type NextToken: string\n :param NextToken: NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. 
To get remaining results, call DescribeServers again, and assign the token from the previous results as the value of the nextToken parameter. If there are no more results, the response object's nextToken parameter value is null . Setting a nextToken value that was not returned in your previous results causes an InvalidNextTokenException to occur.\n\n :type MaxResults: integer\n :param MaxResults: To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.\n\n :rtype: dict\n :return: {\n 'Servers': [\n {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. 
Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\ndef disassociate_node(ServerName=None, NodeName=None, EngineAttributes=None):\n \"\"\"\n See also: AWS API Documentation\n \n \n :example: response = client.disassociate_node(\n ServerName='string',\n NodeName='string',\n EngineAttributes=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n\n :type NodeName: string\n :param NodeName: [REQUIRED]\n\n :type EngineAttributes: list\n :param EngineAttributes: \n (dict) --A name/value pair that is specific to the engine of the server.\n Name (string) --The name of the engine attribute.\n Value (string) --The value of the engine attribute.\n \n \n\n :rtype: dict\n :return: {\n 'NodeAssociationStatusToken': 'string'\n }\n \n \n :returns: \n (dict) --\n NodeAssociationStatusToken (string) --\n \n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\n ClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\n default, the http method is whatever is used in the method's model.\n\n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
For example, if the\n method name is create_foo, and you'd normally invoke the\n operation as client.create_foo(**kwargs), if the\n create_foo operation can be paginated, you can use the\n call client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\n \"\"\"\n pass\n\ndef get_waiter():\n \"\"\"\n \n \"\"\"\n pass\n\ndef restore_server(BackupId=None, ServerName=None, InstanceType=None, KeyPair=None):\n \"\"\"\n Restores a backup to a server that is in a RUNNING , FAILED , or HEALTHY state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of all of the server's client devices should continue to work.\n This operation is asynchronous.\n A InvalidStateException is thrown when the server is not in a valid state. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.restore_server(\n BackupId='string',\n ServerName='string',\n InstanceType='string',\n KeyPair='string'\n )\n \n \n :type BackupId: string\n :param BackupId: [REQUIRED]\n The ID of the backup that you want to use to restore a server.\n \n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server that you want to restore.\n \n\n :type InstanceType: string\n :param InstanceType: The type of the instance to create. Valid values must be specified in the following format: ^([cm][34]|t2).* For example, c3.large . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.\n\n :type KeyPair: string\n :param KeyPair: The name of the key pair to set on the new EC2 instance. 
This can be helpful if any of the administrators who manage the server no longer have the SSH key.\n\n :rtype: dict\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef start_maintenance(ServerName=None):\n \"\"\"\n Manually starts server maintenance. This command can be useful if an earlier maintenance attempt failed, and the underlying cause of maintenance failure has been resolved. The server will switch to UNDER_MAINTENANCE state, while maintenace is in progress.\n Maintenace can only be started for HEALTHY and UNHEALTHY servers. A InvalidStateException is thrown otherwise. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.start_maintenance(\n ServerName='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server on which to run maintenance.\n \n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef update_server(DisableAutomatedBackup=None, BackupRetentionCount=None, 
ServerName=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None):\n \"\"\"\n Updates settings for a server.\n This operation is synchronous.\n See also: AWS API Documentation\n \n \n :example: response = client.update_server(\n DisableAutomatedBackup=True|False,\n BackupRetentionCount=123,\n ServerName='string',\n PreferredMaintenanceWindow='string',\n PreferredBackupWindow='string'\n )\n \n \n :type DisableAutomatedBackup: boolean\n :param DisableAutomatedBackup: Setting DisableAutomatedBackup to true disables automated or scheduled backups. Automated backups are enabled by default.\n\n :type BackupRetentionCount: integer\n :param BackupRetentionCount: Sets the number of automated backups that you want to keep.\n\n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server to update.\n \n\n :type PreferredMaintenanceWindow: string\n :param PreferredMaintenanceWindow: \n DDD:HH:MM (weekly start time) or HH:MM (daily start time).\n Time windows always use coordinated universal time (UTC).\n Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.\n \n\n :type PreferredBackupWindow: string\n :param PreferredBackupWindow: \n DDD:HH:MM (weekly start time) or HH:MM (daily start time).\n Time windows always use coordinated universal time (UTC).\n Valid strings for day of week (DDD ) are: Mon, Tue, Wed, Thr, Fri, Sat, Sun.\n \n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n 
],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\ndef update_server_engine_attributes(ServerName=None, AttributeName=None, AttributeValue=None):\n \"\"\"\n Updates engine specific attributes on a specified server. Server will enter the MODIFYING state when this operation is in progress. Only one update can take place at a time.\n This operation can be use to reset Chef Server main API key (CHEF_PIVOTAL_KEY ).\n This operation is asynchronous.\n This operation can only be called for HEALTHY and UNHEALTHY servers. Otherwise a InvalidStateException is raised. A ResourceNotFoundException is thrown when the server does not exist. 
A ValidationException is raised when parameters of the request are invalid.\n See also: AWS API Documentation\n \n \n :example: response = client.update_server_engine_attributes(\n ServerName='string',\n AttributeName='string',\n AttributeValue='string'\n )\n \n \n :type ServerName: string\n :param ServerName: [REQUIRED]\n The name of the server to update.\n \n\n :type AttributeName: string\n :param AttributeName: [REQUIRED]\n The name of the engine attribute to update.\n \n\n :type AttributeValue: string\n :param AttributeValue: The value to set for the attribute.\n\n :rtype: dict\n :return: {\n 'Server': {\n 'BackupRetentionCount': 123,\n 'ServerName': 'string',\n 'CreatedAt': datetime(2015, 1, 1),\n 'DisableAutomatedBackup': True|False,\n 'Endpoint': 'string',\n 'Engine': 'string',\n 'EngineModel': 'string',\n 'EngineAttributes': [\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n 'EngineVersion': 'string',\n 'InstanceProfileArn': 'string',\n 'InstanceType': 'string',\n 'KeyPair': 'string',\n 'MaintenanceStatus': 'SUCCESS'|'FAILED',\n 'PreferredMaintenanceWindow': 'string',\n 'PreferredBackupWindow': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'ServiceRoleArn': 'string',\n 'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY',\n 'StatusReason': 'string',\n 'SubnetIds': [\n 'string',\n ],\n 'ServerArn': 'string'\n }\n }\n \n \n :returns: \n CHEF_PIVOTAL_KEY : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.\n CHEF_STARTER_KIT : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.\n \n \"\"\"\n pass\n\n",
"step-ids": [
11,
12,
16,
19,
20
]
}
|
[
11,
12,
16,
19,
20
] |
from selenium import webdriver
from time import sleep
import os.path
import time
import datetime
driver =webdriver.Chrome(executable_path=r'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe')
counter=0
while True :
driver.get("https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0")
start='C://Users//Pathak//Downloads//chromedriver_win32'
df=str(counter);
gh=str(time.time())
ft=df+gh+'.png'
final=os.path.join(start,ft)
driver.get_screenshot_as_file(final)
counter+=1
sleep(20)
driver.quit()
|
normal
|
{
"blob_id": "30e7fc169eceb3d8cc1a4fa6bb65d81a4403f2c7",
"index": 5800,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n driver.get(\n 'https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0'\n )\n start = 'C://Users//Pathak//Downloads//chromedriver_win32'\n df = str(counter)\n gh = str(time.time())\n ft = df + gh + '.png'\n final = os.path.join(start, ft)\n driver.get_screenshot_as_file(final)\n counter += 1\n sleep(20)\ndriver.quit()\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome(executable_path=\n 'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe')\ncounter = 0\nwhile True:\n driver.get(\n 'https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0'\n )\n start = 'C://Users//Pathak//Downloads//chromedriver_win32'\n df = str(counter)\n gh = str(time.time())\n ft = df + gh + '.png'\n final = os.path.join(start, ft)\n driver.get_screenshot_as_file(final)\n counter += 1\n sleep(20)\ndriver.quit()\n",
"step-4": "from selenium import webdriver\nfrom time import sleep\nimport os.path\nimport time\nimport datetime\ndriver = webdriver.Chrome(executable_path=\n 'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe')\ncounter = 0\nwhile True:\n driver.get(\n 'https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0'\n )\n start = 'C://Users//Pathak//Downloads//chromedriver_win32'\n df = str(counter)\n gh = str(time.time())\n ft = df + gh + '.png'\n final = os.path.join(start, ft)\n driver.get_screenshot_as_file(final)\n counter += 1\n sleep(20)\ndriver.quit()\n",
"step-5": "from selenium import webdriver\r\nfrom time import sleep\r\nimport os.path\r\nimport time\r\nimport datetime\r\ndriver =webdriver.Chrome(executable_path=r'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe')\r\ncounter=0\r\nwhile True :\r\n\t\r\n\r\n\tdriver.get(\"https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0\")\r\n\tstart='C://Users//Pathak//Downloads//chromedriver_win32'\r\n\tdf=str(counter);\r\n\tgh=str(time.time())\r\n\r\n\tft=df+gh+'.png'\r\n\tfinal=os.path.join(start,ft)\r\n\tdriver.get_screenshot_as_file(final) \r\n\tcounter+=1\r\n\t\r\n\tsleep(20)\r\n\r\ndriver.quit()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register.filter(name='range')
def filter_range(start, end=None):
if end is None:
return range(start)
else:
return range(start, end)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
register = template.Library()
@register.filter(name='range')
def filter_range(start, end=None):
if end is None:
return range(start)
else:
return range(start, end)
<|reserved_special_token_1|>
from django import template
register = template.Library()
@register.filter(name='range')
def filter_range(start, end=None):
if end is None:
return range(start)
else:
return range(start, end)
|
flexible
|
{
"blob_id": "f733885eed5d1cbf6e49db0997655ad627c9d795",
"index": 599,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@register.filter(name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\n@register.filter(name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"step-4": "from django import template\nregister = template.Library()\n\n\n@register.filter(name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_lp(s):
"""gets latest prices from google"""
sl = []
for stock in s.symbols:
quote = get(stock, 'LON')
x = quote.replace(',', '')
x = float(x)
sl.append(x)
return sl
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get(symbol, exchange):
url = prefix + '%s:%s' % (exchange, symbol)
u = urllib.request.urlopen(url)
c = u.read().decode('utf-8')
con = c[5:-2]
cont = con.replace('\\', '')
content = json.loads(cont)
result = content['l']
return result
def get_lp(s):
"""gets latest prices from google"""
sl = []
for stock in s.symbols:
quote = get(stock, 'LON')
x = quote.replace(',', '')
x = float(x)
sl.append(x)
return sl
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = Settings()
prefix = 'http://finance.google.com/finance?client=ig&output=json&q='
def get(symbol, exchange):
url = prefix + '%s:%s' % (exchange, symbol)
u = urllib.request.urlopen(url)
c = u.read().decode('utf-8')
con = c[5:-2]
cont = con.replace('\\', '')
content = json.loads(cont)
result = content['l']
return result
def get_lp(s):
"""gets latest prices from google"""
sl = []
for stock in s.symbols:
quote = get(stock, 'LON')
x = quote.replace(',', '')
x = float(x)
sl.append(x)
return sl
<|reserved_special_token_1|>
from share_settings import Settings
import urllib.request, json
import pprint as p
s = Settings()
prefix = 'http://finance.google.com/finance?client=ig&output=json&q='
def get(symbol, exchange):
url = prefix + '%s:%s' % (exchange, symbol)
u = urllib.request.urlopen(url)
c = u.read().decode('utf-8')
con = c[5:-2]
cont = con.replace('\\', '')
content = json.loads(cont)
result = content['l']
return result
def get_lp(s):
"""gets latest prices from google"""
sl = []
for stock in s.symbols:
quote = get(stock, 'LON')
x = quote.replace(',', '')
x = float(x)
sl.append(x)
return sl
<|reserved_special_token_1|>
from share_settings import Settings
import urllib.request,json
import pprint as p
s = Settings()
prefix = "http://finance.google.com/finance?client=ig&output=json&q="
def get(symbol,exchange):
url = prefix+"%s:%s"%(exchange,symbol)
u = urllib.request.urlopen(url)
#translates url to string
c = u.read().decode('utf-8')
#slices string to remove characters at start/end of string
con=(c[5:-2])
#removes '\' from the text
cont=con.replace("\\","")
content = json.loads(cont)
result = (content['l'])
return result
def get_lp(s):
"""gets latest prices from google"""
sl = []
for stock in s.symbols:
#creates a list of latest stock prices
quote = get(stock,"LON")
#changes string to integer and removes ','
x = (quote.replace(',',''))
x = float(x)
sl.append(x)
return sl
#print(get_lp(s))
|
flexible
|
{
"blob_id": "7247ef463998f6738c21ad8efa988a32f7fb99c0",
"index": 4760,
"step-1": "<mask token>\n\n\ndef get_lp(s):\n \"\"\"gets latest prices from google\"\"\"\n sl = []\n for stock in s.symbols:\n quote = get(stock, 'LON')\n x = quote.replace(',', '')\n x = float(x)\n sl.append(x)\n return sl\n",
"step-2": "<mask token>\n\n\ndef get(symbol, exchange):\n url = prefix + '%s:%s' % (exchange, symbol)\n u = urllib.request.urlopen(url)\n c = u.read().decode('utf-8')\n con = c[5:-2]\n cont = con.replace('\\\\', '')\n content = json.loads(cont)\n result = content['l']\n return result\n\n\ndef get_lp(s):\n \"\"\"gets latest prices from google\"\"\"\n sl = []\n for stock in s.symbols:\n quote = get(stock, 'LON')\n x = quote.replace(',', '')\n x = float(x)\n sl.append(x)\n return sl\n",
"step-3": "<mask token>\ns = Settings()\nprefix = 'http://finance.google.com/finance?client=ig&output=json&q='\n\n\ndef get(symbol, exchange):\n url = prefix + '%s:%s' % (exchange, symbol)\n u = urllib.request.urlopen(url)\n c = u.read().decode('utf-8')\n con = c[5:-2]\n cont = con.replace('\\\\', '')\n content = json.loads(cont)\n result = content['l']\n return result\n\n\ndef get_lp(s):\n \"\"\"gets latest prices from google\"\"\"\n sl = []\n for stock in s.symbols:\n quote = get(stock, 'LON')\n x = quote.replace(',', '')\n x = float(x)\n sl.append(x)\n return sl\n",
"step-4": "from share_settings import Settings\nimport urllib.request, json\nimport pprint as p\ns = Settings()\nprefix = 'http://finance.google.com/finance?client=ig&output=json&q='\n\n\ndef get(symbol, exchange):\n url = prefix + '%s:%s' % (exchange, symbol)\n u = urllib.request.urlopen(url)\n c = u.read().decode('utf-8')\n con = c[5:-2]\n cont = con.replace('\\\\', '')\n content = json.loads(cont)\n result = content['l']\n return result\n\n\ndef get_lp(s):\n \"\"\"gets latest prices from google\"\"\"\n sl = []\n for stock in s.symbols:\n quote = get(stock, 'LON')\n x = quote.replace(',', '')\n x = float(x)\n sl.append(x)\n return sl\n",
"step-5": "from share_settings import Settings\nimport urllib.request,json\nimport pprint as p\ns = Settings()\n\nprefix = \"http://finance.google.com/finance?client=ig&output=json&q=\"\n \ndef get(symbol,exchange):\n url = prefix+\"%s:%s\"%(exchange,symbol)\n u = urllib.request.urlopen(url)\n #translates url to string\n c = u.read().decode('utf-8')\n #slices string to remove characters at start/end of string\n con=(c[5:-2])\n #removes '\\' from the text\n cont=con.replace(\"\\\\\",\"\")\n content = json.loads(cont)\n result = (content['l'])\n return result\n\ndef get_lp(s):\n \"\"\"gets latest prices from google\"\"\"\n sl = [] \n for stock in s.symbols: \n #creates a list of latest stock prices\n quote = get(stock,\"LON\")\n #changes string to integer and removes ','\n x = (quote.replace(',',''))\n x = float(x)\n sl.append(x)\n return sl\n\n#print(get_lp(s))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class GameMap(list):
<|reserved_special_token_0|>
def __init__(self):
super().__init__()
self.xmax = 5
self.ymax = 5
self.__nb_elephants = 0
self.__nb_rhinoceros = 0
self.nb_boulders = 0
self.nb_crosses = 0
self.playerTurn = 'Elephant'
self.winner = ''
for k in range(self.ymax):
y = []
for i in range(self.ymax):
y.append(0)
self.append(y)
for k in range(3):
self[2][1 + k] = Boulder(2, 1 + k)
self.nb_boulders += 1
@property
def nb_elephants(self):
"""
This is the number of elephant on the gamemap.
:Getter: Return the number of elephant on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> ne = m.nb_elephants
.. note:: The elephant's number can not exceed 5.
.. warning:: the number of elephant can't be changed by hand.
"""
return self.__nb_elephants
@nb_elephants.setter
def nb_elephants(self, x):
"""
Setting the elephant's number.
.. warning:: the number of elephant can't be changed by hand.
"""
print('Warning ! Changing the number of Elephant is not possible!')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def add(self, animal):
"""
This method add a new animal onto the board, with position and orientation
It returns whether the placement was possible or not.
:Args:
:param animal (Animal): the animal to add on the GameMap.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.add(a)
.. note:: the turn does not count if the insertion was not possible
.. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see
if insertion is possible
.. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
"""
x, y = animal.coords
if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==
0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_elephants += 1
self.playerTurn = 'Rhinoceros'
elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (
x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_rhinoceros += 1
self.playerTurn = 'Elephant'
else:
return False
<|reserved_special_token_0|>
def push_counter(self, x, y, cx, cy, counter=1, k=0):
"""
This recursive method determines if a push move is possible by counting the elements having to be pushed,
and taking into account their orientation.
It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.
:Args:
:param x (int): is the abscissa of the current GamePiece,
:param y (int): is the ordinate of the current GamePiece,
:param cx (int): the direction of the move following the x-axis,
:param cy (int): the direction of the move following the y-axis,
:param counter (int): the sum of the scalar product of each animals in a row,
:param k (int): the counter of pawns in a row.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.push_counter(0, 1, 1, 0)
.. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
.. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
.. warning:: ...
.. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.
"""
k += 1
if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):
return counter, k
elif self[x + cx][y + cy] == 0:
return counter, k
elif isinstance(self[x + cx][y + cy], Animal):
if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:
counter += 1
elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:
counter -= 2
elif isinstance(self[x + cx][y + cy], Boulder):
counter -= 1
return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
def move(self, animal, ncoords, ndir):
"""
This method moves an animal from on the board, as well as turns it
If the coords to which the animal is moving are taken, the the animal pushes
:Args:
:param animal (Animal): the animal to move,
:param ncoords (tuple): the new coordinates of the animal,
:param ndir (np.array): the new direction of the animal.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.move(a,(1,1),np.array([0,1]))
.. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
.. note:: player turn does not change if move is not possible
.. warning:: ...
.. info:: it is possible to both rotate and move to another position in the same turn
"""
x, y = animal.coords
nx, ny = ncoords
cx, cy = nx - x, ny - y
if abs(cx) > 1 or abs(cy) > 1:
return False
elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) and (animal.direction[0] == cx and animal.
direction[1] == cy):
res = self.push_counter(x, y, cx, cy, 1)
c = res[0]
k = res[1]
if c >= 0:
for i in range(k, 0, -1):
if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==
-1 or y + i * cy == 5):
if isinstance(self[x + (i - 1) * cx][y + (i - 1) *
cy], Animal):
self[x + (i - 1) * cx][y + (i - 1) * cy] = animal
if animal.species == 'Elephant':
self.__nb_elephants -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
else:
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
for k in range(5):
if isinstance(self[x + (i - 1 - k) * cx][y +
(i - 1 - k) * cy], Animal) and [self[x +
(i - 1 - k) * cx][y + (i - 1 - k) * cy]
.direction[0], self[x + (i - 1 - k) *
cx][y + (i - 1 - k) * cy].direction[1]
] == [cx, cy]:
self.winner = self[x + (i - 1 - k) * cx][
y + (i - 1 - k) * cy].species
print('winner is', self.winner)
break
else:
self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][
y + (i - 1) * cy]
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
self[x + i * cx][y + i * cy
].coords = x + i * cx, y + i * cy
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
print('Push not possible')
return False
elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) or cx == 0 and cy == 0:
animal.coords = nx, ny
animal.direction = ndir
self[x][y] = 0
self[nx][ny] = animal
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def load(self, file):
"""
This method load a KingOfSiam file with the .kos extension in a GameMap object.
:Args:
:param file (file object): is file to load.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
.. note:: this method take in argument a file object.
"""
for i in range(5):
for j in range(5):
self[i][j] = 0
f = file.readlines()
k = 0
while k < len(f) and 'Boulder {' not in f[k]:
k += 1
k += 1
while ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
self[x][y] = Boulder(x, y)
k += 1
while k < len(f) and 'Elephant {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Elephant')
k += 1
while k < len(f) and 'Rhinoceros {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Rhinoceros')
k += 1
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GameMap(list):
<|reserved_special_token_0|>
def __init__(self):
super().__init__()
self.xmax = 5
self.ymax = 5
self.__nb_elephants = 0
self.__nb_rhinoceros = 0
self.nb_boulders = 0
self.nb_crosses = 0
self.playerTurn = 'Elephant'
self.winner = ''
for k in range(self.ymax):
y = []
for i in range(self.ymax):
y.append(0)
self.append(y)
for k in range(3):
self[2][1 + k] = Boulder(2, 1 + k)
self.nb_boulders += 1
@property
def nb_elephants(self):
"""
This is the number of elephant on the gamemap.
:Getter: Return the number of elephant on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> ne = m.nb_elephants
.. note:: The elephant's number can not exceed 5.
.. warning:: the number of elephant can't be changed by hand.
"""
return self.__nb_elephants
@nb_elephants.setter
def nb_elephants(self, x):
"""
Setting the elephant's number.
.. warning:: the number of elephant can't be changed by hand.
"""
print('Warning ! Changing the number of Elephant is not possible!')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def add(self, animal):
"""
This method add a new animal onto the board, with position and orientation
It returns whether the placement was possible or not.
:Args:
:param animal (Animal): the animal to add on the GameMap.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.add(a)
.. note:: the turn does not count if the insertion was not possible
.. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see
if insertion is possible
.. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
"""
x, y = animal.coords
if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==
0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_elephants += 1
self.playerTurn = 'Rhinoceros'
elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (
x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_rhinoceros += 1
self.playerTurn = 'Elephant'
else:
return False
<|reserved_special_token_0|>
def push_counter(self, x, y, cx, cy, counter=1, k=0):
"""
This recursive method determines if a push move is possible by counting the elements having to be pushed,
and taking into account their orientation.
It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.
:Args:
:param x (int): is the abscissa of the current GamePiece,
:param y (int): is the ordinate of the current GamePiece,
:param cx (int): the direction of the move following the x-axis,
:param cy (int): the direction of the move following the y-axis,
:param counter (int): the sum of the scalar product of each animals in a row,
:param k (int): the counter of pawns in a row.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.push_counter(0, 1, 1, 0)
.. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
.. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
.. warning:: ...
.. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.
"""
k += 1
if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):
return counter, k
elif self[x + cx][y + cy] == 0:
return counter, k
elif isinstance(self[x + cx][y + cy], Animal):
if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:
counter += 1
elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:
counter -= 2
elif isinstance(self[x + cx][y + cy], Boulder):
counter -= 1
return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
def move(self, animal, ncoords, ndir):
"""
This method moves an animal from on the board, as well as turns it
If the coords to which the animal is moving are taken, the the animal pushes
:Args:
:param animal (Animal): the animal to move,
:param ncoords (tuple): the new coordinates of the animal,
:param ndir (np.array): the new direction of the animal.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.move(a,(1,1),np.array([0,1]))
.. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
.. note:: player turn does not change if move is not possible
.. warning:: ...
.. info:: it is possible to both rotate and move to another position in the same turn
"""
x, y = animal.coords
nx, ny = ncoords
cx, cy = nx - x, ny - y
if abs(cx) > 1 or abs(cy) > 1:
return False
elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) and (animal.direction[0] == cx and animal.
direction[1] == cy):
res = self.push_counter(x, y, cx, cy, 1)
c = res[0]
k = res[1]
if c >= 0:
for i in range(k, 0, -1):
if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==
-1 or y + i * cy == 5):
if isinstance(self[x + (i - 1) * cx][y + (i - 1) *
cy], Animal):
self[x + (i - 1) * cx][y + (i - 1) * cy] = animal
if animal.species == 'Elephant':
self.__nb_elephants -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
else:
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
for k in range(5):
if isinstance(self[x + (i - 1 - k) * cx][y +
(i - 1 - k) * cy], Animal) and [self[x +
(i - 1 - k) * cx][y + (i - 1 - k) * cy]
.direction[0], self[x + (i - 1 - k) *
cx][y + (i - 1 - k) * cy].direction[1]
] == [cx, cy]:
self.winner = self[x + (i - 1 - k) * cx][
y + (i - 1 - k) * cy].species
print('winner is', self.winner)
break
else:
self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][
y + (i - 1) * cy]
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
self[x + i * cx][y + i * cy
].coords = x + i * cx, y + i * cy
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
print('Push not possible')
return False
elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) or cx == 0 and cy == 0:
animal.coords = nx, ny
animal.direction = ndir
self[x][y] = 0
self[nx][ny] = animal
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
def __str__(self):
"""
Show the current state of the game board
:return: the string with the characteristics of the board
:rtype: str
"""
s = ''
for i in range(5):
for j in range(5):
ani = False
if self[i][j] == 0:
s += ' 0 '
elif self[i][j].species == 'Elephant':
s += ' E'
ani = True
elif self[i][j].species == 'Rhinoceros':
s += ' R'
ani = True
else:
s += ' B '
if ani:
if self[i][j].direction[0] == 0 and self[i][j].direction[1
] == 1:
d = '> '
elif self[i][j].direction[0] == -1 and self[i][j
].direction[1] == 0:
d = '∧ '
elif self[i][j].direction[0] == 0 and self[i][j].direction[
1] == -1:
d = '< '
else:
d = '∨ '
s += d
s += '\n \n'
return s
<|reserved_special_token_0|>
def load(self, file):
"""
This method load a KingOfSiam file with the .kos extension in a GameMap object.
:Args:
:param file (file object): is file to load.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
.. note:: this method take in argument a file object.
"""
for i in range(5):
for j in range(5):
self[i][j] = 0
f = file.readlines()
k = 0
while k < len(f) and 'Boulder {' not in f[k]:
k += 1
k += 1
while ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
self[x][y] = Boulder(x, y)
k += 1
while k < len(f) and 'Elephant {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Elephant')
k += 1
while k < len(f) and 'Rhinoceros {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Rhinoceros')
k += 1
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GameMap(list):
<|reserved_special_token_0|>
def __init__(self):
super().__init__()
self.xmax = 5
self.ymax = 5
self.__nb_elephants = 0
self.__nb_rhinoceros = 0
self.nb_boulders = 0
self.nb_crosses = 0
self.playerTurn = 'Elephant'
self.winner = ''
for k in range(self.ymax):
y = []
for i in range(self.ymax):
y.append(0)
self.append(y)
for k in range(3):
self[2][1 + k] = Boulder(2, 1 + k)
self.nb_boulders += 1
    @property
    def nb_elephants(self):
        """
        This is the number of elephant on the gamemap.

        :Getter: Return the number of elephant on the gamemap.
        :Type: int

        :Getter's example:
        >>> m = GameMap()
        >>> ne = m.nb_elephants

        .. note:: The elephant's number can not exceed 5.
        .. warning:: the number of elephant can't be changed by hand.
        """
        # Read-only view of the name-mangled counter maintained by add()/delete()/move().
        return self.__nb_elephants
    @nb_elephants.setter
    def nb_elephants(self, x):
        """
        Setting the elephant's number.
        .. warning:: the number of elephant can't be changed by hand.
        """
        # Deliberately ignores x: the counter is only maintained internally.
        print('Warning ! Changing the number of Elephant is not possible!')
<|reserved_special_token_0|>
    @nb_rhinoceros.setter
    def nb_rhinoceros(self, x):
        """
        Setting the rhinoceros's number.
        .. warning:: the number of rhinoceros can't be changed by hand.
        """
        # Deliberately ignores x: the counter is only maintained internally.
        print('Warning ! Changing the number of Rhinoceros is not possible!')
def add(self, animal):
"""
This method add a new animal onto the board, with position and orientation
It returns whether the placement was possible or not.
:Args:
:param animal (Animal): the animal to add on the GameMap.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.add(a)
.. note:: the turn does not count if the insertion was not possible
.. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see
if insertion is possible
.. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
"""
x, y = animal.coords
if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==
0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_elephants += 1
self.playerTurn = 'Rhinoceros'
elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (
x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_rhinoceros += 1
self.playerTurn = 'Elephant'
else:
return False
def delete(self, animal):
"""
This method removes an animal from the board
It reduces by one the number of animals of that species
:Args:
:param animal (Animal): the animal to delete.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.delete(a)
.. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
.. note:: if removal of a boulder, game ends?
.. warning:: error if piece is not on the edge
"""
x, y = animal.coords
if x == 0 or x == 4 or y == 0 or y == 4:
self[x][y] = 0
if animal.species == 'Elephant':
self.__nb_elephants -= 1
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
def push_counter(self, x, y, cx, cy, counter=1, k=0):
"""
This recursive method determines if a push move is possible by counting the elements having to be pushed,
and taking into account their orientation.
It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.
:Args:
:param x (int): is the abscissa of the current GamePiece,
:param y (int): is the ordinate of the current GamePiece,
:param cx (int): the direction of the move following the x-axis,
:param cy (int): the direction of the move following the y-axis,
:param counter (int): the sum of the scalar product of each animals in a row,
:param k (int): the counter of pawns in a row.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.push_counter(0, 1, 1, 0)
.. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
.. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
.. warning:: ...
.. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.
"""
k += 1
if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):
return counter, k
elif self[x + cx][y + cy] == 0:
return counter, k
elif isinstance(self[x + cx][y + cy], Animal):
if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:
counter += 1
elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:
counter -= 2
elif isinstance(self[x + cx][y + cy], Boulder):
counter -= 1
return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
def move(self, animal, ncoords, ndir):
"""
This method moves an animal from on the board, as well as turns it
If the coords to which the animal is moving are taken, the the animal pushes
:Args:
:param animal (Animal): the animal to move,
:param ncoords (tuple): the new coordinates of the animal,
:param ndir (np.array): the new direction of the animal.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.move(a,(1,1),np.array([0,1]))
.. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
.. note:: player turn does not change if move is not possible
.. warning:: ...
.. info:: it is possible to both rotate and move to another position in the same turn
"""
x, y = animal.coords
nx, ny = ncoords
cx, cy = nx - x, ny - y
if abs(cx) > 1 or abs(cy) > 1:
return False
elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) and (animal.direction[0] == cx and animal.
direction[1] == cy):
res = self.push_counter(x, y, cx, cy, 1)
c = res[0]
k = res[1]
if c >= 0:
for i in range(k, 0, -1):
if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==
-1 or y + i * cy == 5):
if isinstance(self[x + (i - 1) * cx][y + (i - 1) *
cy], Animal):
self[x + (i - 1) * cx][y + (i - 1) * cy] = animal
if animal.species == 'Elephant':
self.__nb_elephants -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
else:
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
for k in range(5):
if isinstance(self[x + (i - 1 - k) * cx][y +
(i - 1 - k) * cy], Animal) and [self[x +
(i - 1 - k) * cx][y + (i - 1 - k) * cy]
.direction[0], self[x + (i - 1 - k) *
cx][y + (i - 1 - k) * cy].direction[1]
] == [cx, cy]:
self.winner = self[x + (i - 1 - k) * cx][
y + (i - 1 - k) * cy].species
print('winner is', self.winner)
break
else:
self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][
y + (i - 1) * cy]
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
self[x + i * cx][y + i * cy
].coords = x + i * cx, y + i * cy
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
print('Push not possible')
return False
elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) or cx == 0 and cy == 0:
animal.coords = nx, ny
animal.direction = ndir
self[x][y] = 0
self[nx][ny] = animal
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
def __str__(self):
"""
Show the current state of the game board
:return: the string with the characteristics of the board
:rtype: str
"""
s = ''
for i in range(5):
for j in range(5):
ani = False
if self[i][j] == 0:
s += ' 0 '
elif self[i][j].species == 'Elephant':
s += ' E'
ani = True
elif self[i][j].species == 'Rhinoceros':
s += ' R'
ani = True
else:
s += ' B '
if ani:
if self[i][j].direction[0] == 0 and self[i][j].direction[1
] == 1:
d = '> '
elif self[i][j].direction[0] == -1 and self[i][j
].direction[1] == 0:
d = '∧ '
elif self[i][j].direction[0] == 0 and self[i][j].direction[
1] == -1:
d = '< '
else:
d = '∨ '
s += d
s += '\n \n'
return s
<|reserved_special_token_0|>
def load(self, file):
"""
This method load a KingOfSiam file with the .kos extension in a GameMap object.
:Args:
:param file (file object): is file to load.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
.. note:: this method take in argument a file object.
"""
for i in range(5):
for j in range(5):
self[i][j] = 0
f = file.readlines()
k = 0
while k < len(f) and 'Boulder {' not in f[k]:
k += 1
k += 1
while ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
self[x][y] = Boulder(x, y)
k += 1
while k < len(f) and 'Elephant {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Elephant')
k += 1
while k < len(f) and 'Rhinoceros {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Rhinoceros')
k += 1
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GameMap(list):
    """
    The Gamemap module
    ==================

    Creating the Gamemap.

    This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.

    Each square holds either the integer 0 (empty) or the Animal / Boulder standing on it.

    :Example:
    >>> m = GameMap()

    .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`
    .. moduleauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>, Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
    """

    def __init__(self):
        super().__init__()
        self.xmax = 5  # board width
        self.ymax = 5  # board height
        self.__nb_elephants = 0   # elephants on the board, capped at 5 by add()
        self.__nb_rhinoceros = 0  # rhinoceroses on the board, capped at 5 by add()
        self.nb_boulders = 0
        self.nb_crosses = 0
        self.playerTurn = 'Elephant'  # Elephant always opens the game
        self.winner = ''  # species of the winner, '' while the game is running
        # An empty square is stored as the integer 0.
        for _ in range(self.ymax):
            self.append([0 for _ in range(self.xmax)])
        # The three starting boulders occupy (2,1), (2,2) and (2,3).
        for col in range(1, 4):
            self[2][col] = Boulder(2, col)
            self.nb_boulders += 1

    @property
    def nb_elephants(self):
        """
        This is the number of elephant on the gamemap.

        :Getter: Return the number of elephant on the gamemap.
        :Type: int

        :Getter's example:
        >>> m = GameMap()
        >>> ne = m.nb_elephants

        .. note:: The elephant's number can not exceed 5.
        .. warning:: the number of elephant can't be changed by hand.
        """
        return self.__nb_elephants

    @nb_elephants.setter
    def nb_elephants(self, x):
        """
        Setting the elephant's number.
        .. warning:: the number of elephant can't be changed by hand.
        """
        # Deliberately ignores x: the counter is only maintained internally.
        print('Warning ! Changing the number of Elephant is not possible!')

    @property
    def nb_rhinoceros(self):
        """
        This is the number of rinoceros on the gamemap.

        :Getter: Return the number of rhinoceros on the gamemap.
        :Type: int

        :Getter's example:
        >>> m = GameMap()
        >>> nr = m.nb_rhinoceros

        .. note:: The rhinoceros's number can not exceed 5.
        .. warning:: the number of rhinoceros can't be changed by hand.
        """
        return self.__nb_rhinoceros

    @nb_rhinoceros.setter
    def nb_rhinoceros(self, x):
        """
        Setting the rhinoceros's number.
        .. warning:: the number of rhinoceros can't be changed by hand.
        """
        # Deliberately ignores x: the counter is only maintained internally.
        print('Warning ! Changing the number of Rhinoceros is not possible!')

    def add(self, animal):
        """
        Place a new animal on the board.

        An animal may only enter from the rim, onto a free square, and only
        while fewer than five animals of its species are on the board.  A
        successful insertion consumes the current player's turn.

        :Args:
            :param animal (Animal): the animal to add on the GameMap.

        :returns: False when the insertion is impossible, None otherwise.

        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.add(a)

        .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
        """
        x, y = animal.coords
        on_rim = x == 0 or x == 4 or y == 0 or y == 4
        if animal.species == 'Elephant' and self.__nb_elephants < 5 and on_rim and self[x][y] == 0:
            self[x][y] = animal
            self.__nb_elephants += 1
            self.playerTurn = 'Rhinoceros'
        elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and on_rim and self[x][y] == 0:
            self[x][y] = animal
            self.__nb_rhinoceros += 1
            self.playerTurn = 'Elephant'
        else:
            return False

    def delete(self, animal):
        """
        Remove an animal from the board and decrement its species counter.

        Only pieces standing on the rim can be taken off; a successful removal
        consumes the current player's turn.

        :Args:
            :param animal (Animal): the animal to delete.

        :returns: False when the piece is not on the rim, None otherwise.

        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.delete(a)

        .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
        """
        x, y = animal.coords
        # Guard clause: pieces in the middle of the board cannot leave it.
        if not (x == 0 or x == 4 or y == 0 or y == 4):
            return False
        self[x][y] = 0
        if animal.species == 'Elephant':
            self.__nb_elephants -= 1
        elif animal.species == 'Rhinoceros':
            self.__nb_rhinoceros -= 1
        if self.playerTurn == 'Elephant':
            self.playerTurn = 'Rhinoceros'
        elif self.playerTurn == 'Rhinoceros':
            self.playerTurn = 'Elephant'

    def push_counter(self, x, y, cx, cy, counter=1, k=0):
        """
        Recursively walk the row of pieces in front of (x, y) in direction
        (cx, cy) and decide whether a push is possible.

        Every animal aligned with the push strengthens it (+1), every opposing
        animal weakens it (-2) and every boulder weakens it (-1); sideways
        animals are neutral.  The push is legal when the final counter is not
        negative.

        :Args:
            :param x (int): is the abscissa of the current GamePiece,
            :param y (int): is the ordinate of the current GamePiece,
            :param cx (int): the direction of the move following the x-axis,
            :param cy (int): the direction of the move following the y-axis,
            :param counter (int): the running strength of the push,
            :param k (int): the number of pieces counted in the row so far.

        :returns: the tuple (counter, k).

        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.push_counter(0, 1, 1, 0)

        .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
        """
        k += 1
        nx, ny = x + cx, y + cy
        # Stop at the edge of the board or at the first empty square.
        if not (0 <= nx <= 4 and 0 <= ny <= 4) or self[nx][ny] == 0:
            return counter, k
        neighbour = self[nx][ny]
        if isinstance(neighbour, Animal):
            alignment = neighbour.direction @ np.array([cx, cy])
            if alignment == 1:
                counter += 1
            elif alignment == -1:
                counter -= 2
        elif isinstance(neighbour, Boulder):
            counter -= 1
        return self.push_counter(nx, ny, cx, cy, counter, k)

    def move(self, animal, ncoords, ndir):
        """
        Move an animal on the board, and rotate it to face ndir.

        A one-square orthogonal step onto a free square (or a rotation in
        place) is a plain move.  Stepping onto an occupied square while facing
        the step direction triggers a push: push_counter() decides whether the
        row can be shoved, then every piece of the row shifts one square.  A
        piece shifted off the board is removed; when that piece is a boulder,
        the pushing animal closest to it wins the game.

        :Args:
            :param animal (Animal): the animal to move,
            :param ncoords (tuple): the new coordinates of the animal,
            :param ndir (np.array): the new direction of the animal.

        :returns: False when the move is illegal, None otherwise.

        .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
        .. note:: player turn does not change if move is not possible
        .. info:: it is possible to both rotate and move to another position in the same turn
        """
        x, y = animal.coords
        nx, ny = ncoords
        cx, cy = nx - x, ny - y
        orthogonal_step = (cx == 0 and abs(cy) == 1) or (abs(cx) == 1 and cy == 0)
        if abs(cx) > 1 or abs(cy) > 1:
            return False
        elif self[nx][ny] != 0 and orthogonal_step and (animal.direction[0] == cx and animal.direction[1] == cy):
            counter, row_length = self.push_counter(x, y, cx, cy, 1)
            if counter >= 0:
                # Shift the whole row one square, starting from its far end.
                for i in range(row_length, 0, -1):
                    px, py = x + (i - 1) * cx, y + (i - 1) * cy  # square the piece leaves
                    qx, qy = x + i * cx, y + i * cy              # square the piece enters
                    if qx == -1 or qx == 5 or qy == -1 or qy == 5:
                        # The leading piece is shoved off the board.
                        fallen = self[px][py]
                        self[px][py] = 0
                        if isinstance(fallen, Animal):
                            # Bug fix: decrement the counter of the species that
                            # actually fell, not the pusher's species.
                            if fallen.species == 'Elephant':
                                self.__nb_elephants -= 1
                            else:
                                self.__nb_rhinoceros -= 1
                        else:
                            # A boulder fell: the nearest animal pushing in the
                            # row direction wins the game.
                            self.nb_boulders -= 1
                            for j in range(i):
                                piece = self[x + (i - 1 - j) * cx][y + (i - 1 - j) * cy]
                                if isinstance(piece, Animal) and [piece.direction[0], piece.direction[1]] == [cx, cy]:
                                    self.winner = piece.species
                                    print('winner is', self.winner)
                                    break
                    else:
                        self[qx][qy] = self[px][py]
                        self[px][py] = 0
                        self[qx][qy].coords = (qx, qy)
                if self.playerTurn == 'Elephant':
                    self.playerTurn = 'Rhinoceros'
                elif self.playerTurn == 'Rhinoceros':
                    self.playerTurn = 'Elephant'
            else:
                print('Push not possible')
                return False
        elif (self[nx][ny] == 0 and orthogonal_step) or (cx == 0 and cy == 0):
            # Plain move or rotation in place: ndir is only applied here.
            animal.coords = (nx, ny)
            animal.direction = ndir
            self[x][y] = 0
            self[nx][ny] = animal
            if self.playerTurn == 'Elephant':
                self.playerTurn = 'Rhinoceros'
            elif self.playerTurn == 'Rhinoceros':
                self.playerTurn = 'Elephant'
        else:
            return False

    def __str__(self):
        """
        Show the current state of the game board.

        Empty squares print as '0', boulders as 'B', and animals as 'E'/'R'
        followed by an arrow showing their facing direction.

        :return: the string with the characteristics of the board
        :rtype: str
        """
        rows = []
        for i in range(5):
            line = ''
            for j in range(5):
                piece = self[i][j]
                if piece == 0:
                    line += ' 0 '
                    continue
                if piece.species == 'Elephant':
                    line += ' E'
                elif piece.species == 'Rhinoceros':
                    line += ' R'
                else:
                    line += ' B '
                    continue
                dx, dy = piece.direction[0], piece.direction[1]
                if dx == 0 and dy == 1:
                    line += '> '
                elif dx == -1 and dy == 0:
                    line += '∧ '
                elif dx == 0 and dy == -1:
                    line += '< '
                else:
                    line += '∨ '
            rows.append(line)
        return '\n \n'.join(rows) + '\n \n'

    def save(self, file):
        """
        This method saves a GameMap in a KingOfSiam file with the .kos extension.

        :Args:
            :param file (file object): is file in which to write; it is closed on return.

        :Example:
            >>> g = GameMap()
            >>> file = open('save.kos', 'w')
            >>> g.save(file)

        .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
        .. note:: this method take in argument a file object.
        """
        boulders = []
        elephants = []
        rhinos = []
        # Serialize every occupied square: "(x,y)" for boulders and
        # "(x,y) : np.array([dx,dy])" for animals.
        for i in range(5):
            for j in range(5):
                if self[i][j] != 0:
                    piece = self[i][j]
                    L = []
                    if not isinstance(self[i][j], Boulder):
                        L.append(self[i][j].direction[0])
                        L.append(self[i][j].direction[1])
                    if piece.species == 'Elephant':
                        elephants.append('(' + str(i) + ',' + str(j) + ') : np.array([' + str(L[0]) + ',' + str(L[1]) + '])')
                    elif piece.species == 'Rhinoceros':
                        rhinos.append('(' + str(i) + ',' + str(j) + ') : np.array([' + str(L[0]) + ',' + str(L[1]) + '])')
                    elif isinstance(piece, Boulder):
                        boulders.append('(' + str(i) + ',' + str(j) + ')')
        file.write('# King of Siam GameFile \n\nplayer_turn {\n    ' + self.playerTurn + '\n}\n\n')
        file.write('Boulder {')
        for k in range(len(boulders)):
            file.write('\n    ' + boulders[k] + ';')
        file.write('\n}\n\nElephant {')
        for elt in elephants:
            file.write('\n    ' + elt + ';')
        file.write('\n}\n\nRhinoceros {')
        for elt in rhinos:
            file.write('\n    ' + elt + ';')
        file.write('\n}')
        file.close()

    def load(self, file):
        """
        This method loads a KingOfSiam file with the .kos extension into this GameMap.

        The board is cleared first, then the player turn, the boulders and the
        animals are restored from the sections written by save().  The species
        counters and the boulder counter are rebuilt while reading, so that
        add() keeps enforcing the 5-animals-per-species cap after a load.

        :Args:
            :param file (file object): is file to load; it is closed on return.

        :Example:
            >>> g = GameMap()
            >>> file = open('save.kos', 'r')
            >>> g.load(file)

        .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
        .. note:: this method take in argument a file object.
        """

        def _axis(token):
            # Map one serialized direction component to an int ('1' -> 1, '-1' -> -1).
            if token == '1':
                return 1
            if token == '-1':
                return -1
            return 0
        # Clear the board and reset the counters so that the previous position
        # does not leak into the loaded one.
        for i in range(5):
            for j in range(5):
                self[i][j] = 0
        self.__nb_elephants = 0
        self.__nb_rhinoceros = 0
        self.nb_boulders = 0
        f = file.readlines()
        # Restore the saved player turn (left unchanged if the section is missing).
        k = 0
        while k < len(f) and 'player_turn {' not in f[k]:
            k += 1
        if k + 1 < len(f) and f[k + 1].strip() in ('Elephant', 'Rhinoceros'):
            self.playerTurn = f[k + 1].strip()
        # Boulders: lines of the form "    (x,y);".
        k = 0
        while k < len(f) and 'Boulder {' not in f[k]:
            k += 1
        k += 1
        while k < len(f) and ';' in f[k]:
            coords = f[k][5:8].split(',')
            x, y = int(coords[0]), int(coords[1])
            self[x][y] = Boulder(x, y)
            self.nb_boulders += 1
            k += 1
        # Animals: lines of the form "    (x,y) : np.array([dx,dy]);".
        for species in ('Elephant', 'Rhinoceros'):
            while k < len(f) and species + ' {' not in f[k]:
                k += 1
            k += 1
            while k < len(f) and ':' in f[k] and ';' in f[k]:
                coords = f[k][5:8].split(',')
                x, y = int(coords[0]), int(coords[1])
                d = f[k][22:].split(']')[0].split(',')
                direction = np.array([_axis(d[0]), _axis(d[1])])
                self[x][y] = Animal(x, y, direction, species)
                if species == 'Elephant':
                    self.__nb_elephants += 1
                else:
                    self.__nb_rhinoceros += 1
                k += 1
        file.close()
if __name__ == '__main__':
    # Manual smoke test: build a fresh board and print it (the three starting
    # boulders should appear on the middle row).
    g = GameMap()
    print(g)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Brateaqu, Farolflu"
__copyright__ = "Copyright 2019"
__credits__ = ["Quentin BRATEAU", "Luca FAROLFI"]
__license__ = "GPL"
__version__ = "1.0"
__email__ = ["quentin.brateau@ensta-bretagne.org", "luca.farolfi@ensta-bretagne.org"]
# Importing modules
import numpy as np
from GamePieces import Animal, Boulder
class GameMap(list):
    """
    The Gamemap module
    ==================

    Creating the Gamemap.

    This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.

    Each square holds either the integer 0 (empty) or the Animal / Boulder standing on it.

    :Example:
    >>> m = GameMap()

    .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`
    .. moduleauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>, Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
    """

    def __init__(self):
        super().__init__()
        self.xmax = 5  # board width
        self.ymax = 5  # board height
        self.__nb_elephants = 0   # elephants on the board, capped at 5 by add()
        self.__nb_rhinoceros = 0  # rhinoceroses on the board, capped at 5 by add()
        self.nb_boulders = 0
        self.nb_crosses = 0
        self.playerTurn = 'Elephant'  # Elephant always opens the game
        self.winner = ''  # species of the winner, '' while the game is running
        # An empty square is stored as the integer 0.
        for _ in range(self.ymax):
            self.append([0 for _ in range(self.xmax)])
        # The three starting boulders occupy (2,1), (2,2) and (2,3).
        for col in range(1, 4):
            self[2][col] = Boulder(2, col)
            self.nb_boulders += 1

    @property
    def nb_elephants(self):
        """
        This is the number of elephant on the gamemap.

        :Getter: Return the number of elephant on the gamemap.
        :Type: int

        :Getter's example:
        >>> m = GameMap()
        >>> ne = m.nb_elephants

        .. note:: The elephant's number can not exceed 5.
        .. warning:: the number of elephant can't be changed by hand.
        """
        return self.__nb_elephants

    @nb_elephants.setter
    def nb_elephants(self, x):
        """
        Setting the elephant's number.
        .. warning:: the number of elephant can't be changed by hand.
        """
        # Deliberately ignores x: the counter is only maintained internally.
        print('Warning ! Changing the number of Elephant is not possible!')

    @property
    def nb_rhinoceros(self):
        """
        This is the number of rinoceros on the gamemap.

        :Getter: Return the number of rhinoceros on the gamemap.
        :Type: int

        :Getter's example:
        >>> m = GameMap()
        >>> nr = m.nb_rhinoceros

        .. note:: The rhinoceros's number can not exceed 5.
        .. warning:: the number of rhinoceros can't be changed by hand.
        """
        return self.__nb_rhinoceros

    @nb_rhinoceros.setter
    def nb_rhinoceros(self, x):
        """
        Setting the rhinoceros's number.
        .. warning:: the number of rhinoceros can't be changed by hand.
        """
        # Deliberately ignores x: the counter is only maintained internally.
        print('Warning ! Changing the number of Rhinoceros is not possible!')

    def add(self, animal):
        """
        Place a new animal on the board.

        An animal may only enter from the rim, onto a free square, and only
        while fewer than five animals of its species are on the board.  A
        successful insertion consumes the current player's turn.

        :Args:
            :param animal (Animal): the animal to add on the GameMap.

        :returns: False when the insertion is impossible, None otherwise.

        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.add(a)

        .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
        """
        x, y = animal.coords
        on_rim = x == 0 or x == 4 or y == 0 or y == 4
        if animal.species == 'Elephant' and self.__nb_elephants < 5 and on_rim and self[x][y] == 0:
            self[x][y] = animal
            self.__nb_elephants += 1
            self.playerTurn = 'Rhinoceros'
        elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and on_rim and self[x][y] == 0:
            self[x][y] = animal
            self.__nb_rhinoceros += 1
            self.playerTurn = 'Elephant'
        else:
            return False

    def delete(self, animal):
        """
        Remove an animal from the board and decrement its species counter.

        Only pieces standing on the rim can be taken off; a successful removal
        consumes the current player's turn.

        :Args:
            :param animal (Animal): the animal to delete.

        :returns: False when the piece is not on the rim, None otherwise.

        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.delete(a)

        .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
        """
        x, y = animal.coords
        # Guard clause: pieces in the middle of the board cannot leave it.
        if not (x == 0 or x == 4 or y == 0 or y == 4):
            return False
        self[x][y] = 0
        if animal.species == 'Elephant':
            self.__nb_elephants -= 1
        elif animal.species == 'Rhinoceros':
            self.__nb_rhinoceros -= 1
        if self.playerTurn == 'Elephant':
            self.playerTurn = 'Rhinoceros'
        elif self.playerTurn == 'Rhinoceros':
            self.playerTurn = 'Elephant'

    def push_counter(self, x, y, cx, cy, counter=1, k=0):
        """
        Recursively walk the row of pieces in front of (x, y) in direction
        (cx, cy) and decide whether a push is possible.

        Every animal aligned with the push strengthens it (+1), every opposing
        animal weakens it (-2) and every boulder weakens it (-1); sideways
        animals are neutral.  The push is legal when the final counter is not
        negative.

        :Args:
            :param x (int): is the abscissa of the current GamePiece,
            :param y (int): is the ordinate of the current GamePiece,
            :param cx (int): the direction of the move following the x-axis,
            :param cy (int): the direction of the move following the y-axis,
            :param counter (int): the running strength of the push,
            :param k (int): the number of pieces counted in the row so far.

        :returns: the tuple (counter, k).

        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.push_counter(0, 1, 1, 0)

        .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
        """
        k += 1
        nx, ny = x + cx, y + cy
        # Stop at the edge of the board or at the first empty square.
        if not (0 <= nx <= 4 and 0 <= ny <= 4) or self[nx][ny] == 0:
            return counter, k
        neighbour = self[nx][ny]
        if isinstance(neighbour, Animal):
            alignment = neighbour.direction @ np.array([cx, cy])
            if alignment == 1:
                counter += 1
            elif alignment == -1:
                counter -= 2
        elif isinstance(neighbour, Boulder):
            counter -= 1
        return self.push_counter(nx, ny, cx, cy, counter, k)

    def move(self, animal, ncoords, ndir):
        """
        Move an animal on the board, and rotate it to face ndir.

        A one-square orthogonal step onto a free square (or a rotation in
        place) is a plain move.  Stepping onto an occupied square while facing
        the step direction triggers a push: push_counter() decides whether the
        row can be shoved, then every piece of the row shifts one square.  A
        piece shifted off the board is removed; when that piece is a boulder,
        the pushing animal closest to it wins the game.

        :Args:
            :param animal (Animal): the animal to move,
            :param ncoords (tuple): the new coordinates of the animal,
            :param ndir (np.array): the new direction of the animal.

        :returns: False when the move is illegal, None otherwise.

        .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
        .. note:: player turn does not change if move is not possible
        .. info:: it is possible to both rotate and move to another position in the same turn
        """
        x, y = animal.coords
        nx, ny = ncoords
        cx, cy = nx - x, ny - y
        orthogonal_step = (cx == 0 and abs(cy) == 1) or (abs(cx) == 1 and cy == 0)
        if abs(cx) > 1 or abs(cy) > 1:
            return False
        elif self[nx][ny] != 0 and orthogonal_step and (animal.direction[0] == cx and animal.direction[1] == cy):
            counter, row_length = self.push_counter(x, y, cx, cy, 1)
            if counter >= 0:
                # Shift the whole row one square, starting from its far end.
                for i in range(row_length, 0, -1):
                    px, py = x + (i - 1) * cx, y + (i - 1) * cy  # square the piece leaves
                    qx, qy = x + i * cx, y + i * cy              # square the piece enters
                    if qx == -1 or qx == 5 or qy == -1 or qy == 5:
                        # The leading piece is shoved off the board.
                        fallen = self[px][py]
                        self[px][py] = 0
                        if isinstance(fallen, Animal):
                            # Bug fix: decrement the counter of the species that
                            # actually fell, not the pusher's species.
                            if fallen.species == 'Elephant':
                                self.__nb_elephants -= 1
                            else:
                                self.__nb_rhinoceros -= 1
                        else:
                            # A boulder fell: the nearest animal pushing in the
                            # row direction wins the game.
                            self.nb_boulders -= 1
                            for j in range(i):
                                piece = self[x + (i - 1 - j) * cx][y + (i - 1 - j) * cy]
                                if isinstance(piece, Animal) and [piece.direction[0], piece.direction[1]] == [cx, cy]:
                                    self.winner = piece.species
                                    print('winner is', self.winner)
                                    break
                    else:
                        self[qx][qy] = self[px][py]
                        self[px][py] = 0
                        self[qx][qy].coords = (qx, qy)
                if self.playerTurn == 'Elephant':
                    self.playerTurn = 'Rhinoceros'
                elif self.playerTurn == 'Rhinoceros':
                    self.playerTurn = 'Elephant'
            else:
                print('Push not possible')
                return False
        elif (self[nx][ny] == 0 and orthogonal_step) or (cx == 0 and cy == 0):
            # Plain move or rotation in place: ndir is only applied here.
            animal.coords = (nx, ny)
            animal.direction = ndir
            self[x][y] = 0
            self[nx][ny] = animal
            if self.playerTurn == 'Elephant':
                self.playerTurn = 'Rhinoceros'
            elif self.playerTurn == 'Rhinoceros':
                self.playerTurn = 'Elephant'
        else:
            return False

    def __str__(self):
        """
        Show the current state of the game board.

        Empty squares print as '0', boulders as 'B', and animals as 'E'/'R'
        followed by an arrow showing their facing direction.

        :return: the string with the characteristics of the board
        :rtype: str
        """
        rows = []
        for i in range(5):
            line = ''
            for j in range(5):
                piece = self[i][j]
                if piece == 0:
                    line += ' 0 '
                    continue
                if piece.species == 'Elephant':
                    line += ' E'
                elif piece.species == 'Rhinoceros':
                    line += ' R'
                else:
                    line += ' B '
                    continue
                dx, dy = piece.direction[0], piece.direction[1]
                if dx == 0 and dy == 1:
                    line += '> '
                elif dx == -1 and dy == 0:
                    line += '∧ '
                elif dx == 0 and dy == -1:
                    line += '< '
                else:
                    line += '∨ '
            rows.append(line)
        return '\n \n'.join(rows) + '\n \n'

    def save(self, file):
        """
        This method saves a GameMap in a KingOfSiam file with the .kos extension.

        :Args:
            :param file (file object): is file in which to write; it is closed on return.

        :Example:
            >>> g = GameMap()
            >>> file = open('save.kos', 'w')
            >>> g.save(file)

        .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>
        .. note:: this method take in argument a file object.
        """
        boulders = []
        elephants = []
        rhinos = []
        # Serialize every occupied square: "(x,y)" for boulders and
        # "(x,y) : np.array([dx,dy])" for animals.
        for i in range(5):
            for j in range(5):
                if self[i][j] != 0:
                    piece = self[i][j]
                    L = []
                    if not isinstance(self[i][j], Boulder):
                        L.append(self[i][j].direction[0])
                        L.append(self[i][j].direction[1])
                    if piece.species == 'Elephant':
                        elephants.append('(' + str(i) + ',' + str(j) + ') : np.array([' + str(L[0]) + ',' + str(L[1]) + '])')
                    elif piece.species == 'Rhinoceros':
                        rhinos.append('(' + str(i) + ',' + str(j) + ') : np.array([' + str(L[0]) + ',' + str(L[1]) + '])')
                    elif isinstance(piece, Boulder):
                        boulders.append('(' + str(i) + ',' + str(j) + ')')
        file.write('# King of Siam GameFile \n\nplayer_turn {\n    ' + self.playerTurn + '\n}\n\n')
        file.write('Boulder {')
        for k in range(len(boulders)):
            file.write('\n    ' + boulders[k] + ';')
        file.write('\n}\n\nElephant {')
        for elt in elephants:
            file.write('\n    ' + elt + ';')
        file.write('\n}\n\nRhinoceros {')
        for elt in rhinos:
            file.write('\n    ' + elt + ';')
        file.write('\n}')
        file.close()

    def load(self, file):
        """
        This method loads a KingOfSiam file with the .kos extension into this GameMap.

        The board is cleared first, then the player turn, the boulders and the
        animals are restored from the sections written by save().  The species
        counters and the boulder counter are rebuilt while reading, so that
        add() keeps enforcing the 5-animals-per-species cap after a load.

        :Args:
            :param file (file object): is file to load; it is closed on return.

        :Example:
            >>> g = GameMap()
            >>> file = open('save.kos', 'r')
            >>> g.load(file)

        .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>
        .. note:: this method take in argument a file object.
        """

        def _axis(token):
            # Map one serialized direction component to an int ('1' -> 1, '-1' -> -1).
            if token == '1':
                return 1
            if token == '-1':
                return -1
            return 0
        # Clear the board and reset the counters so that the previous position
        # does not leak into the loaded one.
        for i in range(5):
            for j in range(5):
                self[i][j] = 0
        self.__nb_elephants = 0
        self.__nb_rhinoceros = 0
        self.nb_boulders = 0
        f = file.readlines()
        # Restore the saved player turn (left unchanged if the section is missing).
        k = 0
        while k < len(f) and 'player_turn {' not in f[k]:
            k += 1
        if k + 1 < len(f) and f[k + 1].strip() in ('Elephant', 'Rhinoceros'):
            self.playerTurn = f[k + 1].strip()
        # Boulders: lines of the form "    (x,y);".
        k = 0
        while k < len(f) and 'Boulder {' not in f[k]:
            k += 1
        k += 1
        while k < len(f) and ';' in f[k]:
            coords = f[k][5:8].split(',')
            x, y = int(coords[0]), int(coords[1])
            self[x][y] = Boulder(x, y)
            self.nb_boulders += 1
            k += 1
        # Animals: lines of the form "    (x,y) : np.array([dx,dy]);".
        for species in ('Elephant', 'Rhinoceros'):
            while k < len(f) and species + ' {' not in f[k]:
                k += 1
            k += 1
            while k < len(f) and ':' in f[k] and ';' in f[k]:
                coords = f[k][5:8].split(',')
                x, y = int(coords[0]), int(coords[1])
                d = f[k][22:].split(']')[0].split(',')
                direction = np.array([_axis(d[0]), _axis(d[1])])
                self[x][y] = Animal(x, y, direction, species)
                if species == 'Elephant':
                    self.__nb_elephants += 1
                else:
                    self.__nb_rhinoceros += 1
                k += 1
        file.close()
if __name__ == '__main__':
    # Quick manual check: build a fresh board and display it.
    board = GameMap()
    print(board)
|
flexible
|
{
"blob_id": "7cb75195df567a5b65fe2385423b0082f3b9de4b",
"index": 1051,
"step-1": "<mask token>\n\n\nclass GameMap(list):\n <mask token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Elephant is not possible!')\n <mask token>\n <mask token>\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. 
sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n <mask token>\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <mask token>\n <mask token>\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n\n .. note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GameMap(list):\n <mask token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Elephant is not possible!')\n <mask token>\n <mask token>\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. 
sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n <mask token>\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n <mask token>\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass GameMap(list):\n <mask token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Elephant is not possible!')\n <mask token>\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. 
sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n .. note:: if removal of a boulder, game ends?\n .. warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. 
If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n\n .. 
note:: player turn does not change if move is not possible\n .. warning:: ...\n .. info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = 
animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n <mask token>\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass GameMap(list):\n \"\"\"\n The Gamemap module\n ==================\n\n Creating the Gamemap.\n\n This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.\n\n :Example:\n >>> m = GameMap()\n\n .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`\n .. moduleauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>, Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Elephant is not possible!')\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. 
warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\nif __name__ == '__main__':\n g = GameMap()\n print(g)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n__author__ = \"Brateaqu, Farolflu\"\n__copyright__ = \"Copyright 2019\"\n__credits__ = [\"Quentin BRATEAU\", \"Luca FAROLFI\"]\n\n__license__ = \"GPL\"\n__version__ = \"1.0\"\n__email__ = [\"quentin.brateau@ensta-bretagne.org\", \"luca.farolfi@ensta-bretagne.org\"]\n\n\n# Importing modules\nimport numpy as np\nfrom GamePieces import Animal, Boulder\n\n\nclass GameMap(list):\n \"\"\"\n The Gamemap module\n ==================\n\n Creating the Gamemap.\n\n This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.\n\n :Example:\n >>> m = GameMap()\n\n .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`\n .. moduleauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>, Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = \"Elephant\"\n self.winner = \"\"\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3): # Setting up the 3 Boulders\n self[2][1+k] = Boulder(2, 1+k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! 
Changing the number of Elephant is not possible!')\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. 
sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = \"Rhinoceros\"\n\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = \"Elephant\"\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n .. note:: if removal of a boulder, game ends?\n .. warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == \"Elephant\":\n self.playerTurn = \"Rhinoceros\"\n elif self.playerTurn == \"Rhinoceros\":\n self.playerTurn = \"Elephant\"\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter = 1, k = 0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. 
If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= (x+cx) <= 4 and 0 <= y+cy <= 4):\n return counter, k\n\n elif self[x + cx][y + cy] == 0:\n return counter, k\n\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ + np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ + np.array([cx, cy]) == -1:\n counter -= 2\n\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n\n .. 
note:: player turn does not change if move is not possible\n .. warning:: ...\n .. info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n (nx, ny) = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == 1 and cy == 0) and (animal.direction[0] == cx and animal.direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx) == -1 or (x + i * cx) == 5 or (y + i * cy) == -1 or (y + i * cy) == 5:\n if isinstance(self[x + (i-1)*cx][y + (i-1)*cy], Animal):\n self[x + (i-1)*cx][y + (i-1)*cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i-1)*cx][y + (i-1)*cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy], Animal) and [self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].direction[0], self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].direction[1]] == [cx, cy]:\n self.winner=self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].species\n print(\"winner is\", self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy].coords = (x + i * cx, y + i * cy)\n\n if self.playerTurn == \"Elephant\":\n self.playerTurn = \"Rhinoceros\"\n elif self.playerTurn == \"Rhinoceros\":\n self.playerTurn = \"Elephant\"\n else:\n print(\"Push not possible\")\n return (False)\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == 1 and cy == 0) or (cx == 0 and cy == 0):\n animal.coords = (nx, ny)\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 
\"Elephant\":\n self.playerTurn = \"Rhinoceros\"\n elif self.playerTurn == \"Rhinoceros\":\n self.playerTurn = \"Elephant\"\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <luca.farolfi@ensta-bretagne.org>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j]!= 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == \"Elephant\":\n elephants.append(\"(\" + str(i) + \",\" + str(j)+ \") : np.array([\"+str(L[0])+ \",\" + str(L[1])+\"])\")\n elif piece.species == \"Rhinoceros\":\n rhinos.append(\"(\"+str(i)+\",\" +str(j)+ \") : np.array([\"+str(L[0]) + \",\" + str(L[1])+\"])\")\n elif isinstance(piece, Boulder):\n boulders.append(\"(\" + str(i) + \",\" + str(j) + \")\")\n file.write(\"# King of Siam GameFile \\n\\nplayer_turn {\\n \" + self.playerTurn + \"\\n}\\n\\n\")\n file.write(\"Boulder {\")\n for k in range(len(boulders)):\n file.write(\"\\n \" + boulders[k] + \";\")\n file.write(\"\\n}\\n\\nElephant {\")\n for elt in elephants:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\\n\\nRhinoceros {\")\n for elt in rhinos:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\")\n\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <quentin.brateau@ensta-bretagne.org>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n\n f = file.readlines()\n k = 0\n while k < len(f) and \"Boulder {\" not in f[k]:\n k += 1\n k += 1\n while \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n\n while k < len(f) and \"Elephant {\" not in f[k]:\n k += 1\n k += 1\n while \":\" in f[k] and \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(\"]\")[0].split(\",\")\n xdir, ydir = 0, 0\n if d[0] == \"1\":\n xdir = 1\n elif d[0] == \"-1\":\n xdir = -1\n if d[1] == \"1\":\n ydir = 1\n elif d[1] == \"-1\":\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n\n while k < len(f) and \"Rhinoceros {\" not in f[k]:\n k += 1\n k += 1\n while \":\" in f[k] and \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(\"]\")[0].split(\",\")\n xdir, ydir = 0, 0\n if d[0] == \"1\":\n xdir = 1\n elif d[0] == \"-1\":\n xdir = -1\n if d[1] == \"1\":\n ydir = 1\n elif d[1] == \"-1\":\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n\n file.close()\n\n\nif __name__ == '__main__':\n g = GameMap()\n print(g)",
"step-ids": [
8,
9,
11,
15,
18
]
}
|
[
8,
9,
11,
15,
18
] |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/10_DogcatcherFlatten.ipynb
import pandas as pd
import argparse
import csv
import os
import numpy as np
import string
def FivePrimeArea(df):
    """Attach each gene's coordinate-first ("FA") area and collapse to one
    row per gene.

    The FA area runs from ``gene_start`` to the end of the exon with the
    smallest end coordinate (the leftmost exon — for minus-strand genes this
    is not the biological 5' end).

    Expects columns: chr, end, gene_start, type, name.
    Adds columns: FA_start, FA_end, FA_length.
    """
    ordered = df.sort_values(by=["chr", "end"], ascending=True)
    ordered["FA_start"] = ordered["gene_start"]
    # After the ascending sort, the first exon row per gene is the one with
    # the smallest end coordinate.
    first_exons = ordered[ordered["type"] == "exon"].copy()
    first_exons = first_exons.drop_duplicates(subset=["name"], keep="first")
    first_exons["FA_end"] = first_exons["end"]
    ordered = pd.merge(ordered, first_exons[["name", "FA_end"]], how="left", on="name")
    ordered["FA_length"] = ordered["FA_end"] - ordered["FA_start"]
    return ordered.drop_duplicates(subset=["name"], keep="first")
def ThreePrimeArea(df):
    """Attach each gene's coordinate-last ("LA") area and collapse to one
    row per gene.

    The LA area runs from the start of the exon with the largest end
    coordinate (the rightmost exon) to ``gene_end``.

    Expects columns: chr, start, end, gene_end, type, name.
    Adds columns: LA_start, LA_end, LA_length.
    """
    ordered = df.sort_values(by=["chr", "end"], ascending=False)
    ordered["LA_end"] = ordered["gene_end"]
    # After the descending sort, the first exon row per gene is the one with
    # the largest end coordinate.
    last_exons = ordered[ordered["type"] == "exon"].copy()
    last_exons = last_exons.drop_duplicates(subset=["name"], keep="first")
    last_exons["LA_start"] = last_exons["start"]
    ordered = pd.merge(ordered, last_exons[["name", "LA_start"]], how="left", on="name")
    ordered["LA_length"] = ordered["LA_end"] - ordered["LA_start"]
    return ordered.drop_duplicates(subset=["name"], keep="first")
def getAreas(df):
    """Compute first ("FA") and last ("LA") areas for every gene, per strand.

    Splits the table by strand, computes both areas for each half, and
    concatenates the results (plus strand first). Called "area" rather than
    exon because the span is anchored on the gene boundary, not only on the
    exon itself.
    """
    plus = df[df["strand"] == "+"]
    minus = df[df["strand"] == "-"]
    la_cols = ["name", "LA_start", "LA_end", "LA_length"]
    plus_areas = pd.merge(FivePrimeArea(plus), ThreePrimeArea(plus)[la_cols], on="name")
    minus_areas = pd.merge(FivePrimeArea(minus), ThreePrimeArea(minus)[la_cols], on="name")
    return pd.concat([plus_areas, minus_areas])
def chrDIC(df):
    """Split an annotation table into one sub-DataFrame per chromosome.

    Args:
        df (pd.DataFrame): table with a "chr" column.

    Returns:
        dict: chromosome name -> rows of ``df`` on that chromosome
        (original row order preserved).
    """
    chr_names = df['chr'].unique().tolist()
    # One lookup per chromosome so countInside() only scans the relevant
    # rows instead of the whole table for every gene.
    # (The original bound the dict to a second, unused name d_gtf_chr.)
    return {chrom: df[df["chr"] == chrom] for chrom in chr_names}
def countInside(df, start, end):
    """Return a comma-joined string of gene names strictly contained in the
    open interval (start, end), or NaN when there are none.

    Strict inequalities mean a gene never counts as inside itself.
    """
    contained = df.loc[(df["start"] > start) & (df["end"] < end), "name"]
    joined = ",".join(contained.unique().tolist())
    return joined if joined else np.nan
def removeInside(df):
    """Drop every gene that lies strictly inside another gene on ``df``.

    Returns a pair ``(kept, df_inside)``: the table without nested genes
    (and without the temporary "genes_inside" column), and a one-column
    DataFrame listing the removed gene names.
    """
    per_chrom = chrDIC(df)
    # Flag each gene with the names of genes strictly contained in its span.
    df["genes_inside"] = df.apply(
        lambda row: countInside(per_chrom[row["chr"]], row["start"], row["end"]),
        axis=1,
    )
    flagged = df.dropna(subset=["genes_inside"])
    collected = []
    for cell in flagged["genes_inside"]:
        collected.extend(cell.split(","))
    inside_genes = list(set(collected))
    print(f"Removing {len(inside_genes)} genes that are inside other genes")
    df_inside = pd.DataFrame(inside_genes, columns=["name"])
    df = df[~df["name"].isin(df_inside["name"])].copy()
    del df["genes_inside"]
    return df, df_inside
def flattenGTF(file_in,file_type,NEXTFLOW=True):
    """Flatten a gene annotation to one record per gene with first/last areas.

    Reads the annotation at ``file_in``, reduces it to one row per gene
    carrying the full gene span plus its first ("FA") and last ("LA") areas,
    drops genes fully nested inside another gene on the same strand, keeps
    only the longest among same-strand genes sharing a start (plus strand)
    or an end (minus strand), and writes three outputs:

    * ``<base>_flat.txt``            -- tab-separated flat table
    * ``<base>_flat.gtf``            -- GTF-style version of the same rows
    * ``<base>_flat_CHROMNAMES.txt`` -- unique chromosome names

    Args:
        file_in (str): path to the annotation file; a 4-character extension
            (e.g. ".gtf", ".bed") is assumed when building output names.
        file_type (str): one of "ENSEMBL", "BED", "REFSEQGFF", "REFSEQBED".
        NEXTFLOW (bool): when True, strip the input's directory so outputs
            land in the current working directory (Nextflow work dir).

    Returns:
        pd.DataFrame: the flattened table that was written to ``*_flat.txt``.

    Raises:
        ValueError: if ``file_type`` is not one of the supported values.
    """
    if file_type not in ("ENSEMBL", "BED", "REFSEQGFF", "REFSEQBED"):
        # Fail fast: an unrecognised type previously surfaced later as a
        # NameError when df was first used.
        raise ValueError(f"Unsupported file_type: {file_type}")
    if file_type == "ENSEMBL":
        print(f"Flattening ENSEMBL like genome {file_in}")
        my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
        df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
        df["chr"] = df["chr"].astype(str)
        # Patch/scaffold contigs carry a "." in their name; r"\." keeps the
        # regex literal (plain "\." is an invalid escape in Python >= 3.12).
        df = df[~df["chr"].str.contains(r"\.")]  # Take out patches
        df.sort_values(by=["chr","start"], inplace=True, ascending=True)
        fout = f"{file_in[:-4]}_sort.gtf"
        df.to_csv(fout,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
        # Attribute field starts with: gene_id "<id>"; ... -> keep the bare id.
        df["name"] = df["gene_id"].str.split(';',expand=True)[0]
        df["name"] = df["name"].str.replace("gene_id ","")
        df["name"] = df["name"].str.replace("\"","")
        df["type"] = df["type"].astype(str)
        # Attach the full gene span to every feature row of the same gene.
        df_gene = df[df["type"]=="gene"].copy()
        df_gene["gene_start"] = df_gene["start"]
        df_gene["gene_end"] = df_gene["end"]
        df_gene = df_gene[["name","gene_start","gene_end"]].copy()
        df = pd.merge(df,df_gene,how="left",on="name")
        df = getAreas(df)
        # Report the whole gene span as the record's coordinates.
        df["start"] = df["gene_start"]
        df["end"] = df["gene_end"]
    if file_type == "BED":
        # BED input is already one row per gene: the first/last areas
        # degenerate to the full span, and GTF-only columns get placeholders.
        my_col = ["chr","start","end","name","strand"]
        df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
        df["FA_start"] = df["start"]
        df["FA_end"] = df["end"]
        df["LA_start"] = df["start"]
        df["LA_end"] = df["end"]
        df["dot"] = "."
        df["dot2"] = "."
        df["source"] = "NA"
        df["type"] = "NA"
        df["gene_id"] = df["name"]
    if file_type == "REFSEQGFF":
        # RefSeq uses accession names (NC_000001.11, ...); map them back to
        # UCSC-style chr1..chrY names.
        # https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly
        print(f"Flattening REFSEQGFF like genome")
        # https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/
        # download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz
        # sort and index in IGV
        my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
        replace_list = [("chr1","NC_000001.11"),
                    ("chr2","NC_000002.12"),
                    ("chr3","NC_000003.12"),
                    ("chr4","NC_000004.12"),
                    ("chr5","NC_000005.10"),
                    ("chr6","NC_000006.12"),
                    ("chr7","NC_000007.14"),
                    ("chr8","NC_000008.11"),
                    ("chr9","NC_000009.12"),
                    ("chr10","NC_000010.11"),
                    ("chr11","NC_000011.10"),
                    ("chr12","NC_000012.12"),
                    ("chr13","NC_000013.11"),
                    ("chr14","NC_000014.9"),
                    ("chr15","NC_000015.10"),
                    ("chr16","NC_000016.10"),
                    ("chr17","NC_000017.11"),
                    ("chr18","NC_000018.10"),
                    ("chr19","NC_000019.10"),
                    ("chr20","NC_000020.11"),
                    ("chr21","NC_000021.9"),
                    ("chr22","NC_000022.11"),
                    ("chrX","NC_000023.11"),
                    ("chrY","NC_000024.10")]
        df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
        df = df[df["type"]=="gene"].copy()
        # Change NC accession names to chr names
        for l in replace_list:
            df["chr"] = np.where(df["chr"]==l[1],l[0],df["chr"])
        # Anything still carrying a "." is an unmapped patch/scaffold.
        df = df[~df["chr"].str.contains(r"\.")]  # Take out patches
        df["name"] = df["gene_id"].str.split(';',expand=True)[0]
        df["name"] = df["name"].str.replace("ID=gene-","")
        df["type"] = df["type"].astype(str)
        df_gene = df[df["type"]=="gene"].copy()
        df_gene["gene_start"] = df_gene["start"]
        df_gene["gene_end"] = df_gene["end"]
        df_gene = df_gene[["name","gene_start","gene_end"]].copy()
        df = pd.merge(df,df_gene,how="left",on="name")
        df = getAreas(df)
        df["start"] = df["gene_start"]
        df["end"] = df["gene_end"]
    if file_type == "REFSEQBED":
        # refGene-style BED12, e.g.:
        # chr1 11873 14409 NR_046018 0 + 14409 14409 0 3 354,109,1189, 0,739,1347,
        # Only the first six columns are used.
        my_col = ["chr","start","end","name","dot","strand","start1","start2","dot2","dot3","gene_id","gene_id2"]
        df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
        df = df[["chr","start","end","name","strand"]]
        df["FA_start"] = df["start"]
        df["FA_end"] = df["end"]
        df["LA_start"] = df["start"]
        df["LA_end"] = df["end"]
        df["dot"] = "."
        df["dot2"] = "."
        df["source"] = "NA"
        df["type"] = "NA"
        df["gene_id"] = df["name"]
    df_plu = df[df["strand"]=="+"].copy()
    df_min = df[df["strand"]=="-"].copy()
    # Drop genes that sit entirely inside another gene on the same strand.
    df_plu, df_plu_inside = removeInside(df_plu)
    df_min, df_min_inside = removeInside(df_min)
    # Among same-strand genes sharing a start (+) or an end (-), keep the
    # longest: sort so the widest span comes first, then dedupe.
    df_plu.sort_values(by=["chr","end"], inplace=True, ascending=False)
    df_plu.drop_duplicates(subset=["start","chr"], keep='first', inplace=True)
    df_min.sort_values(by=["chr","start"], inplace=True, ascending=True)
    df_min.drop_duplicates(subset=["end","chr"], keep='first', inplace=True)
    df = pd.concat([df_plu,df_min])
    df = df.sort_values(by=["chr","end"],ascending=False)
    gtf = df[["chr","source","type","start","end","dot","strand","dot2","gene_id"]]
    df = df[["chr","start","end","name","strand","FA_start","FA_end","LA_start","LA_end"]]
    if NEXTFLOW:
        # Nextflow stages inputs elsewhere; write outputs into the work dir.
        file_in = os.path.basename(file_in)
    fout = f"{file_in[:-4]}_flat.txt"
    fout2 = f"{file_in[:-4]}_flat.gtf"
    fout3 = f"{file_in[:-4]}_flat_CHROMNAMES.txt"
    print(f"Outputting flat file {fout}")
    df.to_csv(fout,sep="\t",index=None)
    gtf.to_csv(fout2,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
    gtf_names = gtf[["chr"]].copy()
    gtf_names.drop_duplicates(subset=["chr"], keep='first', inplace=True)
    gtf_names.to_csv(fout3,sep="\t", index=None)
    return df
import argparse
def parse_arguments(argv=None):
    """Parse command-line options for the annotation flattener.

    Args:
        argv: Optional list of argument strings. When None (the default),
            argparse falls back to ``sys.argv[1:]``, preserving the original
            command-line behavior; passing an explicit list makes the
            function testable without touching ``sys.argv``.

    Returns:
        argparse.Namespace with ``annotation_in`` (input annotation path,
        None when omitted) and ``file_type`` (annotation flavor, default
        ``"ENSEMBL"``).
    """
    parser = argparse.ArgumentParser(description='Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED')
    # 'store' is argparse's default action, so it is omitted here.
    parser.add_argument('--annotation_in', metavar='annotation_in',
                        help='Path to the input annotation (GTF/GFF/BED) file.')
    parser.add_argument('--file_type', metavar='file_type', default="ENSEMBL",
                        help='Annotation format, e.g. ENSEMBL, BED, REFSEQGFF, REFSEQBED.')
    args = parser.parse_args(argv)
    return args
if __name__ == "__main__":
    # Script entry point: read the CLI options and flatten the annotation.
    cli_args = parse_arguments()
    flattenGTF(cli_args.annotation_in, cli_args.file_type)
|
normal
|
{
"blob_id": "5c5922fd3a7a5eec121d94e69bc972089e435175",
"index": 9406,
"step-1": "<mask token>\n\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=True)\n df['FA_start'] = df['gene_start']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['FA_end'] = df_exon['end']\n df_exon = df_exon[['name', 'FA_end']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['FA_length'] = df['FA_end'] - df['FA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\n<mask token>\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n df_plu = df[df['strand'] == '+']\n df_min = df[df['strand'] == '-']\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_min_LA = ThreePrimeArea(df_min)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_plu = pd.merge(df_plu_FA, df_plu_LA, on='name')\n df_min = pd.merge(df_min_FA, df_min_LA, on='name')\n df = pd.concat([df_plu, df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names = df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom: df[df['chr'] == chrom] for chrom in chr_names}\n return d_chr\n\n\ndef countInside(df, start, end):\n rows_df = df[(start < df['start']) & (df['end'] < end)]\n names = rows_df['name'].unique().tolist()\n names = ','.join(names)\n if len(names) > 0:\n return names\n else:\n return np.nan\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=True)\n df['FA_start'] = df['gene_start']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['FA_end'] = df_exon['end']\n df_exon = df_exon[['name', 'FA_end']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['FA_length'] = df['FA_end'] - df['FA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\n<mask token>\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n df_plu = df[df['strand'] == '+']\n df_min = df[df['strand'] == '-']\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_min_LA = ThreePrimeArea(df_min)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_plu = pd.merge(df_plu_FA, df_plu_LA, on='name')\n df_min = pd.merge(df_min_FA, df_min_LA, on='name')\n df = pd.concat([df_plu, df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names = df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom: df[df['chr'] == chrom] for chrom in chr_names}\n return d_chr\n\n\ndef countInside(df, start, end):\n rows_df = df[(start < df['start']) & (df['end'] < end)]\n names = rows_df['name'].unique().tolist()\n names = ','.join(names)\n if len(names) > 0:\n return names\n else:\n return np.nan\n\n\n<mask token>\n\n\ndef flattenGTF(file_in, file_type, NEXTFLOW=True):\n if file_type == 'ENSEMBL':\n print(f'Flattening ENSEMBL like genome {file_in}')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['chr'] = 
df['chr'].astype(str)\n df = df[~df['chr'].str.contains('\\\\.')]\n df.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n fout = f'{file_in[:-4]}_sort.gtf'\n df.to_csv(fout, sep='\\t', index=None, quoting=csv.QUOTE_NONE,\n header=None)\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('gene_id ', '')\n df['name'] = df['name'].str.replace('\"', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'BED':\n my_col = ['chr', 'start', 'end', 'name', 'strand']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n if file_type == 'REFSEQGFF':\n print(f'Flattening REFSEQGFF like genome')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n replace_list = [('chr1', 'NC_000001.11'), ('chr2', 'NC_000002.12'),\n ('chr3', 'NC_000003.12'), ('chr4', 'NC_000004.12'), ('chr5',\n 'NC_000005.10'), ('chr6', 'NC_000006.12'), ('chr7',\n 'NC_000007.14'), ('chr8', 'NC_000008.11'), ('chr9',\n 'NC_000009.12'), ('chr10', 'NC_000010.11'), ('chr11',\n 'NC_000011.10'), ('chr12', 'NC_000012.12'), ('chr13',\n 'NC_000013.11'), ('chr14', 'NC_000014.9'), ('chr15',\n 'NC_000015.10'), ('chr16', 'NC_000016.10'), ('chr17',\n 'NC_000017.11'), ('chr18', 'NC_000018.10'), ('chr19',\n 'NC_000019.10'), ('chr20', 'NC_000020.11'), ('chr21',\n 'NC_000021.9'), ('chr22', 'NC_000022.11'), ('chrX',\n 'NC_000023.11'), ('chrY', 
'NC_000024.10')]\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[df['type'] == 'gene'].copy()\n for l in replace_list:\n df['chr'] = np.where(df['chr'] == l[1], l[0], df['chr'])\n df = df[~df['chr'].str.contains('\\\\.')]\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('ID=gene-', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'REFSEQBED':\n my_col = ['chr', 'start', 'end', 'name', 'dot', 'strand', 'start1',\n 'start2', 'dot2', 'dot3', 'gene_id', 'gene_id2']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[['chr', 'start', 'end', 'name', 'strand']]\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n df_plu = df[df['strand'] == '+'].copy()\n df_min = df[df['strand'] == '-'].copy()\n df_plu, df_plu_inside = removeInside(df_plu)\n df_min, df_min_inside = removeInside(df_min)\n df_plu.sort_values(by=['chr', 'end'], inplace=True, ascending=False)\n df_plu.drop_duplicates(subset=['start', 'chr'], keep='first', inplace=True)\n df_min.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n df_min.drop_duplicates(subset=['end', 'chr'], keep='first', inplace=True)\n df = pd.concat([df_plu, df_min])\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n gtf = df[['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']]\n df = df[['chr', 'start', 'end', 'name', 'strand', 
'FA_start', 'FA_end',\n 'LA_start', 'LA_end']]\n if NEXTFLOW:\n file_in = os.path.basename(file_in)\n fout = f'{file_in[:-4]}_flat.txt'\n fout2 = f'{file_in[:-4]}_flat.gtf'\n fout3 = f'{file_in[:-4]}_flat_CHROMNAMES.txt'\n print(f'Outputting flat file {fout}')\n df.to_csv(fout, sep='\\t', index=None)\n gtf.to_csv(fout2, sep='\\t', index=None, quoting=csv.QUOTE_NONE, header=None\n )\n gtf_names = gtf[['chr']].copy()\n gtf_names.drop_duplicates(subset=['chr'], keep='first', inplace=True)\n gtf_names.to_csv(fout3, sep='\\t', index=None)\n return df\n\n\n<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\n 'Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED'\n )\n parser.add_argument('--annotation_in', action='store', metavar=\n 'annotation_in')\n parser.add_argument('--file_type', action='store', metavar='file_type',\n default='ENSEMBL')\n args = parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=True)\n df['FA_start'] = df['gene_start']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['FA_end'] = df_exon['end']\n df_exon = df_exon[['name', 'FA_end']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['FA_length'] = df['FA_end'] - df['FA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\ndef ThreePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n df['LA_end'] = df['gene_end']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['LA_start'] = df_exon['start']\n df_exon = df_exon[['name', 'LA_start']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['LA_length'] = df['LA_end'] - df['LA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n df_plu = df[df['strand'] == '+']\n df_min = df[df['strand'] == '-']\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_min_LA = ThreePrimeArea(df_min)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_plu = pd.merge(df_plu_FA, df_plu_LA, on='name')\n df_min = pd.merge(df_min_FA, df_min_LA, on='name')\n df = pd.concat([df_plu, df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names = df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom: df[df['chr'] == chrom] for chrom in chr_names}\n return d_chr\n\n\ndef countInside(df, start, end):\n rows_df = df[(start < df['start']) & (df['end'] < end)]\n names = 
rows_df['name'].unique().tolist()\n names = ','.join(names)\n if len(names) > 0:\n return names\n else:\n return np.nan\n\n\ndef removeInside(df):\n d_chr = chrDIC(df)\n df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']],\n row['start'], row['end']), axis=1)\n df2 = df.dropna(subset=['genes_inside'])\n all_names = []\n for i in range(len(df2)):\n names = df2['genes_inside'].iloc[i]\n names = names.split(',')\n all_names = all_names + names\n inside_genes = list(set(all_names))\n l = len(inside_genes)\n print(f'Removing {l} genes that are inside other genes')\n df_inside = pd.DataFrame(inside_genes, columns=['name'])\n df = df[~df['name'].isin(df_inside['name'])].copy()\n del df['genes_inside']\n return df, df_inside\n\n\ndef flattenGTF(file_in, file_type, NEXTFLOW=True):\n if file_type == 'ENSEMBL':\n print(f'Flattening ENSEMBL like genome {file_in}')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['chr'] = df['chr'].astype(str)\n df = df[~df['chr'].str.contains('\\\\.')]\n df.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n fout = f'{file_in[:-4]}_sort.gtf'\n df.to_csv(fout, sep='\\t', index=None, quoting=csv.QUOTE_NONE,\n header=None)\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('gene_id ', '')\n df['name'] = df['name'].str.replace('\"', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'BED':\n my_col = ['chr', 'start', 'end', 'name', 'strand']\n df = pd.read_csv(file_in, sep='\\t', header=None, 
names=my_col,\n comment='#', low_memory=False)\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n if file_type == 'REFSEQGFF':\n print(f'Flattening REFSEQGFF like genome')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n replace_list = [('chr1', 'NC_000001.11'), ('chr2', 'NC_000002.12'),\n ('chr3', 'NC_000003.12'), ('chr4', 'NC_000004.12'), ('chr5',\n 'NC_000005.10'), ('chr6', 'NC_000006.12'), ('chr7',\n 'NC_000007.14'), ('chr8', 'NC_000008.11'), ('chr9',\n 'NC_000009.12'), ('chr10', 'NC_000010.11'), ('chr11',\n 'NC_000011.10'), ('chr12', 'NC_000012.12'), ('chr13',\n 'NC_000013.11'), ('chr14', 'NC_000014.9'), ('chr15',\n 'NC_000015.10'), ('chr16', 'NC_000016.10'), ('chr17',\n 'NC_000017.11'), ('chr18', 'NC_000018.10'), ('chr19',\n 'NC_000019.10'), ('chr20', 'NC_000020.11'), ('chr21',\n 'NC_000021.9'), ('chr22', 'NC_000022.11'), ('chrX',\n 'NC_000023.11'), ('chrY', 'NC_000024.10')]\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[df['type'] == 'gene'].copy()\n for l in replace_list:\n df['chr'] = np.where(df['chr'] == l[1], l[0], df['chr'])\n df = df[~df['chr'].str.contains('\\\\.')]\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('ID=gene-', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'REFSEQBED':\n my_col = ['chr', 'start', 'end', 'name', 'dot', 'strand', 'start1',\n 'start2', 'dot2', 'dot3', 'gene_id', 
'gene_id2']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[['chr', 'start', 'end', 'name', 'strand']]\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n df_plu = df[df['strand'] == '+'].copy()\n df_min = df[df['strand'] == '-'].copy()\n df_plu, df_plu_inside = removeInside(df_plu)\n df_min, df_min_inside = removeInside(df_min)\n df_plu.sort_values(by=['chr', 'end'], inplace=True, ascending=False)\n df_plu.drop_duplicates(subset=['start', 'chr'], keep='first', inplace=True)\n df_min.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n df_min.drop_duplicates(subset=['end', 'chr'], keep='first', inplace=True)\n df = pd.concat([df_plu, df_min])\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n gtf = df[['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']]\n df = df[['chr', 'start', 'end', 'name', 'strand', 'FA_start', 'FA_end',\n 'LA_start', 'LA_end']]\n if NEXTFLOW:\n file_in = os.path.basename(file_in)\n fout = f'{file_in[:-4]}_flat.txt'\n fout2 = f'{file_in[:-4]}_flat.gtf'\n fout3 = f'{file_in[:-4]}_flat_CHROMNAMES.txt'\n print(f'Outputting flat file {fout}')\n df.to_csv(fout, sep='\\t', index=None)\n gtf.to_csv(fout2, sep='\\t', index=None, quoting=csv.QUOTE_NONE, header=None\n )\n gtf_names = gtf[['chr']].copy()\n gtf_names.drop_duplicates(subset=['chr'], keep='first', inplace=True)\n gtf_names.to_csv(fout3, sep='\\t', index=None)\n return df\n\n\n<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\n 'Flatten gtf or bed to first and last exon file. 
Options in currently are ENSEMBL, BED'\n )\n parser.add_argument('--annotation_in', action='store', metavar=\n 'annotation_in')\n parser.add_argument('--file_type', action='store', metavar='file_type',\n default='ENSEMBL')\n args = parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=True)\n df['FA_start'] = df['gene_start']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['FA_end'] = df_exon['end']\n df_exon = df_exon[['name', 'FA_end']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['FA_length'] = df['FA_end'] - df['FA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\ndef ThreePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n df['LA_end'] = df['gene_end']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['LA_start'] = df_exon['start']\n df_exon = df_exon[['name', 'LA_start']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['LA_length'] = df['LA_end'] - df['LA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n df_plu = df[df['strand'] == '+']\n df_min = df[df['strand'] == '-']\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_min_LA = ThreePrimeArea(df_min)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_plu = pd.merge(df_plu_FA, df_plu_LA, on='name')\n df_min = pd.merge(df_min_FA, df_min_LA, on='name')\n df = pd.concat([df_plu, df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names = df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom: df[df['chr'] == chrom] for chrom in chr_names}\n return d_chr\n\n\ndef countInside(df, start, end):\n rows_df = df[(start < df['start']) & (df['end'] < end)]\n names = 
rows_df['name'].unique().tolist()\n names = ','.join(names)\n if len(names) > 0:\n return names\n else:\n return np.nan\n\n\ndef removeInside(df):\n d_chr = chrDIC(df)\n df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']],\n row['start'], row['end']), axis=1)\n df2 = df.dropna(subset=['genes_inside'])\n all_names = []\n for i in range(len(df2)):\n names = df2['genes_inside'].iloc[i]\n names = names.split(',')\n all_names = all_names + names\n inside_genes = list(set(all_names))\n l = len(inside_genes)\n print(f'Removing {l} genes that are inside other genes')\n df_inside = pd.DataFrame(inside_genes, columns=['name'])\n df = df[~df['name'].isin(df_inside['name'])].copy()\n del df['genes_inside']\n return df, df_inside\n\n\ndef flattenGTF(file_in, file_type, NEXTFLOW=True):\n if file_type == 'ENSEMBL':\n print(f'Flattening ENSEMBL like genome {file_in}')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['chr'] = df['chr'].astype(str)\n df = df[~df['chr'].str.contains('\\\\.')]\n df.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n fout = f'{file_in[:-4]}_sort.gtf'\n df.to_csv(fout, sep='\\t', index=None, quoting=csv.QUOTE_NONE,\n header=None)\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('gene_id ', '')\n df['name'] = df['name'].str.replace('\"', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'BED':\n my_col = ['chr', 'start', 'end', 'name', 'strand']\n df = pd.read_csv(file_in, sep='\\t', header=None, 
names=my_col,\n comment='#', low_memory=False)\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n if file_type == 'REFSEQGFF':\n print(f'Flattening REFSEQGFF like genome')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n replace_list = [('chr1', 'NC_000001.11'), ('chr2', 'NC_000002.12'),\n ('chr3', 'NC_000003.12'), ('chr4', 'NC_000004.12'), ('chr5',\n 'NC_000005.10'), ('chr6', 'NC_000006.12'), ('chr7',\n 'NC_000007.14'), ('chr8', 'NC_000008.11'), ('chr9',\n 'NC_000009.12'), ('chr10', 'NC_000010.11'), ('chr11',\n 'NC_000011.10'), ('chr12', 'NC_000012.12'), ('chr13',\n 'NC_000013.11'), ('chr14', 'NC_000014.9'), ('chr15',\n 'NC_000015.10'), ('chr16', 'NC_000016.10'), ('chr17',\n 'NC_000017.11'), ('chr18', 'NC_000018.10'), ('chr19',\n 'NC_000019.10'), ('chr20', 'NC_000020.11'), ('chr21',\n 'NC_000021.9'), ('chr22', 'NC_000022.11'), ('chrX',\n 'NC_000023.11'), ('chrY', 'NC_000024.10')]\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[df['type'] == 'gene'].copy()\n for l in replace_list:\n df['chr'] = np.where(df['chr'] == l[1], l[0], df['chr'])\n df = df[~df['chr'].str.contains('\\\\.')]\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('ID=gene-', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'REFSEQBED':\n my_col = ['chr', 'start', 'end', 'name', 'dot', 'strand', 'start1',\n 'start2', 'dot2', 'dot3', 'gene_id', 
'gene_id2']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[['chr', 'start', 'end', 'name', 'strand']]\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n df_plu = df[df['strand'] == '+'].copy()\n df_min = df[df['strand'] == '-'].copy()\n df_plu, df_plu_inside = removeInside(df_plu)\n df_min, df_min_inside = removeInside(df_min)\n df_plu.sort_values(by=['chr', 'end'], inplace=True, ascending=False)\n df_plu.drop_duplicates(subset=['start', 'chr'], keep='first', inplace=True)\n df_min.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n df_min.drop_duplicates(subset=['end', 'chr'], keep='first', inplace=True)\n df = pd.concat([df_plu, df_min])\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n gtf = df[['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']]\n df = df[['chr', 'start', 'end', 'name', 'strand', 'FA_start', 'FA_end',\n 'LA_start', 'LA_end']]\n if NEXTFLOW:\n file_in = os.path.basename(file_in)\n fout = f'{file_in[:-4]}_flat.txt'\n fout2 = f'{file_in[:-4]}_flat.gtf'\n fout3 = f'{file_in[:-4]}_flat_CHROMNAMES.txt'\n print(f'Outputting flat file {fout}')\n df.to_csv(fout, sep='\\t', index=None)\n gtf.to_csv(fout2, sep='\\t', index=None, quoting=csv.QUOTE_NONE, header=None\n )\n gtf_names = gtf[['chr']].copy()\n gtf_names.drop_duplicates(subset=['chr'], keep='first', inplace=True)\n gtf_names.to_csv(fout3, sep='\\t', index=None)\n return df\n\n\n<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\n 'Flatten gtf or bed to first and last exon file. 
Options in currently are ENSEMBL, BED'\n )\n parser.add_argument('--annotation_in', action='store', metavar=\n 'annotation_in')\n parser.add_argument('--file_type', action='store', metavar='file_type',\n default='ENSEMBL')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n file_in = args.annotation_in\n file_type = args.file_type\n flattenGTF(file_in, file_type)\n",
"step-5": "\n#################################################\n### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###\n#################################################\n# file to edit: dev_nb/10_DogcatcherFlatten.ipynb\nimport pandas as pd\nimport argparse\nimport csv\nimport os\n\nimport numpy as np\nimport string\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=[\"chr\",\"end\"],ascending=True)\n df[\"FA_start\"] = df[\"gene_start\"]\n df_exon = df[df[\"type\"]==\"exon\"].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'],keep=\"first\")\n df_exon[\"FA_end\"] = df_exon[\"end\"]\n df_exon = df_exon[[\"name\",\"FA_end\"]]\n df = pd.merge(df,df_exon,how=\"left\",on=\"name\")\n df[\"FA_length\"] = df[\"FA_end\"] - df[\"FA_start\"]\n df = df.drop_duplicates(subset=['name'],keep=\"first\")\n return df\n\n\ndef ThreePrimeArea(df):\n df = df.sort_values(by=[\"chr\",\"end\"],ascending=False)\n df[\"LA_end\"] = df[\"gene_end\"]\n df_exon = df[df[\"type\"]==\"exon\"].copy()\n # Keep first exon\n df_exon = df_exon.drop_duplicates(subset=['name'],keep=\"first\")\n df_exon[\"LA_start\"] = df_exon[\"start\"]\n df_exon = df_exon[[\"name\",\"LA_start\"]]\n df = pd.merge(df,df_exon,how=\"left\",on=\"name\")\n df[\"LA_length\"] = df[\"LA_end\"] - df[\"LA_start\"]\n df = df.drop_duplicates(subset=['name'],keep=\"first\")\n return df\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n\n df_plu = df[df[\"strand\"]==\"+\"]\n df_min = df[df[\"strand\"]==\"-\"]\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[[\"name\",\"LA_start\",\"LA_end\",\"LA_length\"]]\n df_min_LA = ThreePrimeArea(df_min)[[\"name\",\"LA_start\",\"LA_end\",\"LA_length\"]]\n df_plu = pd.merge(df_plu_FA,df_plu_LA,on=\"name\")\n df_min = pd.merge(df_min_FA,df_min_LA,on=\"name\")\n df = pd.concat([df_plu,df_min])\n return df\n\n\ndef 
chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names=df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom : df[df[\"chr\"]==chrom] for chrom in chr_names}\n return d_chr\n\ndef countInside(df, start, end):\n rows_df = df[ (start < df[\"start\"]) & (df[\"end\"] < end) ]\n names = rows_df['name'].unique().tolist()\n names = \",\".join(names)\n if len(names) >0:\n return names\n else:\n return np.nan\n\ndef removeInside(df):\n d_chr = chrDIC(df)\n\n df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']], row[\"start\"], row[\"end\"]), axis=1)\n df2 = df.dropna(subset=['genes_inside'])\n all_names = []\n for i in range(len(df2)):\n names = df2[\"genes_inside\"].iloc[i]\n names = names.split(\",\")\n all_names = all_names + names\n\n inside_genes = list(set(all_names))\n l = len(inside_genes)\n print(f\"Removing {l} genes that are inside other genes\")\n\n df_inside = pd.DataFrame(inside_genes,columns=['name'])\n df = df[~df[\"name\"].isin(df_inside[\"name\"])].copy()\n del df[\"genes_inside\"]\n\n return df, df_inside\n\ndef flattenGTF(file_in,file_type,NEXTFLOW=True):\n if file_type == \"ENSEMBL\":\n print(f\"Flattening ENSEMBL like genome {file_in}\")\n my_col = [\"chr\",\"source\",\"type\",\"start\",\"end\",\"dot\",\"strand\",\"dot2\",\"gene_id\"]\n\n df = pd.read_csv(file_in, sep=\"\\t\",header=None,names=my_col, comment=\"#\",low_memory=False)\n\n df[\"chr\"] = df[\"chr\"].astype(str)\n df = df[~df[\"chr\"].str.contains(\"\\.\") ] # Take out patches\n\n df.sort_values(by=[\"chr\",\"start\"], inplace=True, ascending=True)\n fout = f\"{file_in[:-4]}_sort.gtf\"\n df.to_csv(fout,sep=\"\\t\", index=None,quoting=csv.QUOTE_NONE, header=None)\n\n\n df[\"name\"] = df[\"gene_id\"].str.split(';',expand=True)[0]\n df[\"name\"] = df[\"name\"].str.replace(\"gene_id \",\"\")\n df[\"name\"] = df[\"name\"].str.replace(\"\\\"\",\"\")\n\n df[\"type\"] = df[\"type\"].astype(str)\n\n df_gene = 
df[df[\"type\"]==\"gene\"].copy()\n df_gene[\"gene_start\"] = df_gene[\"start\"]\n df_gene[\"gene_end\"] = df_gene[\"end\"]\n\n df_gene = df_gene[[\"name\",\"gene_start\",\"gene_end\"]].copy()\n df = pd.merge(df,df_gene,how=\"left\",on=\"name\")\n df = getAreas(df)\n df[\"start\"] = df[\"gene_start\"]\n df[\"end\"] = df[\"gene_end\"]\n# df = df[[\"chr\",\"start\",\"end\",\"strand\",\"name\",\"type\"]].copy()\n\n\n if file_type == \"BED\":\n my_col = [\"chr\",\"start\",\"end\",\"name\",\"strand\"]\n df = pd.read_csv(file_in, sep=\"\\t\",header=None,names=my_col, comment=\"#\",low_memory=False)\n df[\"FA_start\"] = df[\"start\"]\n df[\"FA_end\"] = df[\"end\"]\n df[\"LA_start\"] = df[\"start\"]\n df[\"LA_end\"] = df[\"end\"]\n df[\"dot\"] = \".\"\n df[\"dot2\"] = \".\"\n df[\"source\"] = \"NA\"\n df[\"type\"] = \"NA\"\n df[\"gene_id\"] = df[\"name\"]\n\n\n\n\n if file_type == \"REFSEQGFF\":\n\n # Chrome numbers are changed. Need to change back to chr1 etc.\n# https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly\n print(f\"Flattening REFSEQGFF like genome\")\n# https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/\n #download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz\n # sort and index in IGV\n# NC_000001.11\tBestRefSeq\tgene\t11874\t14409\t.\t+\t.\tgene_id \"DDX11L1\"; transcript_id \"\"; db_xref \"GeneID:100287102\"; db_xref \"HGNC:HGNC:37102\"; description \"DEAD/H-box helicase 11 like 1 (pseudogene)\"; gbkey \"Gene\"; gene \"DDX11L1\"; gene_biotype \"transcribed_pseudogene\"; pseudo \"true\";\n\n\n\n my_col = [\"chr\",\"source\",\"type\",\"start\",\"end\",\"dot\",\"strand\",\"dot2\",\"gene_id\"]\n\n replace_list = [(\"chr1\",\"NC_000001.11\"),\n (\"chr2\",\"NC_000002.12\"),\n (\"chr3\",\"NC_000003.12\"),\n (\"chr4\",\"NC_000004.12\"),\n (\"chr5\",\"NC_000005.10\"),\n (\"chr6\",\"NC_000006.12\"),\n (\"chr7\",\"NC_000007.14\"),\n (\"chr8\",\"NC_000008.11\"),\n (\"chr9\",\"NC_000009.12\"),\n 
(\"chr10\",\"NC_000010.11\"),\n (\"chr11\",\"NC_000011.10\"),\n (\"chr12\",\"NC_000012.12\"),\n (\"chr13\",\"NC_000013.11\"),\n (\"chr14\",\"NC_000014.9\"),\n (\"chr15\",\"NC_000015.10\"),\n (\"chr16\",\"NC_000016.10\"),\n (\"chr17\",\"NC_000017.11\"),\n (\"chr18\",\"NC_000018.10\"),\n (\"chr19\",\"NC_000019.10\"),\n (\"chr20\",\"NC_000020.11\"),\n (\"chr21\",\"NC_000021.9\"),\n (\"chr22\",\"NC_000022.11\"),\n (\"chrX\",\"NC_000023.11\"),\n (\"chrY\",\"NC_000024.10\")]\n\n\n df = pd.read_csv(file_in, sep=\"\\t\",header=None,names=my_col, comment=\"#\",low_memory=False)\n\n df = df[df[\"type\"]==\"gene\"].copy()\n\n # Change NC names to chr\n for l in replace_list:\n df[\"chr\"] = np.where(df[\"chr\"]==l[1],l[0],df[\"chr\"])\n\n df = df[~df[\"chr\"].str.contains(\"\\.\") ] # Take out patches\n\n\n df[\"name\"] = df[\"gene_id\"].str.split(';',expand=True)[0]\n df[\"name\"] = df[\"name\"].str.replace(\"ID=gene-\",\"\")\n\n df[\"type\"] = df[\"type\"].astype(str)\n\n df_gene = df[df[\"type\"]==\"gene\"].copy()\n df_gene[\"gene_start\"] = df_gene[\"start\"]\n df_gene[\"gene_end\"] = df_gene[\"end\"]\n\n df_gene = df_gene[[\"name\",\"gene_start\",\"gene_end\"]].copy()\n df = pd.merge(df,df_gene,how=\"left\",on=\"name\")\n df = getAreas(df)\n df[\"start\"] = df[\"gene_start\"]\n df[\"end\"] = df[\"gene_end\"]\n# df = df[[\"chr\",\"start\",\"end\",\"strand\",\"name\",\"type\"]].copy()\n\n\n\n\n\n\n\n if file_type == \"REFSEQBED\":\n\n# chr1\t11873\t14409\tNR_046018\t0\t+\t\n# 14409\t14409\t0\t3\t354,109,1189,\t0,739,1347,\n\n\n my_col = [\"chr\",\"start\",\"end\",\"name\",\"dot\",\"strand\",\"start1\",\"start2\",\"dot2\",\"dot3\",\"gene_id\",\"gene_id2\"]\n\n df = pd.read_csv(file_in, sep=\"\\t\",header=None,names=my_col, comment=\"#\",low_memory=False)\n df = df[[\"chr\",\"start\",\"end\",\"name\",\"strand\"]]\n df[\"FA_start\"] = df[\"start\"]\n df[\"FA_end\"] = df[\"end\"]\n df[\"LA_start\"] = df[\"start\"]\n df[\"LA_end\"] = df[\"end\"]\n df[\"dot\"] = \".\"\n 
df[\"dot2\"] = \".\"\n df[\"source\"] = \"NA\"\n df[\"type\"] = \"NA\"\n df[\"gene_id\"] = df[\"name\"]\n\n\n\n df_plu = df[df[\"strand\"]==\"+\"].copy()\n df_min = df[df[\"strand\"]==\"-\"].copy()\n\n df_plu, df_plu_inside = removeInside(df_plu)\n df_min, df_min_inside = removeInside(df_min)\n\n df_plu.sort_values(by=[\"chr\",\"end\"], inplace=True, ascending=False)\n df_plu.drop_duplicates(subset=[\"start\",\"chr\"], keep='first', inplace=True)\n\n df_min.sort_values(by=[\"chr\",\"start\"], inplace=True, ascending=True)\n df_min.drop_duplicates(subset=[\"end\",\"chr\"], keep='first', inplace=True)\n\n\n df = pd.concat([df_plu,df_min])\n df = df.sort_values(by=[\"chr\",\"end\"],ascending=False)\n\n\n gtf = df[[\"chr\",\"source\",\"type\",\"start\",\"end\",\"dot\",\"strand\",\"dot2\",\"gene_id\"] ]\n df = df[[\"chr\",\"start\",\"end\",\"name\",\"strand\",\"FA_start\",\"FA_end\",\"LA_start\",\"LA_end\"]]\n\n\n if NEXTFLOW:\n file_in = os.path.basename(file_in)\n\n fout = f\"{file_in[:-4]}_flat.txt\"\n fout2 = f\"{file_in[:-4]}_flat.gtf\"\n fout3 = f\"{file_in[:-4]}_flat_CHROMNAMES.txt\"\n\n\n\n print(f\"Outputting flat file {fout}\")\n df.to_csv(fout,sep=\"\\t\",index=None)\n\n\n gtf.to_csv(fout2,sep=\"\\t\", index=None,quoting=csv.QUOTE_NONE, header=None)\n\n gtf_names = gtf[[\"chr\"]].copy()\n gtf_names.drop_duplicates(subset=[\"chr\"], keep='first', inplace=True)\n\n gtf_names.to_csv(fout3,sep=\"\\t\", index=None)\n\n return df\n\n\nimport argparse\ndef parse_arguments():\n parser = argparse.ArgumentParser(description='Flatten gtf or bed to first and last exon file. 
Options in currently are ENSEMBL, BED')\n parser.add_argument('--annotation_in', action= 'store', metavar='annotation_in')\n parser.add_argument('--file_type', action= 'store', metavar='file_type',default=\"ENSEMBL\")\n args = parser.parse_args()\n return args\n\nif __name__==\"__main__\":\n args = parse_arguments()\n file_in = args.annotation_in\n file_type = args.file_type\n\n flattenGTF(file_in,file_type)\n",
"step-ids": [
4,
6,
8,
9,
11
]
}
|
[
4,
6,
8,
9,
11
] |
from django.urls import path
from django.contrib.auth import views as auth_views
from . views import register, channel
urlpatterns = [
path('register/', register, name="register"),
path('channel/', channel, name="channel"),
path('login/', auth_views.LoginView.as_view(template_name='user/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='user/logout.html'), name='logout'),
path('password_reset/',auth_views.PasswordResetView.as_view(template_name="user/password_reset.html"), name="password_reset"),
path('password_reset/done/',auth_views.PasswordResetDoneView.as_view(template_name="user/password_reset_done.html"), name="password_reset_done"),
path('password_reset_confirm/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name="user/password_reset_confirm.html"),name="password_reset_confirm"),
path('password_reset_complete/',auth_views.PasswordResetCompleteView.as_view(template_name="user/password_reset_complete.html"),name="password_reset_complete"),
]
|
normal
|
{
"blob_id": "d76c1507594bb0c1ed7a83e6c5961097c7fbf54a",
"index": 9859,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('register/', register, name='register'), path(\n 'channel/', channel, name='channel'), path('login/', auth_views.\n LoginView.as_view(template_name='user/login.html'), name='login'), path\n ('logout/', auth_views.LogoutView.as_view(template_name=\n 'user/logout.html'), name='logout'), path('password_reset/', auth_views\n .PasswordResetView.as_view(template_name='user/password_reset.html'),\n name='password_reset'), path('password_reset/done/', auth_views.\n PasswordResetDoneView.as_view(template_name=\n 'user/password_reset_done.html'), name='password_reset_done'), path(\n 'password_reset_confirm/<uidb64>/<token>/', auth_views.\n PasswordResetConfirmView.as_view(template_name=\n 'user/password_reset_confirm.html'), name='password_reset_confirm'),\n path('password_reset_complete/', auth_views.PasswordResetCompleteView.\n as_view(template_name='user/password_reset_complete.html'), name=\n 'password_reset_complete')]\n",
"step-3": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom .views import register, channel\nurlpatterns = [path('register/', register, name='register'), path(\n 'channel/', channel, name='channel'), path('login/', auth_views.\n LoginView.as_view(template_name='user/login.html'), name='login'), path\n ('logout/', auth_views.LogoutView.as_view(template_name=\n 'user/logout.html'), name='logout'), path('password_reset/', auth_views\n .PasswordResetView.as_view(template_name='user/password_reset.html'),\n name='password_reset'), path('password_reset/done/', auth_views.\n PasswordResetDoneView.as_view(template_name=\n 'user/password_reset_done.html'), name='password_reset_done'), path(\n 'password_reset_confirm/<uidb64>/<token>/', auth_views.\n PasswordResetConfirmView.as_view(template_name=\n 'user/password_reset_confirm.html'), name='password_reset_confirm'),\n path('password_reset_complete/', auth_views.PasswordResetCompleteView.\n as_view(template_name='user/password_reset_complete.html'), name=\n 'password_reset_complete')]\n",
"step-4": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom . views import register, channel\n\n\nurlpatterns = [\n path('register/', register, name=\"register\"),\n path('channel/', channel, name=\"channel\"),\n path('login/', auth_views.LoginView.as_view(template_name='user/login.html'), name='login'),\n path('logout/', auth_views.LogoutView.as_view(template_name='user/logout.html'), name='logout'),\n path('password_reset/',auth_views.PasswordResetView.as_view(template_name=\"user/password_reset.html\"), name=\"password_reset\"),\n path('password_reset/done/',auth_views.PasswordResetDoneView.as_view(template_name=\"user/password_reset_done.html\"), name=\"password_reset_done\"),\n path('password_reset_confirm/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name=\"user/password_reset_confirm.html\"),name=\"password_reset_confirm\"),\n path('password_reset_complete/',auth_views.PasswordResetCompleteView.as_view(template_name=\"user/password_reset_complete.html\"),name=\"password_reset_complete\"),\n]\n\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Register(decompil.ir.Register):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class BaseDecoder:
name = None
opcode = None
opcode_mask = None
operands_format = None
def decode(self, context, disassembler, builder):
raise NotImplementedError()
def decode_operands(self, context):
return [op.extract(context, self) for op in self.operands_format]
class Instruction(BaseDecoder):
have_extra_operand = False
is_extended = False
def __init__(self, address, opcode, extra_operand=None, extension=None):
self.address = address
self.opcode_value = opcode
self.extension = extension
assert self.is_extended == (extension is not None)
assert self.have_extra_operand == (extra_operand is not None)
self.extra_operand = extra_operand
if self.extension:
self.extension.instruction = self
def __repr__(self):
ext = ' ({})'.format(self.extension.name) if self.extension else ''
return '{:04x}: {}{}'.format(self.address, self.name, ext)
class InstructionExtension(BaseDecoder):
def __init__(self, opcode):
self.opcode_value = opcode
self.instruction = None
def __repr__(self):
return '{:04x}: {} (extension)'.format(self.address, self.name)
<|reserved_special_token_0|>
class Decoder(decompil.disassemblers.BaseDecoder):
def __init__(self, fp):
self.fp = fp
def parse_insn(self, disassembler, builder, address):
opcode = self.get_word(address)
next_address = address + 1
if opcode is None:
return None
insn_pat = self.lookup(opcode, instructions)
if insn_pat.have_extra_operand:
extra_operand = self.get_word(address + 1)
next_address += 1
if extra_operand is None:
raise ValueError('Incomplete file')
else:
extra_operand = None
if insn_pat.is_extended:
ext_pat = self.lookup(opcode, instruction_extensions)
ext = ext_pat(opcode)
else:
ext = None
insn = insn_pat(address, opcode, extra_operand, ext)
insn_image = '{}{}'.format(insn.name, "'{}".format(insn.extension.
name) if insn.is_extended else '')
builder.set_origin('At {:#04x}: {}'.format(address, insn_image))
if insn.is_extended:
insn.extension.decode(disassembler.context, disassembler, builder)
if disassembler.must_stop_basic_block:
return next_address
insn.decode(disassembler.context, disassembler, builder)
return next_address
def iter_insns(self, address):
while True:
address, insn = self.parse_insn(address)
if insn is None:
break
else:
yield address, insn
def get_word(self, address):
self.fp.seek(2 * address)
word = self.fp.read(2)
if len(word) == 0:
return None
elif len(word) == 2:
return struct.unpack('>H', word)[0]
else:
raise ValueError('Incomplete file')
def lookup(self, opcode, pattern_set):
for pat in pattern_set:
if opcode & pat.opcode_mask == pat.opcode:
return pat
else:
raise ValueError('Invalid opcode: {:04x}'.format(opcode))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Register(decompil.ir.Register):
def __init__(self, context, name, width, components=None):
self.context = context
self.type = context.create_int_type(width)
self.name = name
self.components = components
self.registers = [reg for reg, _ in components] if components else None
def build_load(self, builder):
if self.components is None:
return builder.build_rload(self)
else:
result = None
for reg, shift in self.components:
val = builder.build_zext(self.type, builder.build_rload(reg))
if shift:
val = builder.build_lshl(val, self.type.create(shift))
if result:
result = builder.build_add(result, val)
else:
result = val
return result
def build_store(self, builder, value):
assert value.type == self.type
if self.components is None:
builder.build_rstore(self, value)
else:
for reg, shift in self.components:
if shift:
val = builder.build_lshl(value, value.type.create(shift))
val = builder.build_trunc(reg.type, val)
builder.build_rstore(reg, val)
def build_load_comp(self, builder):
return [builder.build_rload(reg) for reg, _ in self.components]
def build_store_comp(self, builder, *values):
assert len(values) == len(self.components)
for value, (reg, _) in zip(values, self.components):
builder.build_rstore(reg, value)
def format(self):
return [(Name.Variable, '${}'.format(self.name))]
class BaseDecoder:
name = None
opcode = None
opcode_mask = None
operands_format = None
def decode(self, context, disassembler, builder):
raise NotImplementedError()
def decode_operands(self, context):
return [op.extract(context, self) for op in self.operands_format]
class Instruction(BaseDecoder):
have_extra_operand = False
is_extended = False
def __init__(self, address, opcode, extra_operand=None, extension=None):
self.address = address
self.opcode_value = opcode
self.extension = extension
assert self.is_extended == (extension is not None)
assert self.have_extra_operand == (extra_operand is not None)
self.extra_operand = extra_operand
if self.extension:
self.extension.instruction = self
def __repr__(self):
ext = ' ({})'.format(self.extension.name) if self.extension else ''
return '{:04x}: {}{}'.format(self.address, self.name, ext)
class InstructionExtension(BaseDecoder):
def __init__(self, opcode):
self.opcode_value = opcode
self.instruction = None
def __repr__(self):
return '{:04x}: {} (extension)'.format(self.address, self.name)
<|reserved_special_token_0|>
class Decoder(decompil.disassemblers.BaseDecoder):
def __init__(self, fp):
self.fp = fp
def parse_insn(self, disassembler, builder, address):
opcode = self.get_word(address)
next_address = address + 1
if opcode is None:
return None
insn_pat = self.lookup(opcode, instructions)
if insn_pat.have_extra_operand:
extra_operand = self.get_word(address + 1)
next_address += 1
if extra_operand is None:
raise ValueError('Incomplete file')
else:
extra_operand = None
if insn_pat.is_extended:
ext_pat = self.lookup(opcode, instruction_extensions)
ext = ext_pat(opcode)
else:
ext = None
insn = insn_pat(address, opcode, extra_operand, ext)
insn_image = '{}{}'.format(insn.name, "'{}".format(insn.extension.
name) if insn.is_extended else '')
builder.set_origin('At {:#04x}: {}'.format(address, insn_image))
if insn.is_extended:
insn.extension.decode(disassembler.context, disassembler, builder)
if disassembler.must_stop_basic_block:
return next_address
insn.decode(disassembler.context, disassembler, builder)
return next_address
def iter_insns(self, address):
while True:
address, insn = self.parse_insn(address)
if insn is None:
break
else:
yield address, insn
def get_word(self, address):
self.fp.seek(2 * address)
word = self.fp.read(2)
if len(word) == 0:
return None
elif len(word) == 2:
return struct.unpack('>H', word)[0]
else:
raise ValueError('Incomplete file')
def lookup(self, opcode, pattern_set):
for pat in pattern_set:
if opcode & pat.opcode_mask == pat.opcode:
return pat
else:
raise ValueError('Invalid opcode: {:04x}'.format(opcode))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Context(decompil.ir.Context):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Register(decompil.ir.Register):
def __init__(self, context, name, width, components=None):
self.context = context
self.type = context.create_int_type(width)
self.name = name
self.components = components
self.registers = [reg for reg, _ in components] if components else None
def build_load(self, builder):
if self.components is None:
return builder.build_rload(self)
else:
result = None
for reg, shift in self.components:
val = builder.build_zext(self.type, builder.build_rload(reg))
if shift:
val = builder.build_lshl(val, self.type.create(shift))
if result:
result = builder.build_add(result, val)
else:
result = val
return result
def build_store(self, builder, value):
assert value.type == self.type
if self.components is None:
builder.build_rstore(self, value)
else:
for reg, shift in self.components:
if shift:
val = builder.build_lshl(value, value.type.create(shift))
val = builder.build_trunc(reg.type, val)
builder.build_rstore(reg, val)
def build_load_comp(self, builder):
return [builder.build_rload(reg) for reg, _ in self.components]
def build_store_comp(self, builder, *values):
assert len(values) == len(self.components)
for value, (reg, _) in zip(values, self.components):
builder.build_rstore(reg, value)
def format(self):
return [(Name.Variable, '${}'.format(self.name))]
class BaseDecoder:
name = None
opcode = None
opcode_mask = None
operands_format = None
def decode(self, context, disassembler, builder):
raise NotImplementedError()
def decode_operands(self, context):
return [op.extract(context, self) for op in self.operands_format]
class Instruction(BaseDecoder):
have_extra_operand = False
is_extended = False
def __init__(self, address, opcode, extra_operand=None, extension=None):
self.address = address
self.opcode_value = opcode
self.extension = extension
assert self.is_extended == (extension is not None)
assert self.have_extra_operand == (extra_operand is not None)
self.extra_operand = extra_operand
if self.extension:
self.extension.instruction = self
def __repr__(self):
ext = ' ({})'.format(self.extension.name) if self.extension else ''
return '{:04x}: {}{}'.format(self.address, self.name, ext)
class InstructionExtension(BaseDecoder):
def __init__(self, opcode):
self.opcode_value = opcode
self.instruction = None
def __repr__(self):
return '{:04x}: {} (extension)'.format(self.address, self.name)
<|reserved_special_token_0|>
class Decoder(decompil.disassemblers.BaseDecoder):
def __init__(self, fp):
self.fp = fp
def parse_insn(self, disassembler, builder, address):
opcode = self.get_word(address)
next_address = address + 1
if opcode is None:
return None
insn_pat = self.lookup(opcode, instructions)
if insn_pat.have_extra_operand:
extra_operand = self.get_word(address + 1)
next_address += 1
if extra_operand is None:
raise ValueError('Incomplete file')
else:
extra_operand = None
if insn_pat.is_extended:
ext_pat = self.lookup(opcode, instruction_extensions)
ext = ext_pat(opcode)
else:
ext = None
insn = insn_pat(address, opcode, extra_operand, ext)
insn_image = '{}{}'.format(insn.name, "'{}".format(insn.extension.
name) if insn.is_extended else '')
builder.set_origin('At {:#04x}: {}'.format(address, insn_image))
if insn.is_extended:
insn.extension.decode(disassembler.context, disassembler, builder)
if disassembler.must_stop_basic_block:
return next_address
insn.decode(disassembler.context, disassembler, builder)
return next_address
def iter_insns(self, address):
while True:
address, insn = self.parse_insn(address)
if insn is None:
break
else:
yield address, insn
def get_word(self, address):
self.fp.seek(2 * address)
word = self.fp.read(2)
if len(word) == 0:
return None
elif len(word) == 2:
return struct.unpack('>H', word)[0]
else:
raise ValueError('Incomplete file')
def lookup(self, opcode, pattern_set):
for pat in pattern_set:
if opcode & pat.opcode_mask == pat.opcode:
return pat
else:
raise ValueError('Invalid opcode: {:04x}'.format(opcode))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Context(decompil.ir.Context):
def __init__(self):
super(Context, self).__init__(16)
self.pointer_type = self.create_pointer_type(self.half_type)
self.init_registers()
def init_registers(self):
self.registers = regs = [Register(self, 'ar0', 16), Register(self,
'ar1', 16), Register(self, 'ar2', 16), Register(self, 'ar3', 16
), Register(self, 'ix0', 16), Register(self, 'ix1', 16),
Register(self, 'ix2', 16), Register(self, 'ix3', 16), Register(
self, 'r08', 16), Register(self, 'r09', 16), Register(self,
'r0a', 16), Register(self, 'r0b', 16), Register(self, 'st0', 16
), Register(self, 'st1', 16), Register(self, 'st2', 16),
Register(self, 'st3', 16), Register(self, 'ac0.h', 16),
Register(self, 'ac1.h', 16), Register(self, 'config', 16),
Register(self, 'sr', 16), Register(self, 'prod.l', 16),
Register(self, 'prod.m1', 16), Register(self, 'prod.h', 16),
Register(self, 'prod.m2', 16), Register(self, 'ax0.l', 16),
Register(self, 'ax1.l', 16), Register(self, 'ax0.h', 16),
Register(self, 'ax1.h', 16), Register(self, 'ac0.l', 16),
Register(self, 'ac1.l', 16), Register(self, 'ac0.m', 16),
Register(self, 'ac1.m', 16)]
self.wr_registers = [Register(self, 'wr{}'.format(i), 16) for i in
range(4)]
self.addr_to_wr = {self.registers[0]: self.wr_registers[0], self.
registers[1]: self.wr_registers[1], self.registers[2]: self.
wr_registers[2], self.registers[3]: self.wr_registers[3]}
self.addr_to_ix = {self.registers[0]: self.registers[4], self.
registers[1]: self.registers[5], self.registers[2]: self.
registers[6], self.registers[3]: self.registers[7]}
self.long_accumulators = [Register(self, 'ac0', 40, [(regs[16], 32),
(regs[30], 16), (regs[28], 0)]), Register(self, 'ac1', 40, [(
regs[17], 32), (regs[31], 16), (regs[29], 0)])]
self.short_accumulators = [Register(self, 'acs0', 24, [(regs[16],
16), (regs[30], 0)]), Register(self, 'acs1', 24, [(regs[17], 16
), (regs[31], 0)])]
self.extra_acculumators = [Register(self, 'ax0', 32, [(regs[26], 16
), (regs[24], 0)]), Register(self, 'ax1', 32, [(regs[27], 16),
(regs[25], 0)])]
self.prod_register = Register(self, 'prod', 40, [(regs[23], 16), (
regs[22], 32), (regs[21], 16), (regs[20], 0)])
class Register(decompil.ir.Register):
def __init__(self, context, name, width, components=None):
self.context = context
self.type = context.create_int_type(width)
self.name = name
self.components = components
self.registers = [reg for reg, _ in components] if components else None
def build_load(self, builder):
if self.components is None:
return builder.build_rload(self)
else:
result = None
for reg, shift in self.components:
val = builder.build_zext(self.type, builder.build_rload(reg))
if shift:
val = builder.build_lshl(val, self.type.create(shift))
if result:
result = builder.build_add(result, val)
else:
result = val
return result
def build_store(self, builder, value):
assert value.type == self.type
if self.components is None:
builder.build_rstore(self, value)
else:
for reg, shift in self.components:
if shift:
val = builder.build_lshl(value, value.type.create(shift))
val = builder.build_trunc(reg.type, val)
builder.build_rstore(reg, val)
def build_load_comp(self, builder):
return [builder.build_rload(reg) for reg, _ in self.components]
def build_store_comp(self, builder, *values):
assert len(values) == len(self.components)
for value, (reg, _) in zip(values, self.components):
builder.build_rstore(reg, value)
def format(self):
return [(Name.Variable, '${}'.format(self.name))]
class BaseDecoder:
name = None
opcode = None
opcode_mask = None
operands_format = None
def decode(self, context, disassembler, builder):
raise NotImplementedError()
def decode_operands(self, context):
return [op.extract(context, self) for op in self.operands_format]
class Instruction(BaseDecoder):
have_extra_operand = False
is_extended = False
def __init__(self, address, opcode, extra_operand=None, extension=None):
self.address = address
self.opcode_value = opcode
self.extension = extension
assert self.is_extended == (extension is not None)
assert self.have_extra_operand == (extra_operand is not None)
self.extra_operand = extra_operand
if self.extension:
self.extension.instruction = self
def __repr__(self):
ext = ' ({})'.format(self.extension.name) if self.extension else ''
return '{:04x}: {}{}'.format(self.address, self.name, ext)
class InstructionExtension(BaseDecoder):
def __init__(self, opcode):
self.opcode_value = opcode
self.instruction = None
def __repr__(self):
return '{:04x}: {} (extension)'.format(self.address, self.name)
<|reserved_special_token_0|>
def _init_tables():
import gcdsp.decoders
def helper(table, cls):
for obj_name in dir(gcdsp.decoders):
obj = getattr(gcdsp.decoders, obj_name)
if not (inspect.isclass(obj) and issubclass(obj, cls) and obj !=
cls):
continue
assert obj.opcode & ~obj.opcode_mask == 0
table.append(obj)
helper(instructions, Instruction)
helper(instruction_extensions, InstructionExtension)
<|reserved_special_token_0|>
def load_insns():
import gcdsp.decoders
def default_decoder(self, context, disassembler, builder):
builder.build_undef()
disassembler.stop_basic_block()
def decode_operands(self, context):
result = []
for _, size, addend, rshift, mask in self.operands_format:
operand = (self.opcode & mask) >> rshift
result.append(self.opcode & mask + addend)
return result
Insn = collections.namedtuple('Insn',
'name opcode mask size unused0 operands is_extended unused1')
for insn in gcdsp.decoders.opcodes:
insn = Insn(*insn)
insn_decoder = getattr(gcdsp.decoders, 'decode_{}'.format(insn.name
.lower()), default_decoder)
instructions.append(type(insn.name, (Instruction,), {'name': insn.
name, 'opcode': insn.opcode, 'opcode_mask': insn.mask,
'have_extra_operand': insn.size == 2, 'is_extended': insn.
is_extended, 'decode': insn_decoder, 'decode_operands':
decode_operands, 'operands_format': insn.operands}))
for ext in gcdsp.decoders.opcodes_ext:
ext = Insn(*ext)
instruction_extensions.append(type(ext.name, (InstructionExtension,
), {'name': ext.name, 'opcode': ext.opcode, 'opcode_mask': ext.
mask, 'decode': insn_decoder, 'decode_operands':
decode_operands, 'operands_format': insn.operands}))
<|reserved_special_token_0|>
class Decoder(decompil.disassemblers.BaseDecoder):
def __init__(self, fp):
self.fp = fp
def parse_insn(self, disassembler, builder, address):
opcode = self.get_word(address)
next_address = address + 1
if opcode is None:
return None
insn_pat = self.lookup(opcode, instructions)
if insn_pat.have_extra_operand:
extra_operand = self.get_word(address + 1)
next_address += 1
if extra_operand is None:
raise ValueError('Incomplete file')
else:
extra_operand = None
if insn_pat.is_extended:
ext_pat = self.lookup(opcode, instruction_extensions)
ext = ext_pat(opcode)
else:
ext = None
insn = insn_pat(address, opcode, extra_operand, ext)
insn_image = '{}{}'.format(insn.name, "'{}".format(insn.extension.
name) if insn.is_extended else '')
builder.set_origin('At {:#04x}: {}'.format(address, insn_image))
if insn.is_extended:
insn.extension.decode(disassembler.context, disassembler, builder)
if disassembler.must_stop_basic_block:
return next_address
insn.decode(disassembler.context, disassembler, builder)
return next_address
def iter_insns(self, address):
while True:
address, insn = self.parse_insn(address)
if insn is None:
break
else:
yield address, insn
def get_word(self, address):
self.fp.seek(2 * address)
word = self.fp.read(2)
if len(word) == 0:
return None
elif len(word) == 2:
return struct.unpack('>H', word)[0]
else:
raise ValueError('Incomplete file')
def lookup(self, opcode, pattern_set):
for pat in pattern_set:
if opcode & pat.opcode_mask == pat.opcode:
return pat
else:
raise ValueError('Invalid opcode: {:04x}'.format(opcode))
<|reserved_special_token_1|>
import collections
import inspect
import struct
from pygments.token import *
import decompil.builder
import decompil.disassemblers
import decompil.ir
class Context(decompil.ir.Context):
    """IR context for the GameCube DSP.

    The DSP is a 16-bit, word-addressed machine; this context declares
    the physical 16-bit registers plus composite "wide" views (40-bit
    accumulators, 32-bit extra accumulators, the product register).
    """

    def __init__(self):
        # 16-bit machine words.
        super(Context, self).__init__(16)
        self.pointer_type = self.create_pointer_type(self.half_type)
        self.init_registers()

    def init_registers(self):
        """Create physical registers, composite views and lookup maps.

        `self.registers` is indexed by the hardware register number
        (0x00-0x1f); the hex comments below give that index range.
        """
        self.registers = regs = [
            # 0x00-0x03: ar0-ar3 (paired with wr/ix registers below)
            Register(self, 'ar0', 16),
            Register(self, 'ar1', 16),
            Register(self, 'ar2', 16),
            Register(self, 'ar3', 16),
            # 0x04-0x07: ix0-ix3 index registers
            Register(self, 'ix0', 16),
            Register(self, 'ix1', 16),
            Register(self, 'ix2', 16),
            Register(self, 'ix3', 16),
            # 0x08-0xb: unnamed general registers
            Register(self, 'r08', 16),
            Register(self, 'r09', 16),
            Register(self, 'r0a', 16),
            Register(self, 'r0b', 16),
            # 0x0c-0x0f: st0-st3 stack registers
            # TODO: something special?
            Register(self, 'st0', 16),
            Register(self, 'st1', 16),
            Register(self, 'st2', 16),
            Register(self, 'st3', 16),
            # 0x10-0x11: high parts of the 40-bit accumulators
            # TODO: handle 8-bit overflow
            Register(self, 'ac0.h', 16),
            Register(self, 'ac1.h', 16),
            # 0x12-0x13: configuration and status registers
            Register(self, 'config', 16),
            Register(self, 'sr', 16),
            # 0x14-0x17: pieces of the multiplier product register
            Register(self, 'prod.l', 16),
            Register(self, 'prod.m1', 16),
            # TODO: handle 8-bit overflow
            Register(self, 'prod.h', 16),
            Register(self, 'prod.m2', 16),
            # 0x18-0x1b: low/high halves of the extra accumulators
            Register(self, 'ax0.l', 16),
            Register(self, 'ax1.l', 16),
            Register(self, 'ax0.h', 16),
            Register(self, 'ax1.h', 16),
            # 0x1c-0x1f: low/mid parts of the 40-bit accumulators
            Register(self, 'ac0.l', 16),
            Register(self, 'ac1.l', 16),
            Register(self, 'ac0.m', 16),
            Register(self, 'ac1.m', 16),
        ]
        # Wrapping registers; not part of the numbered register file.
        self.wr_registers = [
            Register(self, 'wr{}'.format(i), 16) for i in range(4)
        ]
        # ar -> wr companion register.
        self.addr_to_wr = {
            self.registers[0x00]: self.wr_registers[0x00],
            self.registers[0x01]: self.wr_registers[0x01],
            self.registers[0x02]: self.wr_registers[0x02],
            self.registers[0x03]: self.wr_registers[0x03],
        }
        # ar -> ix companion register.
        self.addr_to_ix = {
            self.registers[0x00]: self.registers[0x04],
            self.registers[0x01]: self.registers[0x05],
            self.registers[0x02]: self.registers[0x06],
            self.registers[0x03]: self.registers[0x07],
        }
        # 40-bit accumulators: ac<N> = ac<N>.h:ac<N>.m:ac<N>.l.
        self.long_accumulators = [
            Register(self, 'ac0', 40, [
                (regs[0x10], 32), (regs[0x1e], 16), (regs[0x1c], 0)
            ]),
            Register(self, 'ac1', 40, [
                (regs[0x11], 32), (regs[0x1f], 16), (regs[0x1d], 0)
            ]),
        ]
        # 24-bit views skipping the low word (h:m only).
        self.short_accumulators = [
            Register(self, 'acs0', 24, [(regs[0x10], 16), (regs[0x1e], 0)]),
            Register(self, 'acs1', 24, [(regs[0x11], 16), (regs[0x1f], 0)]),
        ]
        # 32-bit extra accumulators: ax<N> = ax<N>.h:ax<N>.l.
        self.extra_acculumators = [
            Register(self, 'ax0', 32, [(regs[0x1a], 16), (regs[0x18], 0)]),
            Register(self, 'ax1', 32, [(regs[0x1b], 16), (regs[0x19], 0)]),
        ]
        # Product register. Note: prod.m2 and prod.m1 both sit at shift
        # 16 on purpose — the hardware value is the *sum* of both middle
        # parts (Register.build_load adds components together).
        self.prod_register = Register(self, 'prod', 40, [
            (regs[0x17], 16),
            (regs[0x16], 32),
            (regs[0x15], 16),
            (regs[0x14], 0),
        ])
class Register(decompil.ir.Register):
    """A DSP register: either a plain physical one or a composite view.

    When `components` is given, this register is a virtual wide register
    described by (sub-register, bit-shift) pairs: its value is the sum of
    each sub-register zero-extended and shifted left by its shift amount
    (see build_load).
    """

    def __init__(self, context, name, width, components=None):
        self.context = context
        self.type = context.create_int_type(width)
        self.name = name
        self.components = components
        # Underlying physical registers; None for a physical register.
        self.registers = (
            [reg for reg, _ in components]
            if components else
            None
        )

    def build_load(self, builder):
        """Emit IR reading this register and return the loaded value."""
        if self.components is None:
            return builder.build_rload(self)
        result = None
        for reg, shift in self.components:
            # Widen each component to the composite type, then move it
            # into position before accumulating.
            val = builder.build_zext(self.type, builder.build_rload(reg))
            if shift:
                val = builder.build_lshl(val, self.type.create(shift))
            result = builder.build_add(result, val) if result else val
        return result

    def build_store(self, builder, value):
        """Emit IR writing `value` (of this register's type) into it."""
        assert value.type == self.type
        if self.components is None:
            builder.build_rstore(self, value)
            return
        for reg, shift in self.components:
            # Bug fix: the original reused the previous component's
            # truncated `val` when `shift` was zero (and `val` was
            # unbound if the first component had a zero shift), so
            # zero-shift components stored the wrong value. Start from
            # the full `value` for every component.
            val = value
            if shift:
                # NOTE(review): isolating the component at bit `shift`
                # would normally need a *right* shift before truncation;
                # `build_lshl` mirrors the original code here — confirm
                # against decompil's shift-builder semantics.
                val = builder.build_lshl(val, value.type.create(shift))
            val = builder.build_trunc(reg.type, val)
            builder.build_rstore(reg, val)

    def build_load_comp(self, builder):
        """Load every component sub-register separately, in order."""
        return [
            builder.build_rload(reg)
            for reg, _ in self.components
        ]

    def build_store_comp(self, builder, *values):
        """Store one value per component sub-register, in component order."""
        assert len(values) == len(self.components)
        for value, (reg, _) in zip(values, self.components):
            builder.build_rstore(reg, value)

    def format(self):
        """Return the pygments-token rendering used for pretty-printing."""
        return [(Name.Variable, '${}'.format(self.name))]
class BaseDecoder:
    """Common interface shared by instruction and extension decoders.

    Concrete decoder classes fill in the class attributes below and
    override decode() to emit IR for the decoded entity.
    """

    # Mnemonic of the decoded entity.
    name = None
    # Fixed bit pattern identifying this opcode once masked.
    opcode = None
    # Mask selecting the fixed (non-operand) bits of the opcode word.
    opcode_mask = None
    # Sequence of operand descriptors consumed by decode_operands().
    operands_format = None

    def decode(self, context, disassembler, builder):
        """Translate this entity to IR; subclasses must override."""
        raise NotImplementedError()

    def decode_operands(self, context):
        """Return the operand values extracted per operands_format."""
        operands = []
        for fmt in self.operands_format:
            operands.append(fmt.extract(context, self))
        return operands
class Instruction(BaseDecoder):
have_extra_operand = False
is_extended = False
def __init__(self, address, opcode, extra_operand=None, extension=None):
self.address = address
self.opcode_value = opcode
self.extension = extension
assert self.is_extended == (extension is not None)
assert self.have_extra_operand == (extra_operand is not None)
self.extra_operand = extra_operand
if self.extension:
self.extension.instruction = self
def __repr__(self):
ext = (
' ({})'.format(self.extension.name)
if self.extension else
''
)
return '{:04x}: {}{}'.format(
self.address, self.name, ext
)
class InstructionExtension(BaseDecoder):
def __init__(self, opcode):
self.opcode_value = opcode
# When accepting an extension, instructions should set the following
# field:
self.instruction = None
def __repr__(self):
return '{:04x}: {} (extension)'.format(
self.address, self.name
)
# Dispatch tables, filled at import time: decoder classes for primary
# opcodes and for opcode extensions, respectively.
instructions = []
instruction_extensions = []


def _init_tables():
    """Collect hand-written decoder classes from gcdsp.decoders.

    Scans the module for subclasses of Instruction and
    InstructionExtension and registers them in the dispatch tables,
    sanity-checking that each opcode pattern fits inside its mask.
    """
    import gcdsp.decoders

    def collect(base_class):
        found = []
        for attr_name in dir(gcdsp.decoders):
            candidate = getattr(gcdsp.decoders, attr_name)
            if not (inspect.isclass(candidate)
                    and issubclass(candidate, base_class)
                    and candidate is not base_class):
                continue
            # No pattern bit may fall outside the fixed-bit mask.
            assert (candidate.opcode & ~candidate.opcode_mask) == 0
            found.append(candidate)
        return found

    instructions.extend(collect(Instruction))
    instruction_extensions.extend(collect(InstructionExtension))


_init_tables()
def load_insns():
    """Generate decoder classes from the opcode tables in gcdsp.decoders.

    For each entry of gcdsp.decoders.opcodes (resp. opcodes_ext), build a
    concrete Instruction (resp. InstructionExtension) subclass and append
    it to the module dispatch tables. A `decode_<name>` function in
    gcdsp.decoders, when present, becomes the class's decode method;
    otherwise a default "undefined" decoder is used.
    """
    import gcdsp.decoders

    def default_decoder(self, context, disassembler, builder):
        # Fallback for opcodes without a dedicated decoder: emit an
        # undefined value and close the current basic block.
        builder.build_undef()
        disassembler.stop_basic_block()

    def decode_operands(self, context):
        """Extract the raw operand fields described by operands_format."""
        result = []
        for _, size, addend, rshift, mask in self.operands_format:
            # NOTE(review): `self.opcode` is the class-level opcode
            # *pattern*; extracting operand fields probably needs the
            # instance's `opcode_value` — confirm before relying on it.
            operand = (self.opcode & mask) >> rshift
            # Bug fix: the original appended `self.opcode & mask + addend`,
            # which parses as `self.opcode & (mask + addend)` and discards
            # the extracted field entirely.
            result.append(operand + addend)
        return result

    Insn = collections.namedtuple(
        'Insn', 'name opcode mask size unused0 operands is_extended unused1'
    )

    for insn in gcdsp.decoders.opcodes:
        insn = Insn(*insn)
        insn_decoder = getattr(
            gcdsp.decoders,
            'decode_{}'.format(insn.name.lower()),
            default_decoder,
        )
        instructions.append(
            type(insn.name, (Instruction, ), {
                'name': insn.name,
                'opcode': insn.opcode,
                'opcode_mask': insn.mask,
                'have_extra_operand': insn.size == 2,
                'is_extended': insn.is_extended,
                'decode': insn_decoder,
                'decode_operands': decode_operands,
                'operands_format': insn.operands
            })
        )

    for ext in gcdsp.decoders.opcodes_ext:
        ext = Insn(*ext)
        # Bug fix: the original reused `insn_decoder` and `insn.operands`
        # leaked from the loop above, so every extension got the *last*
        # primary instruction's decoder and operand formats. Resolve the
        # extension's own decoder and use its own operand list instead.
        ext_decoder = getattr(
            gcdsp.decoders,
            'decode_{}'.format(ext.name.lower()),
            default_decoder,
        )
        instruction_extensions.append(
            type(ext.name, (InstructionExtension, ), {
                'name': ext.name,
                'opcode': ext.opcode,
                'opcode_mask': ext.mask,
                'decode': ext_decoder,
                'decode_operands': decode_operands,
                'operands_format': ext.operands
            })
        )


load_insns()
class Decoder(decompil.disassemblers.BaseDecoder):
    """Reads big-endian 16-bit opcode words from a stream and emits IR.

    Addresses are *word* addresses: word N lives at byte offset 2*N in
    the underlying stream.
    """

    def __init__(self, fp):
        # `fp`: a seekable binary stream containing the DSP code image.
        self.fp = fp

    def parse_insn(self, disassembler, builder, address):
        """Decode the instruction at `address`, emitting IR via `builder`.

        Returns the word address of the next instruction, or None at end
        of stream. Raises ValueError on an unknown opcode or a truncated
        extra-operand word.
        """
        opcode = self.get_word(address)
        next_address = address + 1
        if opcode is None:
            return None
        insn_pat = self.lookup(opcode, instructions)
        # Parse the extra operand, if any.
        if insn_pat.have_extra_operand:
            extra_operand = self.get_word(address + 1)
            next_address += 1
            if extra_operand is None:
                raise ValueError('Incomplete file')
        else:
            extra_operand = None
        # Parse the instruction extension, if any.
        if insn_pat.is_extended:
            ext_pat = self.lookup(opcode, instruction_extensions)
            ext = ext_pat(opcode)
        else:
            ext = None
        insn = insn_pat(address, opcode, extra_operand, ext)
        # Human-readable image, e.g. "ADDR'L", recorded as the IR origin
        # so emitted instructions can be traced back to the source word.
        insn_image = '{}{}'.format(
            insn.name,
            "'{}".format(insn.extension.name) if insn.is_extended else ''
        )
        builder.set_origin('At {:#04x}: {}'.format(address, insn_image))
        # Always decode the extension first (if any).
        if insn.is_extended:
            insn.extension.decode(disassembler.context, disassembler, builder)
            # TODO: remove this once all extensions are supported.
            if disassembler.must_stop_basic_block:
                return next_address
        insn.decode(disassembler.context, disassembler, builder)
        return next_address

    def iter_insns(self, address):
        # NOTE(review): this looks stale — parse_insn now takes
        # (disassembler, builder, address) and returns a single address
        # rather than an (address, insn) pair, so this call would raise
        # TypeError; confirm whether any caller still uses it.
        while True:
            address, insn = self.parse_insn(address)
            if insn is None:
                break
            else:
                yield address, insn

    def get_word(self, address):
        """Return the big-endian word at word-address `address`.

        Returns None at end of stream; raises ValueError if only one
        byte of the word is present.
        """
        self.fp.seek(2 * address)
        word = self.fp.read(2)
        if len(word) == 0:
            return None
        elif len(word) == 2:
            return struct.unpack('>H', word)[0]
        else:
            raise ValueError('Incomplete file')

    def lookup(self, opcode, pattern_set):
        """Return the first pattern class whose fixed bits match `opcode`.

        Raises ValueError when no pattern in `pattern_set` matches.
        """
        for pat in pattern_set:
            if opcode & pat.opcode_mask == pat.opcode:
                return pat
        else:
            raise ValueError('Invalid opcode: {:04x}'.format(opcode))
|
flexible
|
{
"blob_id": "865d7c606b287dbce158f721c6cf768cd078eb48",
"index": 9231,
"step-1": "<mask token>\n\n\nclass Register(decompil.ir.Register):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = 
'{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-2": "<mask token>\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not 
None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise 
ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-3": "<mask token>\n\n\nclass Context(decompil.ir.Context):\n <mask token>\n <mask token>\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n 
self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n 
elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-4": "<mask token>\n\n\nclass Context(decompil.ir.Context):\n\n def __init__(self):\n super(Context, self).__init__(16)\n self.pointer_type = self.create_pointer_type(self.half_type)\n self.init_registers()\n\n def init_registers(self):\n self.registers = regs = [Register(self, 'ar0', 16), Register(self,\n 'ar1', 16), Register(self, 'ar2', 16), Register(self, 'ar3', 16\n ), Register(self, 'ix0', 16), Register(self, 'ix1', 16),\n Register(self, 'ix2', 16), Register(self, 'ix3', 16), Register(\n self, 'r08', 16), Register(self, 'r09', 16), Register(self,\n 'r0a', 16), Register(self, 'r0b', 16), Register(self, 'st0', 16\n ), Register(self, 'st1', 16), Register(self, 'st2', 16),\n Register(self, 'st3', 16), Register(self, 'ac0.h', 16),\n Register(self, 'ac1.h', 16), Register(self, 'config', 16),\n Register(self, 'sr', 16), Register(self, 'prod.l', 16),\n Register(self, 'prod.m1', 16), Register(self, 'prod.h', 16),\n Register(self, 'prod.m2', 16), Register(self, 'ax0.l', 16),\n Register(self, 'ax1.l', 16), Register(self, 'ax0.h', 16),\n Register(self, 'ax1.h', 16), Register(self, 'ac0.l', 16),\n Register(self, 'ac1.l', 16), Register(self, 'ac0.m', 16),\n Register(self, 'ac1.m', 16)]\n self.wr_registers = [Register(self, 'wr{}'.format(i), 16) for i in\n range(4)]\n self.addr_to_wr = {self.registers[0]: self.wr_registers[0], self.\n registers[1]: self.wr_registers[1], self.registers[2]: self.\n wr_registers[2], self.registers[3]: self.wr_registers[3]}\n self.addr_to_ix = {self.registers[0]: self.registers[4], self.\n registers[1]: self.registers[5], self.registers[2]: self.\n registers[6], self.registers[3]: self.registers[7]}\n self.long_accumulators = [Register(self, 'ac0', 40, [(regs[16], 32),\n (regs[30], 16), (regs[28], 0)]), Register(self, 'ac1', 40, [(\n regs[17], 32), (regs[31], 16), (regs[29], 0)])]\n self.short_accumulators = [Register(self, 'acs0', 24, [(regs[16], \n 16), (regs[30], 0)]), Register(self, 'acs1', 24, [(regs[17], 16\n ), (regs[31], 0)])]\n 
self.extra_acculumators = [Register(self, 'ax0', 32, [(regs[26], 16\n ), (regs[24], 0)]), Register(self, 'ax1', 32, [(regs[27], 16),\n (regs[25], 0)])]\n self.prod_register = Register(self, 'prod', 40, [(regs[23], 16), (\n regs[22], 32), (regs[21], 16), (regs[20], 0)])\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n 
have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\ndef _init_tables():\n import gcdsp.decoders\n\n def helper(table, cls):\n for obj_name in dir(gcdsp.decoders):\n obj = getattr(gcdsp.decoders, obj_name)\n if not (inspect.isclass(obj) and issubclass(obj, cls) and obj !=\n cls):\n continue\n assert obj.opcode & ~obj.opcode_mask == 0\n table.append(obj)\n helper(instructions, Instruction)\n helper(instruction_extensions, InstructionExtension)\n\n\n<mask token>\n\n\ndef load_insns():\n import gcdsp.decoders\n\n def default_decoder(self, context, disassembler, builder):\n builder.build_undef()\n disassembler.stop_basic_block()\n\n def decode_operands(self, context):\n result = []\n for _, size, addend, rshift, mask in self.operands_format:\n operand = (self.opcode & mask) >> rshift\n result.append(self.opcode & mask + addend)\n return result\n Insn = collections.namedtuple('Insn',\n 'name opcode mask size unused0 operands is_extended unused1')\n for insn in gcdsp.decoders.opcodes:\n insn = Insn(*insn)\n insn_decoder = getattr(gcdsp.decoders, 'decode_{}'.format(insn.name\n .lower()), default_decoder)\n instructions.append(type(insn.name, (Instruction,), {'name': insn.\n name, 'opcode': insn.opcode, 'opcode_mask': 
insn.mask,\n 'have_extra_operand': insn.size == 2, 'is_extended': insn.\n is_extended, 'decode': insn_decoder, 'decode_operands':\n decode_operands, 'operands_format': insn.operands}))\n for ext in gcdsp.decoders.opcodes_ext:\n ext = Insn(*ext)\n instruction_extensions.append(type(ext.name, (InstructionExtension,\n ), {'name': ext.name, 'opcode': ext.opcode, 'opcode_mask': ext.\n mask, 'decode': insn_decoder, 'decode_operands':\n decode_operands, 'operands_format': insn.operands}))\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, 
opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-5": "import collections\nimport inspect\nimport struct\n\nfrom pygments.token import *\n\nimport decompil.builder\nimport decompil.disassemblers\nimport decompil.ir\n\n\nclass Context(decompil.ir.Context):\n\n def __init__(self):\n super(Context, self).__init__(16)\n self.pointer_type = self.create_pointer_type(self.half_type)\n self.init_registers()\n\n def init_registers(self):\n self.registers = regs = [\n # 0x00-0x03\n Register(self, 'ar0', 16),\n Register(self, 'ar1', 16),\n Register(self, 'ar2', 16),\n Register(self, 'ar3', 16),\n\n # 0x04-0x07\n Register(self, 'ix0', 16),\n Register(self, 'ix1', 16),\n Register(self, 'ix2', 16),\n Register(self, 'ix3', 16),\n\n # 0x08-0xb\n Register(self, 'r08', 16),\n Register(self, 'r09', 16),\n Register(self, 'r0a', 16),\n Register(self, 'r0b', 16),\n\n # 0x0c-0x0f\n # TODO: something special?\n Register(self, 'st0', 16),\n Register(self, 'st1', 16),\n Register(self, 'st2', 16),\n Register(self, 'st3', 16),\n\n # 0x10-0x11\n # TODO: handle 8-bit overflow\n Register(self, 'ac0.h', 16),\n Register(self, 'ac1.h', 16),\n\n # 0x12-0x13\n Register(self, 'config', 16),\n Register(self, 'sr', 16),\n\n # 0x14-0x17\n Register(self, 'prod.l', 16),\n Register(self, 'prod.m1', 16),\n # TODO: handle 8-bit overflow\n Register(self, 'prod.h', 16),\n Register(self, 'prod.m2', 16),\n\n # 0x18-0x1b\n Register(self, 'ax0.l', 16),\n Register(self, 'ax1.l', 16),\n Register(self, 'ax0.h', 16),\n Register(self, 'ax1.h', 16),\n\n # 0x1c-0x1f\n Register(self, 'ac0.l', 16),\n Register(self, 'ac1.l', 16),\n Register(self, 'ac0.m', 16),\n Register(self, 'ac1.m', 16),\n ]\n\n self.wr_registers = [\n Register(self, 'wr{}'.format(i), 16) for i in range(4)\n ]\n\n self.addr_to_wr = {\n self.registers[0x00]: self.wr_registers[0x00],\n self.registers[0x01]: self.wr_registers[0x01],\n self.registers[0x02]: self.wr_registers[0x02],\n self.registers[0x03]: self.wr_registers[0x03],\n }\n self.addr_to_ix = {\n self.registers[0x00]: self.registers[0x04],\n 
self.registers[0x01]: self.registers[0x05],\n self.registers[0x02]: self.registers[0x06],\n self.registers[0x03]: self.registers[0x07],\n }\n\n self.long_accumulators = [\n Register(self, 'ac0', 40, [\n (regs[0x10], 32), (regs[0x1e], 16), (regs[0x1c], 0)\n ]),\n Register(self, 'ac1', 40, [\n (regs[0x11], 32), (regs[0x1f], 16), (regs[0x1d], 0)\n ]),\n ]\n self.short_accumulators = [\n Register(self, 'acs0', 24, [(regs[0x10], 16), (regs[0x1e], 0)]),\n Register(self, 'acs1', 24, [(regs[0x11], 16), (regs[0x1f], 0)]),\n ]\n self.extra_acculumators = [\n Register(self, 'ax0', 32, [(regs[0x1a], 16), (regs[0x18], 0)]),\n Register(self, 'ax1', 32, [(regs[0x1b], 16), (regs[0x19], 0)]),\n ]\n self.prod_register = Register(self, 'prod', 40, [\n (regs[0x17], 16),\n (regs[0x16], 32),\n (regs[0x15], 16),\n (regs[0x14], 0),\n ])\n\n\nclass Register(decompil.ir.Register):\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = (\n [reg for reg, _ in components]\n if components else\n None\n )\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(\n self.type, builder.build_rload(reg)\n )\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [\n builder.build_rload(reg)\n for reg, _ in self.components\n ]\n\n def 
build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = (\n ' ({})'.format(self.extension.name)\n if self.extension else\n ''\n )\n return '{:04x}: {}{}'.format(\n self.address, self.name, ext\n )\n\n\nclass InstructionExtension(BaseDecoder):\n def __init__(self, opcode):\n self.opcode_value = opcode\n # When accepting an extension, instructions should set the following\n # field:\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(\n self.address, self.name\n )\n\n\ninstructions = []\ninstruction_extensions = []\ndef _init_tables():\n import gcdsp.decoders\n\n def helper(table, cls):\n for obj_name in dir(gcdsp.decoders):\n obj = getattr(gcdsp.decoders, obj_name)\n if not (\n inspect.isclass(obj)\n and issubclass(obj, cls)\n and obj != cls\n ):\n continue\n assert (obj.opcode & ~obj.opcode_mask) == 0\n table.append(obj)\n\n helper(instructions, Instruction)\n helper(instruction_extensions, InstructionExtension)\n_init_tables()\n\n\ndef load_insns():\n import 
gcdsp.decoders\n\n def default_decoder(self, context, disassembler, builder):\n builder.build_undef()\n disassembler.stop_basic_block()\n\n def decode_operands(self, context):\n result = []\n for _, size, addend, rshift, mask in self.operands_format:\n operand = (self.opcode & mask) >> rshift\n result.append(self.opcode & mask + addend)\n return result\n\n Insn = collections.namedtuple(\n 'Insn', 'name opcode mask size unused0 operands is_extended unused1'\n )\n\n for insn in gcdsp.decoders.opcodes:\n insn = Insn(*insn)\n insn_decoder = getattr(\n gcdsp.decoders,\n 'decode_{}'.format(insn.name.lower()),\n default_decoder,\n )\n instructions.append(\n type(insn.name, (Instruction, ), {\n 'name': insn.name,\n 'opcode': insn.opcode,\n 'opcode_mask': insn.mask,\n 'have_extra_operand': insn.size == 2,\n 'is_extended': insn.is_extended,\n 'decode': insn_decoder,\n 'decode_operands': decode_operands,\n 'operands_format': insn.operands\n })\n )\n\n for ext in gcdsp.decoders.opcodes_ext:\n ext = Insn(*ext)\n instruction_extensions.append(\n type(ext.name, (InstructionExtension, ), {\n 'name': ext.name,\n 'opcode': ext.opcode,\n 'opcode_mask': ext.mask,\n 'decode': insn_decoder,\n 'decode_operands': decode_operands,\n 'operands_format': insn.operands\n })\n )\nload_insns()\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n\n # Parse the extra operand, if any.\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n\n # Parse the instruction extension, if any.\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = 
None\n\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(\n insn.name,\n \"'{}\".format(insn.extension.name) if insn.is_extended else ''\n )\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n\n # Always decode the extension first (if any).\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n # TODO: remove this once all extensions are supported.\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-ids": [
18,
24,
25,
29,
33
]
}
|
[
18,
24,
25,
29,
33
] |
from __future__ import division
import re
import sys
import six
from six.moves import queue
import os
import io
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.cloud import speech as speech1
from google.cloud.speech import enums as enums2
from google.cloud.speech import types as types2
from google.cloud import speech_v1p1beta1 as speech2
class Google_Cloud:
    """Thin wrapper around the Google Cloud Natural Language API.

    Wraps a single plain-text document and exposes sentiment, entity,
    syntax and classification analyses over it.
    """

    def __init__(self, text):
        # NOTE(review): debug print left in; kept to preserve observable
        # behavior — remove once callers no longer rely on it.
        print(text)
        self.client = language.LanguageServiceClient()
        # Accept both bytes and str input; the API document is built from
        # UTF-8 encoded bytes either way.
        if isinstance(text, six.binary_type):
            text = text.decode('utf-8')
        self.document = types.Document(
            content=text.encode('utf-8'),
            type=enums.Document.Type.PLAIN_TEXT)

    def sentiment(self):
        """Return overall document sentiment.

        Returns a dict with keys 'sentiment' (score, -1.0 to 1.0) and
        'magnitude' (non-negative overall emotional strength).
        """
        google_sentiment = self.client.analyze_sentiment(
            self.document).document_sentiment
        return {
            'sentiment': google_sentiment.score,
            'magnitude': google_sentiment.magnitude,
        }

    def entities(self):
        """Return a sorted list of lower-cased entity names found in the text."""
        google_entities = self.client.analyze_entities(self.document).entities
        return sorted(entity.name.lower() for entity in google_entities)

    def entity_sentiment(self):
        """Return a mapping of entity name to its Sentiment message.

        If the same name is mentioned as several distinct entities, the
        last one returned by the API wins.
        """
        # Detect and send native Python encoding to receive correct word
        # offsets (UTF-16 on narrow builds, UTF-32 otherwise).
        encoding = enums.EncodingType.UTF32
        if sys.maxunicode == 65535:
            encoding = enums.EncodingType.UTF16
        result = self.client.analyze_entity_sentiment(self.document, encoding)
        # Fixed: the original built an `entity_str` accumulator per entity
        # that was never used or returned (dead code).
        return {entity.name: entity.sentiment for entity in result.entities}

    def syntax(self):
        """Detects syntax in the text.

        Returns a list of 'TAG: token' strings, one per token, where TAG
        is the part-of-speech tag name.
        """
        tokens = self.client.analyze_syntax(self.document).tokens
        # Part-of-speech tags from enums.PartOfSpeech.Tag, indexed by tag value.
        pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',
                   'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')
        return [
            u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
                             token.text.content)
            for token in tokens
        ]

    def categories(self):
        """Classifies content categories of the provided text.

        Returns a list of category name strings.
        """
        categories = self.client.classify_text(self.document).categories
        return [category.name for category in categories]
class Google_ST:
def __init__(self, file, rate):
self.audio_file = file
self.client = speech1.SpeechClient()
self.rate = rate
def printFields(self):
print(type(self.audio_file))
print(type(self.audio_file.read()))
    def transcribe_file(self, uri):
        """Synchronously transcribe the audio file at `uri` (a GCS URI).

        Supports .wav and .flac files.  For .wav input, three attempts are
        made, each relaxing the request:
          1. LINEAR16, two channels, separate recognition per channel;
          2. plain LINEAR16 with default channel settings;
          3. an asynchronous long-running request via transcribe_long_file
             (needed when the audio exceeds the synchronous API limit).

        Returns the concatenated transcript string on success, the string
        "Please use .wav or .flac audio files" for other extensions, or
        implicitly None when the .flac request or the final .wav fallback
        fails (the error is only printed).
        """
        #with io.open(self.audio_file, 'rb') as audio_file:
        #    content = audio_file.read()
        #print(type(content))
        #audio = types2.RecognitionAudio(uri=uri)
        if uri.endswith('.wav'):
            try:
                # First attempt: stereo LINEAR16, one result per channel.
                config = speech1.types.RecognitionConfig(
                    encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
                    #sample_rate_hertz=self.rate,
                    language_code='en-US',
                    audio_channel_count=2,
                    enable_separate_recognition_per_channel=True
                )
                audio = speech1.types.RecognitionAudio(uri=uri)
                response = self.client.recognize(config, audio)
                result_str = ''
                for result in response.results:
                    result_str += result.alternatives[0].transcript
                    print('Transcript: {}'.format(result.alternatives[0].transcript))
                return result_str
            except Exception as e:
                # Second attempt: same request without the channel options
                # (covers mono files where the stereo config is rejected).
                try:
                    config = speech1.types.RecognitionConfig(
                        encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
                        #sample_rate_hertz=self.rate,
                        language_code='en-US',
                    )
                    audio = speech1.types.RecognitionAudio(uri=uri)
                    response = self.client.recognize(config, audio)
                    result_str = ''
                    for result in response.results:
                        result_str += result.alternatives[0].transcript
                        print('Transcript: {}'.format(result.alternatives[0].transcript))
                    return result_str
                except Exception as e2:
                    # Last resort: asynchronous long-running recognition.
                    try:
                        result_str = self.transcribe_long_file(uri)
                        return result_str
                    except Exception as e3:
                        # NOTE(review): the error is printed and None is
                        # implicitly returned, so callers cannot tell
                        # failure apart from an empty transcript.
                        print(e3)
        elif uri.endswith('.flac'):
            try:
                config = speech1.types.RecognitionConfig(
                    encoding=speech1.enums.RecognitionConfig.AudioEncoding.FLAC,
                    #sample_rate_hertz=self.rate,
                    language_code='en-US',
                )
                audio = speech1.types.RecognitionAudio(uri=uri)
                response = self.client.recognize(config, audio)
                result_str = ''
                for result in response.results:
                    result_str += result.alternatives[0].transcript
                    print('Transcript: {}'.format(result.alternatives[0].transcript))
                return result_str
            except Exception as e:
                # NOTE(review): failures fall through to an implicit None.
                print(e)
        else:
            return "Please use .wav or .flac audio files"
def transcribe_long_file(self, uri):
config = speech1.types.RecognitionConfig(
encoding=speech2.enums.RecognitionConfig.AudioEncoding.LINEAR16,
#sample_rate_hertz=self.rate,
language_code='en-US',
)
audio = speech1.types.RecognitionAudio(uri=uri)
operation = self.client.long_running_recognize(config, audio)
print('Waiting for operation to complete')
response = operation.result(timeout=90)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
return result_str
|
normal
|
{
"blob_id": "6868a8b5d36403f1417301acdca5f5dc9e45c682",
"index": 9849,
"step-1": "<mask token>\n\n\nclass Google_Cloud:\n <mask token>\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document\n ).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Google_ST:\n\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US', audio_channel_count=2,\n enable_separate_recognition_per_channel=True)\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n try:\n config = speech1.types.RecognitionConfig(encoding=\n speech1.enums.RecognitionConfig.AudioEncoding.\n LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0\n ].transcript))\n return result_str\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n elif uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.FLAC,\n language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = 
self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n print(e)\n else:\n return 'Please use .wav or .flac audio files'\n\n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(encoding=speech2.enums.\n RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n return result_str\n",
"step-2": "<mask token>\n\n\nclass Google_Cloud:\n <mask token>\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document\n ).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n\n def entities(self):\n google_entities = self.client.analyze_entities(self.document).entities\n entities = []\n for entity in google_entities:\n entities.append(entity.name.lower())\n entities.sort()\n return entities\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Google_ST:\n\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US', audio_channel_count=2,\n enable_separate_recognition_per_channel=True)\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n try:\n config = speech1.types.RecognitionConfig(encoding=\n speech1.enums.RecognitionConfig.AudioEncoding.\n LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0\n ].transcript))\n return result_str\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n elif 
uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.FLAC,\n language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n print(e)\n else:\n return 'Please use .wav or .flac audio files'\n\n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(encoding=speech2.enums.\n RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n return result_str\n",
"step-3": "<mask token>\n\n\nclass Google_Cloud:\n <mask token>\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document\n ).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n\n def entities(self):\n google_entities = self.client.analyze_entities(self.document).entities\n entities = []\n for entity in google_entities:\n entities.append(entity.name.lower())\n entities.sort()\n return entities\n <mask token>\n\n def syntax(self):\n \"\"\"Detects syntax in the text.\"\"\"\n tokens = self.client.analyze_syntax(self.document).tokens\n pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN',\n 'NUM', 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')\n result = []\n for token in tokens:\n result.append(u'{}: {}'.format(pos_tag[token.part_of_speech.tag\n ], token.text.content))\n return result\n\n def categories(self):\n \"\"\"Classifies content categories of the provided text.\"\"\"\n categories = self.client.classify_text(self.document).categories\n result = []\n for category in categories:\n result.append(category.name)\n return result\n\n\nclass Google_ST:\n\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US', audio_channel_count=2,\n enable_separate_recognition_per_channel=True)\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except 
Exception as e:\n try:\n config = speech1.types.RecognitionConfig(encoding=\n speech1.enums.RecognitionConfig.AudioEncoding.\n LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0\n ].transcript))\n return result_str\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n elif uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.FLAC,\n language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n print(e)\n else:\n return 'Please use .wav or .flac audio files'\n\n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(encoding=speech2.enums.\n RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n return result_str\n",
"step-4": "<mask token>\n\n\nclass Google_Cloud:\n\n def __init__(self, text):\n print(text)\n self.client = language.LanguageServiceClient()\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n self.document = types.Document(content=text.encode('utf-8'), type=\n enums.Document.Type.PLAIN_TEXT)\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document\n ).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n\n def entities(self):\n google_entities = self.client.analyze_entities(self.document).entities\n entities = []\n for entity in google_entities:\n entities.append(entity.name.lower())\n entities.sort()\n return entities\n <mask token>\n\n def syntax(self):\n \"\"\"Detects syntax in the text.\"\"\"\n tokens = self.client.analyze_syntax(self.document).tokens\n pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN',\n 'NUM', 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')\n result = []\n for token in tokens:\n result.append(u'{}: {}'.format(pos_tag[token.part_of_speech.tag\n ], token.text.content))\n return result\n\n def categories(self):\n \"\"\"Classifies content categories of the provided text.\"\"\"\n categories = self.client.classify_text(self.document).categories\n result = []\n for category in categories:\n result.append(category.name)\n return result\n\n\nclass Google_ST:\n\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US', audio_channel_count=2,\n enable_separate_recognition_per_channel=True)\n audio = speech1.types.RecognitionAudio(uri=uri)\n 
response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n try:\n config = speech1.types.RecognitionConfig(encoding=\n speech1.enums.RecognitionConfig.AudioEncoding.\n LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0\n ].transcript))\n return result_str\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n elif uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.FLAC,\n language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n print(e)\n else:\n return 'Please use .wav or .flac audio files'\n\n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(encoding=speech2.enums.\n RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n return result_str\n",
"step-5": "from __future__ import division\n\nimport re\nimport sys\nimport six\nfrom six.moves import queue\nimport os\nimport io\nfrom google.cloud import language\nfrom google.cloud.language import enums\nfrom google.cloud.language import types\nfrom google.cloud import speech as speech1\nfrom google.cloud.speech import enums as enums2\nfrom google.cloud.speech import types as types2\nfrom google.cloud import speech_v1p1beta1 as speech2\n\n\nclass Google_Cloud:\n\n def __init__(self, text):\n print(text)\n self.client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n self.document = types.Document(\n content=text.encode('utf-8'),\n type=enums.Document.Type.PLAIN_TEXT)\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n \n def entities(self):\n google_entities = self.client.analyze_entities(self.document).entities\n \n entities = []\n for entity in google_entities:\n entities.append(entity.name.lower())\n\n entities.sort()\n return entities\n\n def entity_sentiment(self):\n # Detect and send native Python encoding to receive correct word offsets.\n encoding = enums.EncodingType.UTF32\n if sys.maxunicode == 65535:\n encoding = enums.EncodingType.UTF16\n\n result = self.client.analyze_entity_sentiment(self.document, encoding)\n\n entities = {}\n for entity in result.entities:\n entity_str = \"\"\n entity_str += 'Mentions: '\n entity_str += (u'Name: \"{}\"'.format(entity.name))\n name = entity.name\n entities[name] = entity.sentiment\n\n return entities\n\n def syntax(self):\n \"\"\"Detects syntax in the text.\"\"\"\n\n # Detects syntax in the document. 
You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n tokens = self.client.analyze_syntax(self.document).tokens\n\n # part-of-speech tags from enums.PartOfSpeech.Tag\n pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',\n 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')\n\n result = []\n for token in tokens:\n result.append((u'{}: {}'.format(pos_tag[token.part_of_speech.tag],\n token.text.content)))\n \n return result\n\n def categories(self):\n \"\"\"Classifies content categories of the provided text.\"\"\"\n categories = self.client.classify_text(self.document).categories\n\n result = []\n for category in categories:\n result.append(category.name)\n\n return result\n \nclass Google_ST:\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n #with io.open(self.audio_file, 'rb') as audio_file:\n # content = audio_file.read()\n #print(type(content))\n #audio = types2.RecognitionAudio(uri=uri)\n\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(\n encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n #sample_rate_hertz=self.rate,\n language_code='en-US',\n audio_channel_count=2,\n enable_separate_recognition_per_channel=True\n )\n audio = speech1.types.RecognitionAudio(uri=uri)\n \n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].transcript))\n\n return result_str\n\n except Exception as e:\n try:\n config = speech1.types.RecognitionConfig(\n encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n #sample_rate_hertz=self.rate,\n language_code='en-US',\n )\n audio = speech1.types.RecognitionAudio(uri=uri)\n \n response = 
self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].transcript))\n\n return result_str\n\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n\n elif uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(\n encoding=speech1.enums.RecognitionConfig.AudioEncoding.FLAC,\n #sample_rate_hertz=self.rate,\n language_code='en-US',\n )\n audio = speech1.types.RecognitionAudio(uri=uri)\n \n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].transcript))\n\n return result_str \n except Exception as e:\n print(e)\n\n else:\n return \"Please use .wav or .flac audio files\"\n\n \n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(\n encoding=speech2.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n #sample_rate_hertz=self.rate,\n language_code='en-US',\n )\n audio = speech1.types.RecognitionAudio(uri=uri)\n \n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n \n return result_str\n\n \n",
"step-ids": [
7,
8,
10,
11,
14
]
}
|
[
7,
8,
10,
11,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(A.upper() + ' World!')
<|reserved_special_token_1|>
A = input('입력해주세요.\n')
print(A.upper() + ' World!')
<|reserved_special_token_1|>
A = input("입력해주세요.\n") #입력값을 in_AAA로 칭한다
#\n은 문법의 줄바꾸기
print(A.upper()+" World!") #in_AAA를 출력 + "World!")
#upper()는 앞의 값을 대문자화+"
|
flexible
|
{
"blob_id": "8a54a71b08d10c5da9ca440e8e4f61f908e00d54",
"index": 9496,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(A.upper() + ' World!')\n",
"step-3": "A = input('입력해주세요.\\n')\nprint(A.upper() + ' World!')\n",
"step-4": "A = input(\"입력해주세요.\\n\") #입력값을 in_AAA로 칭한다\r\n #\\n은 문법의 줄바꾸기\r\n\r\nprint(A.upper()+\" World!\") #in_AAA를 출력 + \"World!\")\r\n #upper()는 앞의 값을 대문자화+\"\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app_name = 'core'
urlpatterns = [path('', views.index, name='home'), path(
'property_for_rent/', views.propertyForRent, name='property_rent'),
path('property_for_sale/', views.propertyForSale, name='property_sale'),
path('property/<int:pk>', PropertyDetailView.as_view(), name=
'property_detail'), path('contact/', views.contact, name='contact')]
<|reserved_special_token_1|>
from django.urls import path
from . import views
from .views import propertyForRent, propertyForSale, PropertyDetailView
app_name = 'core'
urlpatterns = [path('', views.index, name='home'), path(
'property_for_rent/', views.propertyForRent, name='property_rent'),
path('property_for_sale/', views.propertyForSale, name='property_sale'),
path('property/<int:pk>', PropertyDetailView.as_view(), name=
'property_detail'), path('contact/', views.contact, name='contact')]
<|reserved_special_token_1|>
from django.urls import path
from . import views
from .views import propertyForRent, propertyForSale, PropertyDetailView
app_name = "core"
urlpatterns = [
path("", views.index, name="home"),
path("property_for_rent/", views.propertyForRent, name="property_rent"),
path("property_for_sale/", views.propertyForSale, name="property_sale"),
path("property/<int:pk>", PropertyDetailView.as_view(), name="property_detail"),
path("contact/", views.contact, name="contact"),
]
|
flexible
|
{
"blob_id": "e2671911894871c32ad933fde8e05c913a4cc942",
"index": 7149,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'core'\nurlpatterns = [path('', views.index, name='home'), path(\n 'property_for_rent/', views.propertyForRent, name='property_rent'),\n path('property_for_sale/', views.propertyForSale, name='property_sale'),\n path('property/<int:pk>', PropertyDetailView.as_view(), name=\n 'property_detail'), path('contact/', views.contact, name='contact')]\n",
"step-3": "from django.urls import path\nfrom . import views\nfrom .views import propertyForRent, propertyForSale, PropertyDetailView\napp_name = 'core'\nurlpatterns = [path('', views.index, name='home'), path(\n 'property_for_rent/', views.propertyForRent, name='property_rent'),\n path('property_for_sale/', views.propertyForSale, name='property_sale'),\n path('property/<int:pk>', PropertyDetailView.as_view(), name=\n 'property_detail'), path('contact/', views.contact, name='contact')]\n",
"step-4": "from django.urls import path\nfrom . import views\nfrom .views import propertyForRent, propertyForSale, PropertyDetailView\n\napp_name = \"core\"\n\nurlpatterns = [\n path(\"\", views.index, name=\"home\"),\n path(\"property_for_rent/\", views.propertyForRent, name=\"property_rent\"),\n path(\"property_for_sale/\", views.propertyForSale, name=\"property_sale\"),\n path(\"property/<int:pk>\", PropertyDetailView.as_view(), name=\"property_detail\"),\n path(\"contact/\", views.contact, name=\"contact\"),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def inicio():
global P, M, G, en
B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',
bg='yellow').grid(column=1, row=1)
B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',
bg='orange').grid(column=2, row=1)
B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',
bg='red').grid(column=3, row=1)
B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',
bg='green').grid(column=4, row=1)
def A():
global P
P = P + 1
def B():
global M
M = M + 1
def C():
global G
G = G + 1
def D():
global P, M, G
l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(
column=0, row=2)
l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(
column=0, row=3)
l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(
column=0, row=4)
l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)
).grid(column=0, row=5)
if en == 1:
inicio()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
global P, M, G, en
<|reserved_special_token_0|>
def inicio():
global P, M, G, en
B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',
bg='yellow').grid(column=1, row=1)
B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',
bg='orange').grid(column=2, row=1)
B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',
bg='red').grid(column=3, row=1)
B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',
bg='green').grid(column=4, row=1)
def A():
global P
P = P + 1
def B():
global M
M = M + 1
def C():
global G
G = G + 1
def D():
global P, M, G
l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(
column=0, row=2)
l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(
column=0, row=3)
l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(
column=0, row=4)
l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)
).grid(column=0, row=5)
if en == 1:
inicio()
<|reserved_special_token_0|>
inicio()
ventana.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
global P, M, G, en
P = 0
M = 0
G = 0
en = 1
def inicio():
global P, M, G, en
B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',
bg='yellow').grid(column=1, row=1)
B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',
bg='orange').grid(column=2, row=1)
B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',
bg='red').grid(column=3, row=1)
B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',
bg='green').grid(column=4, row=1)
def A():
global P
P = P + 1
def B():
global M
M = M + 1
def C():
global G
G = G + 1
def D():
global P, M, G
l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(
column=0, row=2)
l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(
column=0, row=3)
l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(
column=0, row=4)
l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)
).grid(column=0, row=5)
if en == 1:
inicio()
ventana = Tk()
inicio()
ventana.mainloop()
<|reserved_special_token_1|>
from tkinter import *
global P, M, G, en
P = 0
M = 0
G = 0
en = 1
def inicio():
global P, M, G, en
B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',
bg='yellow').grid(column=1, row=1)
B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',
bg='orange').grid(column=2, row=1)
B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',
bg='red').grid(column=3, row=1)
B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',
bg='green').grid(column=4, row=1)
def A():
global P
P = P + 1
def B():
global M
M = M + 1
def C():
global G
G = G + 1
def D():
global P, M, G
l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(
column=0, row=2)
l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(
column=0, row=3)
l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(
column=0, row=4)
l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)
).grid(column=0, row=5)
if en == 1:
inicio()
ventana = Tk()
inicio()
ventana.mainloop()
<|reserved_special_token_1|>
from tkinter import *
global P,M,G,en
P=0
M=0
G=0
en=1
def inicio():
global P,M,G,en
B1=Button(ventana,text="CAJAS PEQUEÑAS",command=A,state="normal",bg="yellow").grid(column=1,row=1)
B2=Button(ventana,text="CAJAS MEDIANAS",command=B,state="normal",bg="orange").grid(column=2,row=1)
B3=Button(ventana,text="CAJAS GRANDES",command=C,state="normal",bg="red").grid(column=3,row=1)
B4=Button(ventana,text="TOTAL DE CAJAS",command=D,state="normal",bg="green").grid(column=4,row=1)
def A ():
global P
P=P+1
def B ():
global M
M=M+1
def C ():
global G
G=G+1
def D ():
global P,M,G
l=Label(ventana,text="El total de CAJAS PEQUEÑAS es:"+str(P)).grid(column=0,row=2)
l=Label(ventana,text="El total de CAJAS MEDIANAS es:"+str(M)).grid(column=0,row=3)
l=Label(ventana,text="El total de CAJAS GRANDES es:"+str(G)).grid(column=0,row=4)
l=Label(ventana,text="EL TOTAL DE CAJAS CONTADAS ES:"+str(P+M+G)).grid(column=0,row=5)
if(en==1):
inicio()
ventana=Tk()
inicio()
ventana.mainloop()
|
flexible
|
{
"blob_id": "393af07fa7a5c265dbdd3047ef33a77130edf259",
"index": 1915,
"step-1": "<mask token>\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\n<mask token>\n",
"step-2": "<mask token>\nglobal P, M, G, en\n<mask token>\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\n<mask token>\ninicio()\nventana.mainloop()\n",
"step-3": "<mask token>\nglobal P, M, G, en\nP = 0\nM = 0\nG = 0\nen = 1\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\nventana = Tk()\ninicio()\nventana.mainloop()\n",
"step-4": "from tkinter import *\nglobal P, M, G, en\nP = 0\nM = 0\nG = 0\nen = 1\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\nventana = Tk()\ninicio()\nventana.mainloop()\n",
"step-5": "from tkinter import *\r\nglobal P,M,G,en\r\nP=0\r\nM=0\r\nG=0\r\nen=1\r\ndef inicio():\r\n global P,M,G,en\r\n \r\n B1=Button(ventana,text=\"CAJAS PEQUEÑAS\",command=A,state=\"normal\",bg=\"yellow\").grid(column=1,row=1)\r\n B2=Button(ventana,text=\"CAJAS MEDIANAS\",command=B,state=\"normal\",bg=\"orange\").grid(column=2,row=1)\r\n B3=Button(ventana,text=\"CAJAS GRANDES\",command=C,state=\"normal\",bg=\"red\").grid(column=3,row=1)\r\n B4=Button(ventana,text=\"TOTAL DE CAJAS\",command=D,state=\"normal\",bg=\"green\").grid(column=4,row=1)\r\n \r\n\r\ndef A ():\r\n global P\r\n P=P+1\r\ndef B ():\r\n global M\r\n M=M+1\r\ndef C ():\r\n global G\r\n G=G+1\r\ndef D ():\r\n global P,M,G\r\n l=Label(ventana,text=\"El total de CAJAS PEQUEÑAS es:\"+str(P)).grid(column=0,row=2) \r\n l=Label(ventana,text=\"El total de CAJAS MEDIANAS es:\"+str(M)).grid(column=0,row=3)\r\n l=Label(ventana,text=\"El total de CAJAS GRANDES es:\"+str(G)).grid(column=0,row=4)\r\n l=Label(ventana,text=\"EL TOTAL DE CAJAS CONTADAS ES:\"+str(P+M+G)).grid(column=0,row=5)\r\n\r\n \r\n if(en==1):\r\n inicio()\r\n\r\n \r\nventana=Tk()\r\ninicio()\r\nventana.mainloop()\r\n\r\n\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# Complete the hurdleRace function below.
def hurdleRace(k, height):
if k < max(height):
return max(height) - k
return 0
print(hurdleRace(2, [2,5,4,5,2]))
|
normal
|
{
"blob_id": "c139cbc3e693d75ad196e10257ff3028aa835709",
"index": 428,
"step-1": "<mask token>\n",
"step-2": "def hurdleRace(k, height):\n if k < max(height):\n return max(height) - k\n return 0\n\n\n<mask token>\n",
"step-3": "def hurdleRace(k, height):\n if k < max(height):\n return max(height) - k\n return 0\n\n\nprint(hurdleRace(2, [2, 5, 4, 5, 2]))\n",
"step-4": "# Complete the hurdleRace function below.\ndef hurdleRace(k, height):\n if k < max(height):\n return max(height) - k\n return 0\n\nprint(hurdleRace(2, [2,5,4,5,2]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
x = 5
print(x , " "*3 , "5")
print("{:20d}".format(x))
|
normal
|
{
"blob_id": "88542a18d98a215f58333f5dd2bf5c4b0d37f32f",
"index": 5539,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(x, ' ' * 3, '5')\nprint('{:20d}'.format(x))\n",
"step-3": "x = 5\nprint(x, ' ' * 3, '5')\nprint('{:20d}'.format(x))\n",
"step-4": "x = 5\nprint(x , \" \"*3 , \"5\")\nprint(\"{:20d}\".format(x))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# 5/1/2020
# Import median function from numpy
import numpy as np
from numpy import median
# Plot the median number of absences instead of the mean
sns.catplot(x="romantic", y="absences",
data=student_data,
kind="point",
hue="school",
ci=None,
estimator = median)
# Show plot
plt.show()
|
normal
|
{
"blob_id": "11072601e31ceba13f8adf6c070f84ca5add35e9",
"index": 3300,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsns.catplot(x='romantic', y='absences', data=student_data, kind='point',\n hue='school', ci=None, estimator=median)\nplt.show()\n",
"step-3": "import numpy as np\nfrom numpy import median\nsns.catplot(x='romantic', y='absences', data=student_data, kind='point',\n hue='school', ci=None, estimator=median)\nplt.show()\n",
"step-4": "# 5/1/2020\n# Import median function from numpy\nimport numpy as np\nfrom numpy import median\n\n# Plot the median number of absences instead of the mean\nsns.catplot(x=\"romantic\", y=\"absences\",\n\t\t\tdata=student_data,\n kind=\"point\",\n hue=\"school\",\n ci=None,\n estimator = median)\n\n# Show plot\nplt.show()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def convert_to_bs(ad_date):
date_components = decompose_date(ad_date)
year, month, day = date_components
bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)
formatted_date = '{}-{:02}-{:02}'.format(bs_year, bs_month, bs_day)
return formatted_date
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def convert_to_ad(bs_date):
date_components = decompose_date(bs_date)
year, month, day = date_components
ad_year, ad_month, ad_day = _bs_to_ad(year, month, day)
formatted_date = '{}-{:02}-{:02}'.format(ad_year, ad_month, ad_day)
return formatted_date
def convert_to_bs(ad_date):
date_components = decompose_date(ad_date)
year, month, day = date_components
bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)
formatted_date = '{}-{:02}-{:02}'.format(bs_year, bs_month, bs_day)
return formatted_date
<|reserved_special_token_1|>
__author__ = 'sushil'
<|reserved_special_token_0|>
def convert_to_ad(bs_date):
date_components = decompose_date(bs_date)
year, month, day = date_components
ad_year, ad_month, ad_day = _bs_to_ad(year, month, day)
formatted_date = '{}-{:02}-{:02}'.format(ad_year, ad_month, ad_day)
return formatted_date
def convert_to_bs(ad_date):
date_components = decompose_date(ad_date)
year, month, day = date_components
bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)
formatted_date = '{}-{:02}-{:02}'.format(bs_year, bs_month, bs_day)
return formatted_date
<|reserved_special_token_1|>
__author__ = 'sushil'
from .utilities import decompose_date
from .DateConverter import _bs_to_ad, _ad_to_bs
def convert_to_ad(bs_date):
date_components = decompose_date(bs_date)
year, month, day = date_components
ad_year, ad_month, ad_day = _bs_to_ad(year, month, day)
formatted_date = '{}-{:02}-{:02}'.format(ad_year, ad_month, ad_day)
return formatted_date
def convert_to_bs(ad_date):
date_components = decompose_date(ad_date)
year, month, day = date_components
bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)
formatted_date = '{}-{:02}-{:02}'.format(bs_year, bs_month, bs_day)
return formatted_date
<|reserved_special_token_1|>
__author__ = 'sushil'
from .utilities import decompose_date
from .DateConverter import _bs_to_ad, _ad_to_bs
def convert_to_ad(bs_date):
date_components = decompose_date(bs_date)
year, month, day = date_components
ad_year, ad_month, ad_day = _bs_to_ad(year, month, day)
formatted_date = "{}-{:02}-{:02}".format(ad_year, ad_month, ad_day)
return formatted_date
def convert_to_bs(ad_date):
date_components = decompose_date(ad_date)
year, month, day = date_components
bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)
formatted_date = "{}-{:02}-{:02}".format(bs_year, bs_month, bs_day)
return formatted_date
|
flexible
|
{
"blob_id": "e7295336a168aa2361a9090e79465eab5f564599",
"index": 5076,
"step-1": "<mask token>\n\n\ndef convert_to_bs(ad_date):\n date_components = decompose_date(ad_date)\n year, month, day = date_components\n bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)\n formatted_date = '{}-{:02}-{:02}'.format(bs_year, bs_month, bs_day)\n return formatted_date\n",
"step-2": "<mask token>\n\n\ndef convert_to_ad(bs_date):\n date_components = decompose_date(bs_date)\n year, month, day = date_components\n ad_year, ad_month, ad_day = _bs_to_ad(year, month, day)\n formatted_date = '{}-{:02}-{:02}'.format(ad_year, ad_month, ad_day)\n return formatted_date\n\n\ndef convert_to_bs(ad_date):\n date_components = decompose_date(ad_date)\n year, month, day = date_components\n bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)\n formatted_date = '{}-{:02}-{:02}'.format(bs_year, bs_month, bs_day)\n return formatted_date\n",
"step-3": "__author__ = 'sushil'\n<mask token>\n\n\ndef convert_to_ad(bs_date):\n date_components = decompose_date(bs_date)\n year, month, day = date_components\n ad_year, ad_month, ad_day = _bs_to_ad(year, month, day)\n formatted_date = '{}-{:02}-{:02}'.format(ad_year, ad_month, ad_day)\n return formatted_date\n\n\ndef convert_to_bs(ad_date):\n date_components = decompose_date(ad_date)\n year, month, day = date_components\n bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)\n formatted_date = '{}-{:02}-{:02}'.format(bs_year, bs_month, bs_day)\n return formatted_date\n",
"step-4": "__author__ = 'sushil'\nfrom .utilities import decompose_date\nfrom .DateConverter import _bs_to_ad, _ad_to_bs\n\n\ndef convert_to_ad(bs_date):\n date_components = decompose_date(bs_date)\n year, month, day = date_components\n ad_year, ad_month, ad_day = _bs_to_ad(year, month, day)\n formatted_date = '{}-{:02}-{:02}'.format(ad_year, ad_month, ad_day)\n return formatted_date\n\n\ndef convert_to_bs(ad_date):\n date_components = decompose_date(ad_date)\n year, month, day = date_components\n bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)\n formatted_date = '{}-{:02}-{:02}'.format(bs_year, bs_month, bs_day)\n return formatted_date\n",
"step-5": "__author__ = 'sushil'\nfrom .utilities import decompose_date\nfrom .DateConverter import _bs_to_ad, _ad_to_bs\n\ndef convert_to_ad(bs_date):\n date_components = decompose_date(bs_date)\n year, month, day = date_components\n\n ad_year, ad_month, ad_day = _bs_to_ad(year, month, day)\n formatted_date = \"{}-{:02}-{:02}\".format(ad_year, ad_month, ad_day)\n return formatted_date\n\ndef convert_to_bs(ad_date):\n date_components = decompose_date(ad_date)\n year, month, day = date_components\n\n bs_year, bs_month, bs_day = _ad_to_bs(year, month, day)\n formatted_date = \"{}-{:02}-{:02}\".format(bs_year, bs_month, bs_day)\n return formatted_date\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
GPIO.setmode(GPIO.BCM)
<|reserved_special_token_0|>
for x in range(len(pins)):
GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)
while True:
input_state = 0
for i in range(len(pins)):
input_state = GPIO.input(pins[i])
if input_state == False:
print('Button {0} Pressed'.format(names[i]))
time.sleep(0.2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
GPIO.setmode(GPIO.BCM)
pins = [21, 25, 18]
names = ['First', 'Second', 'Third']
for x in range(len(pins)):
GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)
while True:
input_state = 0
for i in range(len(pins)):
input_state = GPIO.input(pins[i])
if input_state == False:
print('Button {0} Pressed'.format(names[i]))
time.sleep(0.2)
<|reserved_special_token_1|>
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
pins = [21, 25, 18]
names = ['First', 'Second', 'Third']
for x in range(len(pins)):
GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)
while True:
input_state = 0
for i in range(len(pins)):
input_state = GPIO.input(pins[i])
if input_state == False:
print('Button {0} Pressed'.format(names[i]))
time.sleep(0.2)
<|reserved_special_token_1|>
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
#led = 21
pins = [21, 25, 18]
# 0 1 2 3 4
names = ["First", "Second", "Third"]
for x in range(len(pins)):
GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)
#GPIO.setup(led, GPIO.OUT)
while True:
input_state = 0
for i in range(len(pins)):
input_state = GPIO.input(pins[i])
if input_state == False:
print('Button {0} Pressed'.format(names[i]))
time.sleep(0.2)
# if (i == 0):
# print("TURN ON LED")
# GPIO.output(led, 1)
# if (i == 1):
# print("TURN OFF LED")
# GPIO.output(led, 0)
|
flexible
|
{
"blob_id": "d292de887c427e3a1b95d13cef17de1804f8f9ee",
"index": 6535,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nGPIO.setmode(GPIO.BCM)\n<mask token>\nfor x in range(len(pins)):\n GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)\nwhile True:\n input_state = 0\n for i in range(len(pins)):\n input_state = GPIO.input(pins[i])\n if input_state == False:\n print('Button {0} Pressed'.format(names[i]))\n time.sleep(0.2)\n",
"step-3": "<mask token>\nGPIO.setmode(GPIO.BCM)\npins = [21, 25, 18]\nnames = ['First', 'Second', 'Third']\nfor x in range(len(pins)):\n GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)\nwhile True:\n input_state = 0\n for i in range(len(pins)):\n input_state = GPIO.input(pins[i])\n if input_state == False:\n print('Button {0} Pressed'.format(names[i]))\n time.sleep(0.2)\n",
"step-4": "import RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\npins = [21, 25, 18]\nnames = ['First', 'Second', 'Third']\nfor x in range(len(pins)):\n GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)\nwhile True:\n input_state = 0\n for i in range(len(pins)):\n input_state = GPIO.input(pins[i])\n if input_state == False:\n print('Button {0} Pressed'.format(names[i]))\n time.sleep(0.2)\n",
"step-5": "import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\n\n#led = 21\n\npins = [21, 25, 18]\n# 0 1 2 3 4\nnames = [\"First\", \"Second\", \"Third\"]\n\nfor x in range(len(pins)):\n GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n#GPIO.setup(led, GPIO.OUT)\n\n\nwhile True:\n input_state = 0\n for i in range(len(pins)):\n input_state = GPIO.input(pins[i])\n if input_state == False:\n print('Button {0} Pressed'.format(names[i]))\n time.sleep(0.2)\n # if (i == 0):\n # print(\"TURN ON LED\")\n # GPIO.output(led, 1)\n # if (i == 1):\n # print(\"TURN OFF LED\")\n # GPIO.output(led, 0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# given a set A and n other sets.
# find whether set A is a strict superset of each of the n sets
# print True if yes, otherwise False
A = set(map(int, input().split()))
b = []
for _ in range(int(input())):
b.append(A > set(map(int, input().split())))
print(all(b))
|
normal
|
{
"blob_id": "a9eb2b3f26396918c792de3f126e51bde334b709",
"index": 7777,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\nprint(all(b))\n",
"step-3": "A = set(map(int, input().split()))\nb = []\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\nprint(all(b))\n",
"step-4": "#!/usr/bin/env python3\n\n# given a set A and n other sets.\n# find whether set A is a strict superset of each of the n sets\n# print True if yes, otherwise False\n\nA = set(map(int, input().split()))\nb = []\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\n\nprint(all(b))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from boa3.builtin import public
@public
def Main() ->int:
a = 'just a test'
return len(a)
|
normal
|
{
"blob_id": "e44e19dbeb6e1e346ca371ca8730f53ee5b95d47",
"index": 5402,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@public\ndef Main() ->int:\n a = 'just a test'\n return len(a)\n",
"step-3": "from boa3.builtin import public\n\n\n@public\ndef Main() ->int:\n a = 'just a test'\n return len(a)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class CategoryViewSet(viewsets.ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CategoryViewSet(viewsets.ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_permissions(self):
permission_classes = permissions.AllowAny,
return [permission() for permission in permission_classes]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CategoryViewSet(viewsets.ModelViewSet):
serializer_class = CategorySerializer
queryset = Category.objects.all()
def get_permissions(self):
permission_classes = permissions.AllowAny,
return [permission() for permission in permission_classes]
<|reserved_special_token_1|>
from . import *
from rest_framework import permissions
from core.serializers import CategorySerializer
from core.models.category_model import Category
class CategoryViewSet(viewsets.ModelViewSet):
serializer_class = CategorySerializer
queryset = Category.objects.all()
def get_permissions(self):
permission_classes = permissions.AllowAny,
return [permission() for permission in permission_classes]
<|reserved_special_token_1|>
from . import *
from rest_framework import permissions
from core.serializers import CategorySerializer
from core.models.category_model import Category
class CategoryViewSet(viewsets.ModelViewSet):
serializer_class = CategorySerializer
queryset = Category.objects.all()
def get_permissions(self):
permission_classes = (permissions.AllowAny,)
return [permission() for permission in permission_classes]
|
flexible
|
{
"blob_id": "5723e7889663142832a8131bb5f4c35d29692a49",
"index": 6325,
"step-1": "<mask token>\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n\n def get_permissions(self):\n permission_classes = permissions.AllowAny,\n return [permission() for permission in permission_classes]\n",
"step-3": "<mask token>\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n\n def get_permissions(self):\n permission_classes = permissions.AllowAny,\n return [permission() for permission in permission_classes]\n",
"step-4": "from . import *\nfrom rest_framework import permissions\nfrom core.serializers import CategorySerializer\nfrom core.models.category_model import Category\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n\n def get_permissions(self):\n permission_classes = permissions.AllowAny,\n return [permission() for permission in permission_classes]\n",
"step-5": "from . import *\nfrom rest_framework import permissions\n\nfrom core.serializers import CategorySerializer\nfrom core.models.category_model import Category\n\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n\n def get_permissions(self):\n permission_classes = (permissions.AllowAny,)\n return [permission() for permission in permission_classes]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(file_name, 'a') as file_object:
json.dump(favourite_number, file_object)
print(f'{favourite_number} is saved in {file_name}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
file_name = 'supporting_files/favourite_number.json'
favourite_number = input('Enter you favourite number')
with open(file_name, 'a') as file_object:
json.dump(favourite_number, file_object)
print(f'{favourite_number} is saved in {file_name}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import json
file_name = 'supporting_files/favourite_number.json'
favourite_number = input('Enter you favourite number')
with open(file_name, 'a') as file_object:
json.dump(favourite_number, file_object)
print(f'{favourite_number} is saved in {file_name}')
<|reserved_special_token_1|>
"""
Write a program that prompts for the user’s favorite number.
Use json.dump() to store this number in a file. Write a separate program that reads in this value and
prints the message, “I know your favorite number! It’s _____.”
"""
import json
file_name = 'supporting_files/favourite_number.json'
favourite_number = input('Enter you favourite number')
with open(file_name, 'a') as file_object:
json.dump(favourite_number, file_object)
print(f'{favourite_number} is saved in {file_name}')
|
flexible
|
{
"blob_id": "7a359d4b31bd1fd35cd1a9a1de4cbf4635e23def",
"index": 7932,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(file_name, 'a') as file_object:\n json.dump(favourite_number, file_object)\nprint(f'{favourite_number} is saved in {file_name}')\n",
"step-3": "<mask token>\nfile_name = 'supporting_files/favourite_number.json'\nfavourite_number = input('Enter you favourite number')\nwith open(file_name, 'a') as file_object:\n json.dump(favourite_number, file_object)\nprint(f'{favourite_number} is saved in {file_name}')\n",
"step-4": "<mask token>\nimport json\nfile_name = 'supporting_files/favourite_number.json'\nfavourite_number = input('Enter you favourite number')\nwith open(file_name, 'a') as file_object:\n json.dump(favourite_number, file_object)\nprint(f'{favourite_number} is saved in {file_name}')\n",
"step-5": "\"\"\"\nWrite a program that prompts for the user’s favorite number.\nUse json.dump() to store this number in a file. Write a separate program that reads in this value and\nprints the message, “I know your favorite number! It’s _____.”\n\"\"\"\n\nimport json\n\nfile_name = 'supporting_files/favourite_number.json'\nfavourite_number = input('Enter you favourite number')\n\nwith open(file_name, 'a') as file_object:\n json.dump(favourite_number, file_object)\nprint(f'{favourite_number} is saved in {file_name}')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Дано натуральное число. Требуется определить,
# является ли год с данным номером високосным.
# Если год является високосным, то выведите `YES`, иначе выведите `NO`.
# Напомним, что в соответствии с григорианским календарем, год является високосным,
# если его номер кратен 4, но не кратен 100, а также если он кратен 400.
year = int(input('введите год '))
if year % 4 == 0 and not year % 100 == 0:
print('YES')
elif year % 400 == 0:
print('yes')
else:
print('NO')
|
normal
|
{
"blob_id": "99e6e734c7d638e3cf4d50d9605c99d5e700e82a",
"index": 1699,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif year % 4 == 0 and not year % 100 == 0:\n print('YES')\nelif year % 400 == 0:\n print('yes')\nelse:\n print('NO')\n",
"step-3": "year = int(input('введите год '))\nif year % 4 == 0 and not year % 100 == 0:\n print('YES')\nelif year % 400 == 0:\n print('yes')\nelse:\n print('NO')\n",
"step-4": "# Дано натуральное число. Требуется определить,\n# является ли год с данным номером високосным.\n# Если год является високосным, то выведите `YES`, иначе выведите `NO`.\n# Напомним, что в соответствии с григорианским календарем, год является високосным,\n# если его номер кратен 4, но не кратен 100, а также если он кратен 400.\n\nyear = int(input('введите год '))\nif year % 4 == 0 and not year % 100 == 0:\n print('YES')\nelif year % 400 == 0:\n print('yes')\nelse:\n print('NO')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Task:
<|reserved_special_token_0|>
def set_ready(self, ready: float) ->None:
self._ready = ready
<|reserved_special_token_0|>
def __call__(self) ->None:
self._f()
<|reserved_special_token_0|>
def __str__(self):
return 'Task(' + str(self._ready) + ')'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Task:
<|reserved_special_token_0|>
def set_ready(self, ready: float) ->None:
self._ready = ready
def get_ready(self) ->float:
return self._ready
def __call__(self) ->None:
self._f()
<|reserved_special_token_0|>
def __str__(self):
return 'Task(' + str(self._ready) + ')'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Task:
def __init__(self, f, ready: float):
self._f = f
self._ready = ready
def set_ready(self, ready: float) ->None:
self._ready = ready
def get_ready(self) ->float:
return self._ready
def __call__(self) ->None:
self._f()
<|reserved_special_token_0|>
def __str__(self):
return 'Task(' + str(self._ready) + ')'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Task:
def __init__(self, f, ready: float):
self._f = f
self._ready = ready
def set_ready(self, ready: float) ->None:
self._ready = ready
def get_ready(self) ->float:
return self._ready
def __call__(self) ->None:
self._f()
def __lt__(self, other) ->bool:
return self._ready < other.get_ready()
def __str__(self):
return 'Task(' + str(self._ready) + ')'
<|reserved_special_token_1|>
from time import time
class Task:
def __init__(self, f, ready: float):
self._f = f
self._ready = ready
def set_ready(self, ready: float) -> None:
self._ready = ready
def get_ready(self) -> float:
return self._ready
def __call__(self) -> None:
self._f()
def __lt__(self, other) -> bool:
return self._ready < other.get_ready()
def __str__(self):
return "Task(" + str(self._ready) + ")"
|
flexible
|
{
"blob_id": "b094693b11fdc4f5fbff30e79a9f82d40104611d",
"index": 2697,
"step-1": "<mask token>\n\n\nclass Task:\n <mask token>\n\n def set_ready(self, ready: float) ->None:\n self._ready = ready\n <mask token>\n\n def __call__(self) ->None:\n self._f()\n <mask token>\n\n def __str__(self):\n return 'Task(' + str(self._ready) + ')'\n",
"step-2": "<mask token>\n\n\nclass Task:\n <mask token>\n\n def set_ready(self, ready: float) ->None:\n self._ready = ready\n\n def get_ready(self) ->float:\n return self._ready\n\n def __call__(self) ->None:\n self._f()\n <mask token>\n\n def __str__(self):\n return 'Task(' + str(self._ready) + ')'\n",
"step-3": "<mask token>\n\n\nclass Task:\n\n def __init__(self, f, ready: float):\n self._f = f\n self._ready = ready\n\n def set_ready(self, ready: float) ->None:\n self._ready = ready\n\n def get_ready(self) ->float:\n return self._ready\n\n def __call__(self) ->None:\n self._f()\n <mask token>\n\n def __str__(self):\n return 'Task(' + str(self._ready) + ')'\n",
"step-4": "<mask token>\n\n\nclass Task:\n\n def __init__(self, f, ready: float):\n self._f = f\n self._ready = ready\n\n def set_ready(self, ready: float) ->None:\n self._ready = ready\n\n def get_ready(self) ->float:\n return self._ready\n\n def __call__(self) ->None:\n self._f()\n\n def __lt__(self, other) ->bool:\n return self._ready < other.get_ready()\n\n def __str__(self):\n return 'Task(' + str(self._ready) + ')'\n",
"step-5": "from time import time\n\nclass Task:\n def __init__(self, f, ready: float):\n self._f = f\n self._ready = ready\n\n def set_ready(self, ready: float) -> None:\n self._ready = ready\n\n def get_ready(self) -> float:\n return self._ready\n\n def __call__(self) -> None:\n self._f()\n\n def __lt__(self, other) -> bool:\n return self._ready < other.get_ready()\n\n def __str__(self):\n return \"Task(\" + str(self._ready) + \")\"\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
import sys
from ulang.runtime.main import main
main(sys.argv)
|
normal
|
{
"blob_id": "e0c5498d9b18a6a32fcd2725ef4f6a1adaef6c68",
"index": 2098,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmain(sys.argv)\n",
"step-3": "import sys\nfrom ulang.runtime.main import main\nmain(sys.argv)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def get_cosinus_simularity(tf_idf_map, key_words):
sum_common_terms = 0
sum_tf_idf_terms = 0
for term in tf_idf_map:
if term in key_words:
sum_common_terms += tf_idf_map[term]
sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)
cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +
math.sqrt(len(key_words)))
return cosinus_similarity
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_cosinus_simularity(tf_idf_map, key_words):
sum_common_terms = 0
sum_tf_idf_terms = 0
for term in tf_idf_map:
if term in key_words:
sum_common_terms += tf_idf_map[term]
sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)
cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +
math.sqrt(len(key_words)))
return cosinus_similarity
def get_cosinus_ranked_documents(category, tf_idf_map, reference_words,
context_words):
ranked_documents = []
for document in tf_idf_map:
referens_simularity = get_cosinus_simularity(tf_idf_map[document],
reference_words)
context_simularity = 0
if not referens_simularity == 0:
context_simularity = get_cosinus_simularity(tf_idf_map[document
], context_words)
simularity = context_simularity * referens_simularity
if simularity != 0:
ranked_documents.append((document, simularity))
ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True
)
return ranked_documents
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_tf_idf_map(document, max_freq, n_docs, index):
tf_idf_map = {}
for term in document:
tf = 0
idf = math.log(n_docs)
if term in index and term not in tf_idf_map:
posting_list = index[term]
freq_term = sum([post[1] for post in posting_list])
tf = 0.5 + 0.5 * (freq_term / max_freq)
idf = math.log(1 + n_docs / len(posting_list))
if term not in tf_idf_map:
tf_idf_map[term] = tf * idf
return tf_idf_map
def get_cosinus_simularity(tf_idf_map, key_words):
sum_common_terms = 0
sum_tf_idf_terms = 0
for term in tf_idf_map:
if term in key_words:
sum_common_terms += tf_idf_map[term]
sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)
cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +
math.sqrt(len(key_words)))
return cosinus_similarity
def get_cosinus_ranked_documents(category, tf_idf_map, reference_words,
context_words):
ranked_documents = []
for document in tf_idf_map:
referens_simularity = get_cosinus_simularity(tf_idf_map[document],
reference_words)
context_simularity = 0
if not referens_simularity == 0:
context_simularity = get_cosinus_simularity(tf_idf_map[document
], context_words)
simularity = context_simularity * referens_simularity
if simularity != 0:
ranked_documents.append((document, simularity))
ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True
)
return ranked_documents
<|reserved_special_token_1|>
from operator import itemgetter
import math
def get_tf_idf_map(document, max_freq, n_docs, index):
tf_idf_map = {}
for term in document:
tf = 0
idf = math.log(n_docs)
if term in index and term not in tf_idf_map:
posting_list = index[term]
freq_term = sum([post[1] for post in posting_list])
tf = 0.5 + 0.5 * (freq_term / max_freq)
idf = math.log(1 + n_docs / len(posting_list))
if term not in tf_idf_map:
tf_idf_map[term] = tf * idf
return tf_idf_map
def get_cosinus_simularity(tf_idf_map, key_words):
sum_common_terms = 0
sum_tf_idf_terms = 0
for term in tf_idf_map:
if term in key_words:
sum_common_terms += tf_idf_map[term]
sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)
cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +
math.sqrt(len(key_words)))
return cosinus_similarity
def get_cosinus_ranked_documents(category, tf_idf_map, reference_words,
context_words):
ranked_documents = []
for document in tf_idf_map:
referens_simularity = get_cosinus_simularity(tf_idf_map[document],
reference_words)
context_simularity = 0
if not referens_simularity == 0:
context_simularity = get_cosinus_simularity(tf_idf_map[document
], context_words)
simularity = context_simularity * referens_simularity
if simularity != 0:
ranked_documents.append((document, simularity))
ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True
)
return ranked_documents
<|reserved_special_token_1|>
from operator import itemgetter
import math
def get_tf_idf_map(document, max_freq, n_docs, index):
tf_idf_map = {}
for term in document:
tf = 0
idf = math.log(n_docs)
if term in index and term not in tf_idf_map:
posting_list = index[term]
freq_term = sum([post[1] for post in posting_list])
tf = 0.5 + 0.5*(freq_term/max_freq)
idf = math.log(1 + (n_docs/len(posting_list)))
if term not in tf_idf_map:
tf_idf_map[term] = tf * idf
return tf_idf_map
def get_cosinus_simularity(tf_idf_map, key_words):
sum_common_terms = 0
sum_tf_idf_terms = 0
for term in tf_idf_map:
if term in key_words:
sum_common_terms += tf_idf_map[term]
sum_tf_idf_terms += math.pow(tf_idf_map[term],2)
cosinus_similarity = sum_common_terms/(math.sqrt(sum_tf_idf_terms)+math.sqrt(len(key_words)))
return cosinus_similarity
def get_cosinus_ranked_documents(category, tf_idf_map, reference_words, context_words):
ranked_documents = []
for document in tf_idf_map:
referens_simularity = get_cosinus_simularity(tf_idf_map[document],reference_words)
context_simularity = 0
if not referens_simularity == 0:
context_simularity = get_cosinus_simularity(tf_idf_map[document], context_words)
simularity = context_simularity*referens_simularity
if(simularity != 0):
ranked_documents.append((document,simularity))
ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True)
return ranked_documents
|
flexible
|
{
"blob_id": "39197b3f9f85d94457584d7e488ca376e52207f1",
"index": 5832,
"step-1": "<mask token>\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-3": "<mask token>\n\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map:\n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list])\n tf = 0.5 + 0.5 * (freq_term / max_freq)\n idf = math.log(1 + n_docs / len(posting_list))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n return tf_idf_map\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-4": "from operator import itemgetter\nimport math\n\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map:\n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list])\n tf = 0.5 + 0.5 * (freq_term / max_freq)\n idf = math.log(1 + n_docs / len(posting_list))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n return tf_idf_map\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-5": "from operator import itemgetter\nimport math\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n \n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map: \n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list]) \n tf = 0.5 + 0.5*(freq_term/max_freq)\n idf = math.log(1 + (n_docs/len(posting_list)))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n\n return tf_idf_map\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term],2)\n cosinus_similarity = sum_common_terms/(math.sqrt(sum_tf_idf_terms)+math.sqrt(len(key_words)))\n return cosinus_similarity \n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words, context_words):\n ranked_documents = [] \n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document], context_words)\n simularity = context_simularity*referens_simularity\n if(simularity != 0):\n ranked_documents.append((document,simularity)) \n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True)\n return ranked_documents",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class SparkFinSpace(FinSpace):
import pyspark
<|reserved_special_token_0|>
def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
resp = self.client.get_user_ingestion_info()
upload_location = resp['ingestionPath']
data_frame.write.parquet(upload_location)
return upload_location
def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,
dataset_id: str, change_type: str, wait_for_completion=True):
print('Uploading data...')
upload_location = self.upload_dataframe(data_frame)
print('Data upload finished. Ingesting data...')
return self.ingest_from_s3(upload_location, dataset_id, change_type,
wait_for_completion, format_type='parquet', format_params={})
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SparkFinSpace(FinSpace):
import pyspark
def __init__(self, spark: pyspark.sql.session.SparkSession=None, config
=Config(retries={'max_attempts': 0, 'mode': 'standard'}),
dev_overrides: dict=None):
FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)
self.spark = spark
def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
resp = self.client.get_user_ingestion_info()
upload_location = resp['ingestionPath']
data_frame.write.parquet(upload_location)
return upload_location
def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,
dataset_id: str, change_type: str, wait_for_completion=True):
print('Uploading data...')
upload_location = self.upload_dataframe(data_frame)
print('Data upload finished. Ingesting data...')
return self.ingest_from_s3(upload_location, dataset_id, change_type,
wait_for_completion, format_type='parquet', format_params={})
def read_view_as_spark(self, dataset_id: str, view_id: str):
views = self.list_views(dataset_id=dataset_id, max_results=50)
filtered = [v for v in views if v['id'] == view_id]
if len(filtered) == 0:
raise Exception('No such view found')
if len(filtered) > 1:
raise Exception('Internal Server error')
view = filtered[0]
if view['status'] != 'SUCCESS':
status = view['status']
print(f'view run status is not ready: {status}. Returning empty.')
return
glue_db_name = view['destinationTypeProperties']['databaseName']
glue_table_name = view['destinationTypeProperties']['tableName']
return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SparkFinSpace(FinSpace):
import pyspark
def __init__(self, spark: pyspark.sql.session.SparkSession=None, config
=Config(retries={'max_attempts': 0, 'mode': 'standard'}),
dev_overrides: dict=None):
FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)
self.spark = spark
def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
resp = self.client.get_user_ingestion_info()
upload_location = resp['ingestionPath']
data_frame.write.parquet(upload_location)
return upload_location
def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,
dataset_id: str, change_type: str, wait_for_completion=True):
print('Uploading data...')
upload_location = self.upload_dataframe(data_frame)
print('Data upload finished. Ingesting data...')
return self.ingest_from_s3(upload_location, dataset_id, change_type,
wait_for_completion, format_type='parquet', format_params={})
def read_view_as_spark(self, dataset_id: str, view_id: str):
views = self.list_views(dataset_id=dataset_id, max_results=50)
filtered = [v for v in views if v['id'] == view_id]
if len(filtered) == 0:
raise Exception('No such view found')
if len(filtered) > 1:
raise Exception('Internal Server error')
view = filtered[0]
if view['status'] != 'SUCCESS':
status = view['status']
print(f'view run status is not ready: {status}. Returning empty.')
return
glue_db_name = view['destinationTypeProperties']['databaseName']
glue_table_name = view['destinationTypeProperties']['tableName']
return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')
def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame
):
from pyspark.sql.types import StructType
DoubleType = 'DOUBLE'
FloatType = 'FLOAT'
DateType = 'DATE'
StringType = 'STRING'
IntegerType = 'INTEGER'
LongType = 'BIGINT'
BooleanType = 'BOOLEAN'
TimestampType = 'DATETIME'
hab_columns = []
items = [i for i in data_frame.schema]
switcher = {'BinaryType': StringType, 'BooleanType': BooleanType,
'ByteType': IntegerType, 'DateType': DateType, 'DoubleType':
FloatType, 'IntegerType': IntegerType, 'LongType': IntegerType,
'NullType': StringType, 'ShortType': IntegerType, 'StringType':
StringType, 'TimestampType': TimestampType}
for i in items:
habType = switcher.get(str(i.dataType), StringType)
hab_columns.append({'dataType': habType, 'name': i.name,
'description': ''})
return hab_columns
<|reserved_special_token_1|>
import datetime
import time
import boto3
from botocore.config import Config
class SparkFinSpace(FinSpace):
import pyspark
def __init__(self, spark: pyspark.sql.session.SparkSession=None, config
=Config(retries={'max_attempts': 0, 'mode': 'standard'}),
dev_overrides: dict=None):
FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)
self.spark = spark
def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
resp = self.client.get_user_ingestion_info()
upload_location = resp['ingestionPath']
data_frame.write.parquet(upload_location)
return upload_location
def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,
dataset_id: str, change_type: str, wait_for_completion=True):
print('Uploading data...')
upload_location = self.upload_dataframe(data_frame)
print('Data upload finished. Ingesting data...')
return self.ingest_from_s3(upload_location, dataset_id, change_type,
wait_for_completion, format_type='parquet', format_params={})
def read_view_as_spark(self, dataset_id: str, view_id: str):
views = self.list_views(dataset_id=dataset_id, max_results=50)
filtered = [v for v in views if v['id'] == view_id]
if len(filtered) == 0:
raise Exception('No such view found')
if len(filtered) > 1:
raise Exception('Internal Server error')
view = filtered[0]
if view['status'] != 'SUCCESS':
status = view['status']
print(f'view run status is not ready: {status}. Returning empty.')
return
glue_db_name = view['destinationTypeProperties']['databaseName']
glue_table_name = view['destinationTypeProperties']['tableName']
return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')
def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame
):
from pyspark.sql.types import StructType
DoubleType = 'DOUBLE'
FloatType = 'FLOAT'
DateType = 'DATE'
StringType = 'STRING'
IntegerType = 'INTEGER'
LongType = 'BIGINT'
BooleanType = 'BOOLEAN'
TimestampType = 'DATETIME'
hab_columns = []
items = [i for i in data_frame.schema]
switcher = {'BinaryType': StringType, 'BooleanType': BooleanType,
'ByteType': IntegerType, 'DateType': DateType, 'DoubleType':
FloatType, 'IntegerType': IntegerType, 'LongType': IntegerType,
'NullType': StringType, 'ShortType': IntegerType, 'StringType':
StringType, 'TimestampType': TimestampType}
for i in items:
habType = switcher.get(str(i.dataType), StringType)
hab_columns.append({'dataType': habType, 'name': i.name,
'description': ''})
return hab_columns
<|reserved_special_token_1|>
import datetime
import time
import boto3
from botocore.config import Config
# FinSpace class with Spark bindings
class SparkFinSpace(FinSpace):
import pyspark
def __init__(
self,
spark: pyspark.sql.session.SparkSession = None,
config = Config(retries = {'max_attempts': 0, 'mode': 'standard'}),
dev_overrides: dict = None
):
FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)
self.spark = spark # used on Spark cluster for reading views, creating changesets from DataFrames
def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
resp = self.client.get_user_ingestion_info()
upload_location = resp['ingestionPath']
# data_frame.write.option('header', 'true').csv(upload_location)
data_frame.write.parquet(upload_location)
return upload_location
def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):
print("Uploading data...")
upload_location = self.upload_dataframe(data_frame)
print("Data upload finished. Ingesting data...")
return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='parquet', format_params={})
def read_view_as_spark(
self,
dataset_id: str,
view_id: str
):
# TODO: switch to DescribeMatz when available in HFS
views = self.list_views(dataset_id=dataset_id, max_results=50)
filtered = [v for v in views if v['id'] == view_id]
if len(filtered) == 0:
raise Exception('No such view found')
if len(filtered) > 1:
raise Exception('Internal Server error')
view = filtered[0]
# 0. Ensure view is ready to be read
if (view['status'] != 'SUCCESS'):
status = view['status']
print(f'view run status is not ready: {status}. Returning empty.')
return
glue_db_name = view['destinationTypeProperties']['databaseName']
glue_table_name = view['destinationTypeProperties']['tableName']
# Query Glue table directly with catalog function of spark
return self.spark.table(f"`{glue_db_name}`.`{glue_table_name}`")
def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):
from pyspark.sql.types import StructType
# for translation to FinSpace's schema
# 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'
DoubleType = "DOUBLE"
FloatType = "FLOAT"
DateType = "DATE"
StringType = "STRING"
IntegerType = "INTEGER"
LongType = "BIGINT"
BooleanType = "BOOLEAN"
TimestampType = "DATETIME"
hab_columns = []
items = [i for i in data_frame.schema]
switcher = {
"BinaryType" : StringType,
"BooleanType" : BooleanType,
"ByteType" : IntegerType,
"DateType" : DateType,
"DoubleType" : FloatType,
"IntegerType" : IntegerType,
"LongType" : IntegerType,
"NullType" : StringType,
"ShortType" : IntegerType,
"StringType" : StringType,
"TimestampType" : TimestampType,
}
for i in items:
# print( f"name: {i.name} type: {i.dataType}" )
habType = switcher.get( str(i.dataType), StringType)
hab_columns.append({
"dataType" : habType,
"name" : i.name,
"description" : ""
})
return( hab_columns )
|
flexible
|
{
"blob_id": "4f4af4caf81397542e9cd94c50b54303e2f81881",
"index": 3926,
"step-1": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n <mask token>\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. 
Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n\n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame\n ):\n from pyspark.sql.types import StructType\n DoubleType = 'DOUBLE'\n FloatType = 'FLOAT'\n DateType = 'DATE'\n StringType = 'STRING'\n IntegerType = 'INTEGER'\n LongType = 'BIGINT'\n BooleanType = 'BOOLEAN'\n TimestampType = 'DATETIME'\n hab_columns = []\n items = [i for i in data_frame.schema]\n switcher = {'BinaryType': StringType, 'BooleanType': BooleanType,\n 'ByteType': IntegerType, 'DateType': DateType, 'DoubleType':\n FloatType, 'IntegerType': IntegerType, 'LongType': IntegerType,\n 'NullType': StringType, 'ShortType': IntegerType, 'StringType':\n StringType, 'TimestampType': TimestampType}\n for i in items:\n habType = switcher.get(str(i.dataType), StringType)\n hab_columns.append({'dataType': habType, 'name': i.name,\n 'description': ''})\n return hab_columns\n",
"step-4": "import datetime\nimport time\nimport boto3\nfrom botocore.config import Config\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. 
Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n\n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame\n ):\n from pyspark.sql.types import StructType\n DoubleType = 'DOUBLE'\n FloatType = 'FLOAT'\n DateType = 'DATE'\n StringType = 'STRING'\n IntegerType = 'INTEGER'\n LongType = 'BIGINT'\n BooleanType = 'BOOLEAN'\n TimestampType = 'DATETIME'\n hab_columns = []\n items = [i for i in data_frame.schema]\n switcher = {'BinaryType': StringType, 'BooleanType': BooleanType,\n 'ByteType': IntegerType, 'DateType': DateType, 'DoubleType':\n FloatType, 'IntegerType': IntegerType, 'LongType': IntegerType,\n 'NullType': StringType, 'ShortType': IntegerType, 'StringType':\n StringType, 'TimestampType': TimestampType}\n for i in items:\n habType = switcher.get(str(i.dataType), StringType)\n hab_columns.append({'dataType': habType, 'name': i.name,\n 'description': ''})\n return hab_columns\n",
"step-5": "import datetime\nimport time\nimport boto3\nfrom botocore.config import Config\n\n# FinSpace class with Spark bindings\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n def __init__(\n self, \n spark: pyspark.sql.session.SparkSession = None,\n config = Config(retries = {'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict = None\n ):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark # used on Spark cluster for reading views, creating changesets from DataFrames\n \n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n# data_frame.write.option('header', 'true').csv(upload_location)\n data_frame.write.parquet(upload_location)\n return upload_location\n \n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):\n print(\"Uploading data...\")\n upload_location = self.upload_dataframe(data_frame)\n \n print(\"Data upload finished. Ingesting data...\")\n \n return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='parquet', format_params={})\n \n def read_view_as_spark(\n self,\n dataset_id: str,\n view_id: str\n ):\n # TODO: switch to DescribeMatz when available in HFS\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n \n # 0. Ensure view is ready to be read\n if (view['status'] != 'SUCCESS'): \n status = view['status'] \n print(f'view run status is not ready: {status}. 
Returning empty.')\n return\n\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n \n # Query Glue table directly with catalog function of spark\n return self.spark.table(f\"`{glue_db_name}`.`{glue_table_name}`\")\n \n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):\n from pyspark.sql.types import StructType\n\n # for translation to FinSpace's schema\n # 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'\n DoubleType = \"DOUBLE\"\n FloatType = \"FLOAT\"\n DateType = \"DATE\"\n StringType = \"STRING\"\n IntegerType = \"INTEGER\"\n LongType = \"BIGINT\"\n BooleanType = \"BOOLEAN\"\n TimestampType = \"DATETIME\"\n \n hab_columns = []\n\n items = [i for i in data_frame.schema] \n\n switcher = {\n \"BinaryType\" : StringType,\n \"BooleanType\" : BooleanType,\n \"ByteType\" : IntegerType,\n \"DateType\" : DateType,\n \"DoubleType\" : FloatType,\n \"IntegerType\" : IntegerType,\n \"LongType\" : IntegerType,\n \"NullType\" : StringType,\n \"ShortType\" : IntegerType,\n \"StringType\" : StringType,\n \"TimestampType\" : TimestampType,\n }\n\n \n for i in items:\n# print( f\"name: {i.name} type: {i.dataType}\" )\n\n habType = switcher.get( str(i.dataType), StringType)\n\n hab_columns.append({\n \"dataType\" : habType, \n \"name\" : i.name,\n \"description\" : \"\"\n })\n\n return( hab_columns )\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isValid(s):
if not s:
return True
x = Counter(s)
print(x)
first_c = x.pop(s[0])
cnt = 0
for k, c in x.items():
if c != first_c:
if first_c == 1:
cnt += 1
first_c = c
else:
cnt += abs(c - first_c) if c != 1 else 1
if cnt >= 2:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isValid(s):
if not s:
return True
x = Counter(s)
print(x)
first_c = x.pop(s[0])
cnt = 0
for k, c in x.items():
if c != first_c:
if first_c == 1:
cnt += 1
first_c = c
else:
cnt += abs(c - first_c) if c != 1 else 1
if cnt >= 2:
return False
return True
if __name__ == '__main__':
s = 'ibfdgaeadiaefgbhbdghhhbgdfgeiccbi'
r = isValid(s)
print(r)
<|reserved_special_token_1|>
from collections import Counter
def isValid(s):
if not s:
return True
x = Counter(s)
print(x)
first_c = x.pop(s[0])
cnt = 0
for k, c in x.items():
if c != first_c:
if first_c == 1:
cnt += 1
first_c = c
else:
cnt += abs(c - first_c) if c != 1 else 1
if cnt >= 2:
return False
return True
if __name__ == '__main__':
s = 'ibfdgaeadiaefgbhbdghhhbgdfgeiccbi'
r = isValid(s)
print(r)
<|reserved_special_token_1|>
from collections import Counter
def isValid(s):
    """Return True if deleting at most one character from *s* can make
    every character occur the same number of times.

    Uses the frequency of the first character as the baseline and
    tolerates at most one unit of mismatch against it.
    """
    if not s:
        return True

    freq = Counter(s)
    print(freq)
    baseline = freq.pop(s[0])
    mismatch = 0
    for _, count in freq.items():
        if count == baseline:
            continue
        if baseline == 1:
            # A baseline of 1 may itself be the removable outlier;
            # switch the baseline to this character's count.
            mismatch += 1
            baseline = count
        elif count == 1:
            mismatch += 1
        else:
            mismatch += abs(count - baseline)
        if mismatch >= 2:
            return False
    return True


if __name__ == '__main__':
    sample = 'ibfdgaeadiaefgbhbdghhhbgdfgeiccbi'
    print(isValid(sample))
|
flexible
|
{
"blob_id": "760daa908ca92e7fb1393bdf28fee086dc1648ef",
"index": 6418,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef isValid(s):\n if not s:\n return True\n x = Counter(s)\n print(x)\n first_c = x.pop(s[0])\n cnt = 0\n for k, c in x.items():\n if c != first_c:\n if first_c == 1:\n cnt += 1\n first_c = c\n else:\n cnt += abs(c - first_c) if c != 1 else 1\n if cnt >= 2:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isValid(s):\n if not s:\n return True\n x = Counter(s)\n print(x)\n first_c = x.pop(s[0])\n cnt = 0\n for k, c in x.items():\n if c != first_c:\n if first_c == 1:\n cnt += 1\n first_c = c\n else:\n cnt += abs(c - first_c) if c != 1 else 1\n if cnt >= 2:\n return False\n return True\n\n\nif __name__ == '__main__':\n s = 'ibfdgaeadiaefgbhbdghhhbgdfgeiccbi'\n r = isValid(s)\n print(r)\n",
"step-4": "from collections import Counter\n\n\ndef isValid(s):\n if not s:\n return True\n x = Counter(s)\n print(x)\n first_c = x.pop(s[0])\n cnt = 0\n for k, c in x.items():\n if c != first_c:\n if first_c == 1:\n cnt += 1\n first_c = c\n else:\n cnt += abs(c - first_c) if c != 1 else 1\n if cnt >= 2:\n return False\n return True\n\n\nif __name__ == '__main__':\n s = 'ibfdgaeadiaefgbhbdghhhbgdfgeiccbi'\n r = isValid(s)\n print(r)\n",
"step-5": "from collections import Counter\n\n\n# Complete the isValid function below.\ndef isValid(s):\n if not s:\n return True\n\n x = Counter(s)\n print(x)\n first_c = x.pop(s[0])\n cnt = 0\n for k, c in x.items():\n if c != first_c:\n if first_c == 1:\n cnt += 1\n first_c = c\n else:\n cnt += abs(c - first_c) if c != 1 else 1\n if cnt >= 2:\n return False\n return True\n\n\nif __name__ == '__main__':\n s = \"ibfdgaeadiaefgbhbdghhhbgdfgeiccbi\"\n r = isValid(s)\n print(r)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class ListNode:
    """A node of a singly linked list."""

    def __init__(self, value=0, next=None):
        self.value = value  # payload stored in this node
        self.next = next    # following node, or None at the tail


def count(node: ListNode) -> int:
    """Return the number of nodes in the list starting at *node*.

    Iterative rather than recursive so that arbitrarily long lists do
    not exhaust Python's recursion limit.
    """
    total = 0
    while node is not None:
        total += 1
        node = node.next
    return total


# Test Cases
LL1 = ListNode(1, ListNode(4, ListNode(5)))
print(count(None))        # 0
print(count(LL1))         # 3
print(count(ListNode()))  # 1
|
normal
|
{
"blob_id": "8c6169bd812a5f34693b12ce2c886969542f1ab8",
"index": 2352,
"step-1": "class ListNode:\n\n def __init__(self, value=0, next=None):\n self.value = value\n self.next = next\n\n\n<mask token>\n",
"step-2": "class ListNode:\n\n def __init__(self, value=0, next=None):\n self.value = value\n self.next = next\n\n\ndef count(node: ListNode) ->int:\n if node is None:\n return 0\n else:\n return count(node.next) + 1\n\n\n<mask token>\n",
"step-3": "class ListNode:\n\n def __init__(self, value=0, next=None):\n self.value = value\n self.next = next\n\n\ndef count(node: ListNode) ->int:\n if node is None:\n return 0\n else:\n return count(node.next) + 1\n\n\n<mask token>\nprint(count(None))\nprint(count(LL1))\nprint(count(ListNode()))\n",
"step-4": "class ListNode:\n\n def __init__(self, value=0, next=None):\n self.value = value\n self.next = next\n\n\ndef count(node: ListNode) ->int:\n if node is None:\n return 0\n else:\n return count(node.next) + 1\n\n\nLL1 = ListNode(1, ListNode(4, ListNode(5)))\nprint(count(None))\nprint(count(LL1))\nprint(count(ListNode()))\n",
"step-5": "class ListNode:\n def __init__(self, value = 0, next = None): \n self.value = value\n self.next = next\n \ndef count(node: ListNode) -> int:\n if node is None:\n return 0\n else:\n return count(node.next) + 1\n \n\n# Test Cases\nLL1 = ListNode(1, ListNode(4, ListNode(5)))\nprint(count(None)) # 0\nprint(count(LL1)) # 3\nprint(count(ListNode())) # 1\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class GameOfLife:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class GameOfLife:
@staticmethod
def simulate(board):
for row in range(len(board)):
for col in range(len(board[0])):
ones = GameOfLife.countOnes(board, row, col)
if board[row][col] and (ones == 2 or ones == 3):
board[row][col] |= 2
elif not board[row][col] and ones == 3:
board[row][col] |= 2
for row in range(len(board)):
for col in range(len(board[0])):
board[row][col] >>= 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class GameOfLife:
@staticmethod
def simulate(board):
for row in range(len(board)):
for col in range(len(board[0])):
ones = GameOfLife.countOnes(board, row, col)
if board[row][col] and (ones == 2 or ones == 3):
board[row][col] |= 2
elif not board[row][col] and ones == 3:
board[row][col] |= 2
for row in range(len(board)):
for col in range(len(board[0])):
board[row][col] >>= 1
@staticmethod
def countOnes(board, row, col):
total = 0
total += GameOfLife.isOne(board, row - 1, col - 1)
total += GameOfLife.isOne(board, row - 1, col)
total += GameOfLife.isOne(board, row - 1, col + 1)
total += GameOfLife.isOne(board, row, col - 1)
total += GameOfLife.isOne(board, row, col + 1)
total += GameOfLife.isOne(board, row + 1, col - 1)
total += GameOfLife.isOne(board, row + 1, col)
total += GameOfLife.isOne(board, row + 1, col + 1)
return total
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class GameOfLife:
    """In-place one-step simulation of Conway's Game of Life.

    Each cell uses two bits: bit 0 holds the current state, and bit 1
    temporarily stages the next state so the board can be updated
    without allocating a scratch copy.
    """

    @staticmethod
    def simulate(board):
        """Advance *board* (list of rows of 0/1 ints) by one generation in place."""
        for row in range(len(board)):
            for col in range(len(board[0])):
                ones = GameOfLife.countOnes(board, row, col)
                # Survival: a live cell with 2 or 3 live neighbours lives on.
                if board[row][col] and (ones == 2 or ones == 3):
                    board[row][col] |= 2
                # Birth: a dead cell with exactly 3 live neighbours comes alive.
                elif not board[row][col] and ones == 3:
                    board[row][col] |= 2
        # Second pass: shift the staged next state (bit 1) down into bit 0.
        for row in range(len(board)):
            for col in range(len(board[0])):
                board[row][col] >>= 1

    @staticmethod
    def countOnes(board, row, col):
        """Return the number of live neighbours of cell (row, col)."""
        total = 0
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                if dr or dc:  # skip the cell itself
                    total += GameOfLife.isOne(board, row + dr, col + dc)
        return total

    @staticmethod
    def isOne(board, row, col):
        """Return the current (bit-0) state of (row, col), or 0 when out of bounds."""
        if row >= len(board) or row < 0:
            return 0
        # Bug fix: the column bound must be the row width len(board[0]),
        # not the row count len(board), so non-square boards work.
        if col >= len(board[0]) or col < 0:
            return 0
        return board[row][col] & 1
|
flexible
|
{
"blob_id": "862c5794a4da794678de419f053ae15b11bca6e7",
"index": 7453,
"step-1": "class GameOfLife:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class GameOfLife:\n\n @staticmethod\n def simulate(board):\n for row in range(len(board)):\n for col in range(len(board[0])):\n ones = GameOfLife.countOnes(board, row, col)\n if board[row][col] and (ones == 2 or ones == 3):\n board[row][col] |= 2\n elif not board[row][col] and ones == 3:\n board[row][col] |= 2\n for row in range(len(board)):\n for col in range(len(board[0])):\n board[row][col] >>= 1\n <mask token>\n <mask token>\n",
"step-3": "class GameOfLife:\n\n @staticmethod\n def simulate(board):\n for row in range(len(board)):\n for col in range(len(board[0])):\n ones = GameOfLife.countOnes(board, row, col)\n if board[row][col] and (ones == 2 or ones == 3):\n board[row][col] |= 2\n elif not board[row][col] and ones == 3:\n board[row][col] |= 2\n for row in range(len(board)):\n for col in range(len(board[0])):\n board[row][col] >>= 1\n\n @staticmethod\n def countOnes(board, row, col):\n total = 0\n total += GameOfLife.isOne(board, row - 1, col - 1)\n total += GameOfLife.isOne(board, row - 1, col)\n total += GameOfLife.isOne(board, row - 1, col + 1)\n total += GameOfLife.isOne(board, row, col - 1)\n total += GameOfLife.isOne(board, row, col + 1)\n total += GameOfLife.isOne(board, row + 1, col - 1)\n total += GameOfLife.isOne(board, row + 1, col)\n total += GameOfLife.isOne(board, row + 1, col + 1)\n return total\n <mask token>\n",
"step-4": "class GameOfLife:\n\n @staticmethod\n def simulate(board):\n for row in range(len(board)):\n for col in range(len(board[0])):\n ones = GameOfLife.countOnes(board, row, col)\n if board[row][col] and (ones == 2 or ones == 3):\n board[row][col] |= 2\n elif not board[row][col] and ones == 3:\n board[row][col] |= 2\n for row in range(len(board)):\n for col in range(len(board[0])):\n board[row][col] >>= 1\n\n @staticmethod\n def countOnes(board, row, col):\n total = 0\n total += GameOfLife.isOne(board, row - 1, col - 1)\n total += GameOfLife.isOne(board, row - 1, col)\n total += GameOfLife.isOne(board, row - 1, col + 1)\n total += GameOfLife.isOne(board, row, col - 1)\n total += GameOfLife.isOne(board, row, col + 1)\n total += GameOfLife.isOne(board, row + 1, col - 1)\n total += GameOfLife.isOne(board, row + 1, col)\n total += GameOfLife.isOne(board, row + 1, col + 1)\n return total\n\n @staticmethod\n def isOne(board, row, col):\n if row >= len(board) or row < 0:\n return 0\n if col >= len(board) or col < 0:\n return 0\n return board[row][col] & 1\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from collections import Counter
from docx import Document
import docx2txt
# Build a word-frequency table from kashmiri.docx and write it to results.docx.

# Extract the document's raw text and split it on whitespace into words.
plain_text = docx2txt.process("kashmiri.docx")
list_of_words = plain_text.split()
# Tally how often each word occurs.
counter_list_of_words = Counter(list_of_words)
elements = counter_list_of_words.items()

doc = Document()
# Create a two-column table and label its header cells.
table = doc.add_table(rows=1, cols=2)
cell1 = table.cell(0, 0)
cell1.text = 'Word'
cell2 = table.cell(0, 1)
cell2.text = 'Frequency'

# Append one row per word, most frequent first (sorted by count, descending).
for word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):
    cell = table.add_row().cells
    cell[0].text = str(word)
    cell[1].text = str(frequency)
doc.save("results.docx")
|
normal
|
{
"blob_id": "9ad36f157abae849a1550cb96e650746d57f491d",
"index": 9732,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):\n cell = table.add_row().cells\n cell[0].text = str(word)\n cell[1].text = str(frequency)\ndoc.save('results.docx')\n",
"step-3": "<mask token>\nplain_text = docx2txt.process('kashmiri.docx')\nlist_of_words = plain_text.split()\ncounter_list_of_words = Counter(list_of_words)\nelements = counter_list_of_words.items()\ndoc = Document()\ntable = doc.add_table(rows=1, cols=2)\ncell1 = table.cell(0, 0)\ncell1.text = 'Word'\ncell2 = table.cell(0, 1)\ncell2.text = 'Frequency'\nfor word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):\n cell = table.add_row().cells\n cell[0].text = str(word)\n cell[1].text = str(frequency)\ndoc.save('results.docx')\n",
"step-4": "from collections import Counter\nfrom docx import Document\nimport docx2txt\nplain_text = docx2txt.process('kashmiri.docx')\nlist_of_words = plain_text.split()\ncounter_list_of_words = Counter(list_of_words)\nelements = counter_list_of_words.items()\ndoc = Document()\ntable = doc.add_table(rows=1, cols=2)\ncell1 = table.cell(0, 0)\ncell1.text = 'Word'\ncell2 = table.cell(0, 1)\ncell2.text = 'Frequency'\nfor word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):\n cell = table.add_row().cells\n cell[0].text = str(word)\n cell[1].text = str(frequency)\ndoc.save('results.docx')\n",
"step-5": "from collections import Counter\nfrom docx import Document\nimport docx2txt\n\nplain_text = docx2txt.process(\"kashmiri.docx\")\nlist_of_words = plain_text.split()\n#print(Counter(list_of_words))\ncounter_list_of_words = Counter(list_of_words)\nelements = counter_list_of_words.items()\n# for a, b in sorted(elements, key=lambda x: x[1], reverse=True):\n# print(a)\n# print(b)\n\ndoc = Document()\n# Create and Name Table Heading\ntable = doc.add_table(rows=1, cols=2)\ncell1 = table.cell(0, 0)\ncell1.text = 'Word'\ncell2 = table.cell(0, 1)\ncell2.text = 'Frequency'\n\n#Iterate over collection elements and append to table craeted\nfor word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):\n cell = table.add_row().cells\n cell[0].text = str(word)\n cell[1].text = str(frequency)\ndoc.save(\"results.docx\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def parsing_ethernet_header(data):
ethernet_header = struct.unpack('!6c6c2s', data)
ether_dest = convert_ethernet_address(ethernet_header[0:6])
ether_src = convert_ethernet_address(ethernet_header[6:12])
ip_header = '0x' + ethernet_header[12].hex()
print('=========ethernet header==========')
print('src_mac_address:', ether_src)
print('dest_mac_address:', ether_dest)
print('ip_version', ip_header)
def convert_ethernet_address(data):
ethernet_addr = list()
for i in data:
ethernet_addr.append(i.hex())
ethernet_addr = ':'.join(ethernet_addr)
return ethernet_addr
def parsing_ip_header(data):
ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)
print('============ip header=============')
ip_ver_len = int(ip_header[0].hex(), 16)
print('ip_version:', ip_ver_len // 16)
print('ip_length:', ip_ver_len % 16)
differ_expli = int(ip_header[1].hex(), 16)
print('differentiated_service_codepoint:', differ_expli // 16)
print('explicit_congestion_notification:', differ_expli % 16)
total_length = int(ip_header[2].hex(), 16)
print('total_length:', total_length)
identification = ip_header[3].hex()
print('identification:0x', identification)
flags = ip_header[4].hex()
print('flags:0x', flags)
flags_int = int(ip_header[4].hex(), 16)
print('>>>reserved_bit:', flags_int >> 15)
print('>>>fragments:', flags_int >> 13 & 1)
print('>>>fragments_offset:', flags_int & 8191)
time_to_live = int(ip_header[5].hex(), 16)
print('Time to live:', time_to_live)
protocol = ip_header[6].hex()
print('protocol:0x', protocol)
header_check = ip_header[7].hex()
print('header checksum:0x', header_check)
source_addr = convert_ip_address(ip_header[8:12])
print('source_ip_address:', source_addr)
dest_addr = convert_ip_address(ip_header[12:16])
print('dest_ip_address:', dest_addr)
def ch_UDP_TCP(data):
temp = struct.unpack('1c', data)
result = int(temp[0].hex(), 16)
return result
def convert_ip_address(data):
ip_addr = list()
for i in data:
ip_addr.append(str(int(i.hex(), 16)))
ip_addr = '.'.join(ip_addr)
return ip_addr
def parsing_TCP_header(data):
print('=============tcp header==============')
TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)
src_port = int(TCP_header[0].hex(), 16)
print('src_port:', src_port)
dec_port = int(TCP_header[1].hex(), 16)
print('dec_port:', dec_port)
seq_num = TCP_header[2]
print('seq_num:', seq_num)
ack_num = TCP_header[3]
print('ack_num:', ack_num)
header_len = int(TCP_header[4].hex(), 16) >> 12 & 15
print('header_len:', header_len)
flags = int(TCP_header[4].hex(), 16) & 4095
print('flags:', flags)
reserved = flags >> 9
print('>>>reserved', reserved)
nonce = flags >> 8 & 1
print('>>>nonce:', nonce)
cwr = flags >> 7 & 1
print('>>>cwr:', cwr)
urgent = flags >> 5 & 1
print('>>>urgent:', urgent)
ack = flags >> 4 & 1
print('>>>ack:', ack)
push = flags >> 3 & 1
print('>>>push:', push)
reset = flags >> 2 & 1
print('>>>reset:', reset)
syn = flags >> 1 & 1
print('>>>syn:', syn)
fin = flags & 1
print('>>>fin:', fin)
window_size = int(TCP_header[5].hex(), 16)
print('Window_size_value:', window_size)
checksum = int(TCP_header[6].hex(), 16)
print('checksum:', checksum)
urgent_pointer = int(TCP_header[7].hex(), 16)
print('urgent_pointer:', urgent_pointer)
def parsing_UDP_header(data):
UDP_header = struct.unpack('2s2s2s2s', data)
print('=============udp_header=============')
src_port = int(UDP_header[0].hex(), 16)
print('src_port:', src_port)
dst_port = int(UDP_header[1].hex(), 16)
print('dst_port:', dst_port)
leng = int(UDP_header[2].hex(), 16)
print('leng:', leng)
header_checksum = UDP_header[3].hex()
print('header_checksum:0x', header_checksum)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parsing_ethernet_header(data):
ethernet_header = struct.unpack('!6c6c2s', data)
ether_dest = convert_ethernet_address(ethernet_header[0:6])
ether_src = convert_ethernet_address(ethernet_header[6:12])
ip_header = '0x' + ethernet_header[12].hex()
print('=========ethernet header==========')
print('src_mac_address:', ether_src)
print('dest_mac_address:', ether_dest)
print('ip_version', ip_header)
def convert_ethernet_address(data):
ethernet_addr = list()
for i in data:
ethernet_addr.append(i.hex())
ethernet_addr = ':'.join(ethernet_addr)
return ethernet_addr
def parsing_ip_header(data):
ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)
print('============ip header=============')
ip_ver_len = int(ip_header[0].hex(), 16)
print('ip_version:', ip_ver_len // 16)
print('ip_length:', ip_ver_len % 16)
differ_expli = int(ip_header[1].hex(), 16)
print('differentiated_service_codepoint:', differ_expli // 16)
print('explicit_congestion_notification:', differ_expli % 16)
total_length = int(ip_header[2].hex(), 16)
print('total_length:', total_length)
identification = ip_header[3].hex()
print('identification:0x', identification)
flags = ip_header[4].hex()
print('flags:0x', flags)
flags_int = int(ip_header[4].hex(), 16)
print('>>>reserved_bit:', flags_int >> 15)
print('>>>fragments:', flags_int >> 13 & 1)
print('>>>fragments_offset:', flags_int & 8191)
time_to_live = int(ip_header[5].hex(), 16)
print('Time to live:', time_to_live)
protocol = ip_header[6].hex()
print('protocol:0x', protocol)
header_check = ip_header[7].hex()
print('header checksum:0x', header_check)
source_addr = convert_ip_address(ip_header[8:12])
print('source_ip_address:', source_addr)
dest_addr = convert_ip_address(ip_header[12:16])
print('dest_ip_address:', dest_addr)
def ch_UDP_TCP(data):
temp = struct.unpack('1c', data)
result = int(temp[0].hex(), 16)
return result
def convert_ip_address(data):
ip_addr = list()
for i in data:
ip_addr.append(str(int(i.hex(), 16)))
ip_addr = '.'.join(ip_addr)
return ip_addr
def parsing_TCP_header(data):
print('=============tcp header==============')
TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)
src_port = int(TCP_header[0].hex(), 16)
print('src_port:', src_port)
dec_port = int(TCP_header[1].hex(), 16)
print('dec_port:', dec_port)
seq_num = TCP_header[2]
print('seq_num:', seq_num)
ack_num = TCP_header[3]
print('ack_num:', ack_num)
header_len = int(TCP_header[4].hex(), 16) >> 12 & 15
print('header_len:', header_len)
flags = int(TCP_header[4].hex(), 16) & 4095
print('flags:', flags)
reserved = flags >> 9
print('>>>reserved', reserved)
nonce = flags >> 8 & 1
print('>>>nonce:', nonce)
cwr = flags >> 7 & 1
print('>>>cwr:', cwr)
urgent = flags >> 5 & 1
print('>>>urgent:', urgent)
ack = flags >> 4 & 1
print('>>>ack:', ack)
push = flags >> 3 & 1
print('>>>push:', push)
reset = flags >> 2 & 1
print('>>>reset:', reset)
syn = flags >> 1 & 1
print('>>>syn:', syn)
fin = flags & 1
print('>>>fin:', fin)
window_size = int(TCP_header[5].hex(), 16)
print('Window_size_value:', window_size)
checksum = int(TCP_header[6].hex(), 16)
print('checksum:', checksum)
urgent_pointer = int(TCP_header[7].hex(), 16)
print('urgent_pointer:', urgent_pointer)
def parsing_UDP_header(data):
UDP_header = struct.unpack('2s2s2s2s', data)
print('=============udp_header=============')
src_port = int(UDP_header[0].hex(), 16)
print('src_port:', src_port)
dst_port = int(UDP_header[1].hex(), 16)
print('dst_port:', dst_port)
leng = int(UDP_header[2].hex(), 16)
print('leng:', leng)
header_checksum = UDP_header[3].hex()
print('header_checksum:0x', header_checksum)
<|reserved_special_token_0|>
print('<<<<<<Packet Capture Start>>>>>>>')
while True:
data = recv_socket.recvfrom(20000)
parsing_ethernet_header(data[0][0:14])
parsing_ip_header(data[0][14:34])
flag = ch_UDP_TCP(data[0][23:24])
if flag == 6:
parsing_TCP_header(data[0][34:54])
elif flag == 17:
parsing_UDP_header(data[0][34:42])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parsing_ethernet_header(data):
ethernet_header = struct.unpack('!6c6c2s', data)
ether_dest = convert_ethernet_address(ethernet_header[0:6])
ether_src = convert_ethernet_address(ethernet_header[6:12])
ip_header = '0x' + ethernet_header[12].hex()
print('=========ethernet header==========')
print('src_mac_address:', ether_src)
print('dest_mac_address:', ether_dest)
print('ip_version', ip_header)
def convert_ethernet_address(data):
ethernet_addr = list()
for i in data:
ethernet_addr.append(i.hex())
ethernet_addr = ':'.join(ethernet_addr)
return ethernet_addr
def parsing_ip_header(data):
ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)
print('============ip header=============')
ip_ver_len = int(ip_header[0].hex(), 16)
print('ip_version:', ip_ver_len // 16)
print('ip_length:', ip_ver_len % 16)
differ_expli = int(ip_header[1].hex(), 16)
print('differentiated_service_codepoint:', differ_expli // 16)
print('explicit_congestion_notification:', differ_expli % 16)
total_length = int(ip_header[2].hex(), 16)
print('total_length:', total_length)
identification = ip_header[3].hex()
print('identification:0x', identification)
flags = ip_header[4].hex()
print('flags:0x', flags)
flags_int = int(ip_header[4].hex(), 16)
print('>>>reserved_bit:', flags_int >> 15)
print('>>>fragments:', flags_int >> 13 & 1)
print('>>>fragments_offset:', flags_int & 8191)
time_to_live = int(ip_header[5].hex(), 16)
print('Time to live:', time_to_live)
protocol = ip_header[6].hex()
print('protocol:0x', protocol)
header_check = ip_header[7].hex()
print('header checksum:0x', header_check)
source_addr = convert_ip_address(ip_header[8:12])
print('source_ip_address:', source_addr)
dest_addr = convert_ip_address(ip_header[12:16])
print('dest_ip_address:', dest_addr)
def ch_UDP_TCP(data):
temp = struct.unpack('1c', data)
result = int(temp[0].hex(), 16)
return result
def convert_ip_address(data):
ip_addr = list()
for i in data:
ip_addr.append(str(int(i.hex(), 16)))
ip_addr = '.'.join(ip_addr)
return ip_addr
def parsing_TCP_header(data):
print('=============tcp header==============')
TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)
src_port = int(TCP_header[0].hex(), 16)
print('src_port:', src_port)
dec_port = int(TCP_header[1].hex(), 16)
print('dec_port:', dec_port)
seq_num = TCP_header[2]
print('seq_num:', seq_num)
ack_num = TCP_header[3]
print('ack_num:', ack_num)
header_len = int(TCP_header[4].hex(), 16) >> 12 & 15
print('header_len:', header_len)
flags = int(TCP_header[4].hex(), 16) & 4095
print('flags:', flags)
reserved = flags >> 9
print('>>>reserved', reserved)
nonce = flags >> 8 & 1
print('>>>nonce:', nonce)
cwr = flags >> 7 & 1
print('>>>cwr:', cwr)
urgent = flags >> 5 & 1
print('>>>urgent:', urgent)
ack = flags >> 4 & 1
print('>>>ack:', ack)
push = flags >> 3 & 1
print('>>>push:', push)
reset = flags >> 2 & 1
print('>>>reset:', reset)
syn = flags >> 1 & 1
print('>>>syn:', syn)
fin = flags & 1
print('>>>fin:', fin)
window_size = int(TCP_header[5].hex(), 16)
print('Window_size_value:', window_size)
checksum = int(TCP_header[6].hex(), 16)
print('checksum:', checksum)
urgent_pointer = int(TCP_header[7].hex(), 16)
print('urgent_pointer:', urgent_pointer)
def parsing_UDP_header(data):
UDP_header = struct.unpack('2s2s2s2s', data)
print('=============udp_header=============')
src_port = int(UDP_header[0].hex(), 16)
print('src_port:', src_port)
dst_port = int(UDP_header[1].hex(), 16)
print('dst_port:', dst_port)
leng = int(UDP_header[2].hex(), 16)
print('leng:', leng)
header_checksum = UDP_header[3].hex()
print('header_checksum:0x', header_checksum)
recv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs
(2048))
print('<<<<<<Packet Capture Start>>>>>>>')
while True:
data = recv_socket.recvfrom(20000)
parsing_ethernet_header(data[0][0:14])
parsing_ip_header(data[0][14:34])
flag = ch_UDP_TCP(data[0][23:24])
if flag == 6:
parsing_TCP_header(data[0][34:54])
elif flag == 17:
parsing_UDP_header(data[0][34:42])
<|reserved_special_token_1|>
import socket
import struct
def parsing_ethernet_header(data):
    """Unpack the 14-byte Ethernet frame header in *data* and print its fields."""
    fields = struct.unpack('!6c6c2s', data)
    dest_mac = convert_ethernet_address(fields[0:6])
    src_mac = convert_ethernet_address(fields[6:12])
    ether_type = '0x' + fields[12].hex()
    print('=========ethernet header==========')
    print('src_mac_address:', src_mac)
    print('dest_mac_address:', dest_mac)
    print('ip_version', ether_type)
def convert_ethernet_address(data):
    """Join single-byte chunks into a colon-separated lowercase hex MAC string."""
    return ':'.join(byte.hex() for byte in data)
def parsing_ip_header(data):
    """Unpack and print the fixed 20-byte IPv4 header from *data*.

    Expects exactly 20 bytes; IP options are not handled.
    """
    ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)
    print('============ip header=============')
    # First byte: version in the high nibble, header length in the low nibble.
    ip_ver_len = int(ip_header[0].hex(), 16)
    print('ip_version:', ip_ver_len // 16)
    print('ip_length:', ip_ver_len % 16)
    # NOTE(review): DSCP is the top 6 bits and ECN the bottom 2 of this byte;
    # the 4/4 nibble split below does not match that layout — confirm intent.
    differ_expli = int(ip_header[1].hex(), 16)
    print('differentiated_service_codepoint:', differ_expli // 16)
    print('explicit_congestion_notification:', differ_expli % 16)
    total_length = int(ip_header[2].hex(), 16)
    print('total_length:', total_length)
    identification = ip_header[3].hex()
    print('identification:0x', identification)
    flags = ip_header[4].hex()
    print('flags:0x', flags)
    # 16-bit word: bit 15 reserved, low 13 bits fragment offset.
    # NOTE(review): only bit 13 is printed; the bit-14 "don't fragment"
    # flag is never extracted — confirm whether that is intentional.
    flags_int = int(ip_header[4].hex(), 16)
    print('>>>reserved_bit:', flags_int >> 15)
    print('>>>fragments:', flags_int >> 13 & 1)
    print('>>>fragments_offset:', flags_int & 8191)
    time_to_live = int(ip_header[5].hex(), 16)
    print('Time to live:', time_to_live)
    protocol = ip_header[6].hex()
    print('protocol:0x', protocol)
    header_check = ip_header[7].hex()
    print('header checksum:0x', header_check)
    source_addr = convert_ip_address(ip_header[8:12])
    print('source_ip_address:', source_addr)
    dest_addr = convert_ip_address(ip_header[12:16])
    print('dest_ip_address:', dest_addr)
def ch_UDP_TCP(data):
    """Return the IP protocol number carried in the single-byte *data*
    (6 = TCP, 17 = UDP).

    Indexing a bytes object yields the integer value directly, so the
    original struct-unpack / hex / int round-trip is unnecessary.
    """
    return data[0]
def convert_ip_address(data):
    """Render four single-byte chunks as a dotted-quad IPv4 string."""
    return '.'.join(str(byte[0]) for byte in data)
def parsing_TCP_header(data):
    """Unpack and print the fixed 20-byte TCP header from *data*.

    Expects exactly 20 bytes; TCP options are not handled.
    """
    print('=============tcp header==============')
    # Network byte order: ports, 32-bit seq/ack numbers, 16-bit flag word, etc.
    TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)
    src_port = int(TCP_header[0].hex(), 16)
    print('src_port:', src_port)
    dec_port = int(TCP_header[1].hex(), 16)
    print('dec_port:', dec_port)
    seq_num = TCP_header[2]
    print('seq_num:', seq_num)
    ack_num = TCP_header[3]
    print('ack_num:', ack_num)
    # High nibble of the 16-bit word: data offset (header length in 32-bit words).
    header_len = int(TCP_header[4].hex(), 16) >> 12 & 15
    print('header_len:', header_len)
    # Low 12 bits: reserved bits plus the individual TCP flag bits.
    flags = int(TCP_header[4].hex(), 16) & 4095
    print('flags:', flags)
    reserved = flags >> 9
    print('>>>reserved', reserved)
    nonce = flags >> 8 & 1
    print('>>>nonce:', nonce)
    cwr = flags >> 7 & 1
    print('>>>cwr:', cwr)
    # NOTE(review): bit 6 (ECE) is never extracted or printed — confirm
    # whether the omission is intentional.
    urgent = flags >> 5 & 1
    print('>>>urgent:', urgent)
    ack = flags >> 4 & 1
    print('>>>ack:', ack)
    push = flags >> 3 & 1
    print('>>>push:', push)
    reset = flags >> 2 & 1
    print('>>>reset:', reset)
    syn = flags >> 1 & 1
    print('>>>syn:', syn)
    fin = flags & 1
    print('>>>fin:', fin)
    window_size = int(TCP_header[5].hex(), 16)
    print('Window_size_value:', window_size)
    checksum = int(TCP_header[6].hex(), 16)
    print('checksum:', checksum)
    urgent_pointer = int(TCP_header[7].hex(), 16)
    print('urgent_pointer:', urgent_pointer)
def parsing_UDP_header(data):
    """Unpack the 8-byte UDP header in *data* and print its four fields."""
    src, dst, length, checksum = struct.unpack('2s2s2s2s', data)
    print('=============udp_header=============')
    print('src_port:', int(src.hex(), 16))
    print('dst_port:', int(dst.hex(), 16))
    print('leng:', int(length.hex(), 16))
    print('header_checksum:0x', checksum.hex())
# Raw Linux AF_PACKET socket; 2048 == 0x0800 is the IPv4 EtherType, so only
# IPv4 frames are delivered.  Opening it requires root privileges.
recv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs
    (2048))
print('<<<<<<Packet Capture Start>>>>>>>')
while True:
    # recvfrom returns (frame_bytes, address_info); only the bytes are used.
    data = recv_socket.recvfrom(20000)
    parsing_ethernet_header(data[0][0:14])
    parsing_ip_header(data[0][14:34])
    # Byte 23 = 14 (Ethernet header) + 9 (protocol field offset in IPv4 header).
    flag = ch_UDP_TCP(data[0][23:24])
    if flag == 6:
        # IP protocol number 6 = TCP.
        parsing_TCP_header(data[0][34:54])
    elif flag == 17:
        # IP protocol number 17 = UDP.
        parsing_UDP_header(data[0][34:42])
<|reserved_special_token_1|>
import socket
import struct
def parsing_ethernet_header(data):
ethernet_header=struct.unpack("!6c6c2s",data)
ether_dest = convert_ethernet_address(ethernet_header[0:6])
ether_src = convert_ethernet_address(ethernet_header[6:12])
ip_header="0x"+ethernet_header[12].hex()
print("=========ethernet header==========")
print("src_mac_address:", ether_src)
print("dest_mac_address:",ether_dest)
print("ip_version",ip_header)
def convert_ethernet_address(data):
ethernet_addr =list()
for i in data:
ethernet_addr.append(i.hex())
ethernet_addr=":".join(ethernet_addr)
return ethernet_addr
def parsing_ip_header(data):
ip_header=struct.unpack("!1c1c2s2s2s1c1c2s4c4c",data)
print("============ip header=============")
ip_ver_len= int(ip_header[0].hex(), 16)
print("ip_version:",ip_ver_len // 16)
print("ip_length:", ip_ver_len % 16)
differ_expli=int(ip_header[1].hex(),16)
print("differentiated_service_codepoint:",differ_expli//16)
print("explicit_congestion_notification:",differ_expli%16)
total_length=int(ip_header[2].hex(),16)
print("total_length:",total_length)
identification=ip_header[3].hex()
print("identification:0x",identification)
flags=ip_header[4].hex()
print("flags:0x",flags)
flags_int=int(ip_header[4].hex(),16)
print(">>>reserved_bit:",flags_int>>15)
print(">>>fragments:",(flags_int>>13)& 0x0001)
print(">>>fragments_offset:",flags_int & 0x1fff)
time_to_live=int(ip_header[5].hex(),16)
print("Time to live:",time_to_live)
protocol=ip_header[6].hex()
print("protocol:0x",protocol)
header_check=ip_header[7].hex()
print("header checksum:0x",header_check)
source_addr=convert_ip_address(ip_header[8:12])
print("source_ip_address:",source_addr)
dest_addr=convert_ip_address(ip_header[12:16])
print("dest_ip_address:",dest_addr)
def ch_UDP_TCP(data):
temp=struct.unpack("1c",data)
result=int(temp[0].hex(),16)
return result
def convert_ip_address(data):
ip_addr=list()
for i in data:
ip_addr.append(str(int(i.hex(),16)) )
ip_addr=".".join(ip_addr)
return ip_addr
def parsing_TCP_header(data):
print("=============tcp header==============")
TCP_header=struct.unpack("!2s2s1I1I2s2s2s2s",data)
src_port=int(TCP_header[0].hex(),16)
print("src_port:",src_port)
dec_port=int(TCP_header[1].hex(),16)
print("dec_port:",dec_port)
seq_num=TCP_header[2]
print("seq_num:",seq_num)
ack_num=TCP_header[3]
print("ack_num:",ack_num)
header_len=(int(TCP_header[4].hex(),16)>>12)&0x000f
print("header_len:",header_len)
flags=int(TCP_header[4].hex(),16)&0x0fff
print("flags:",flags)
reserved=flags>>9
print(">>>reserved",reserved)
nonce=(flags>>8)&0x001
print(">>>nonce:",nonce)
cwr=(flags>>7)&0x001
print(">>>cwr:",cwr)
urgent=(flags>>5)&0x001
print(">>>urgent:",urgent)
ack=(flags>>4)&0x001
print(">>>ack:",ack)
push=(flags>>3)&0x001
print(">>>push:",push)
reset=(flags>>2)&0x001
print(">>>reset:",reset)
syn=(flags>>1)&0x001
print(">>>syn:",syn)
fin=flags&0x001
print(">>>fin:",fin)
window_size=int(TCP_header[5].hex(),16)
print("Window_size_value:",window_size)
checksum=int(TCP_header[6].hex(),16)
print("checksum:",checksum)
urgent_pointer=int(TCP_header[7].hex(),16)
print("urgent_pointer:",urgent_pointer)
def parsing_UDP_header(data):
    """Decode and print the 8-byte UDP header contained in `data`.

    All four fields are printed to stdout; nothing is returned.
    """
    src, dst, length, cksum = struct.unpack("2s2s2s2s", data)
    print("=============udp_header=============")
    print("src_port:", int(src.hex(), 16))
    print("dst_port:", int(dst.hex(), 16))
    print("leng:", int(length.hex(), 16))
    print("header_checksum:0x", cksum.hex())
# Open a raw AF_PACKET socket bound to EtherType 0x0800 (IPv4) so every
# IPv4 frame on the interface is delivered to userspace.
# NOTE(review): AF_PACKET is Linux-only and this requires root /
# CAP_NET_RAW — confirm the deployment environment.
recv_socket = socket.socket(socket.AF_PACKET,socket.SOCK_RAW,socket.ntohs(0x0800))
print("<<<<<<Packet Capture Start>>>>>>>")
while True:
    # recvfrom returns (frame_bytes, address_info); only the bytes are used.
    data = recv_socket.recvfrom(20000)
    parsing_ethernet_header(data[0][0:14])   # bytes 0-13: Ethernet header
    parsing_ip_header(data[0][14:34])        # bytes 14-33: fixed IPv4 header
    # Byte 23 is the IPv4 Protocol field (frame offset 14 + header offset 9).
    # NOTE(review): the offsets below assume a 20-byte IPv4 header with no
    # options — frames with IP options will be mis-sliced; verify if needed.
    flag =ch_UDP_TCP(data[0][23:24])
    if flag==6:                              # protocol 6 = TCP
        parsing_TCP_header(data[0][34:54])
    elif flag==17:                           # protocol 17 = UDP
        parsing_UDP_header(data[0][34:42])
|
flexible
|
{
"blob_id": "9b715fb95e89804a57ea77a98face673b57220c6",
"index": 4494,
"step-1": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return 
result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return 
result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\n<mask token>\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile True:\n data = recv_socket.recvfrom(20000)\n 
parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-3": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return 
result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\nrecv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs\n (2048))\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile True:\n 
data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-4": "import socket\nimport struct\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n 
return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\nrecv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs\n (2048))\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile 
True:\n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-5": "import socket\nimport struct\n\ndef parsing_ethernet_header(data):\n ethernet_header=struct.unpack(\"!6c6c2s\",data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header=\"0x\"+ethernet_header[12].hex()\n\n print(\"=========ethernet header==========\")\n print(\"src_mac_address:\", ether_src)\n print(\"dest_mac_address:\",ether_dest)\n print(\"ip_version\",ip_header)\n\ndef convert_ethernet_address(data):\n ethernet_addr =list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr=\":\".join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header=struct.unpack(\"!1c1c2s2s2s1c1c2s4c4c\",data)\n \n print(\"============ip header=============\")\n \n ip_ver_len= int(ip_header[0].hex(), 16)\n print(\"ip_version:\",ip_ver_len // 16)\n print(\"ip_length:\", ip_ver_len % 16)\n\n differ_expli=int(ip_header[1].hex(),16)\n print(\"differentiated_service_codepoint:\",differ_expli//16)\n print(\"explicit_congestion_notification:\",differ_expli%16)\n\n total_length=int(ip_header[2].hex(),16)\n print(\"total_length:\",total_length)\n \n identification=ip_header[3].hex()\n print(\"identification:0x\",identification)\n\n flags=ip_header[4].hex()\n print(\"flags:0x\",flags)\n flags_int=int(ip_header[4].hex(),16)\n print(\">>>reserved_bit:\",flags_int>>15)\n print(\">>>fragments:\",(flags_int>>13)& 0x0001)\n print(\">>>fragments_offset:\",flags_int & 0x1fff)\n\n\n time_to_live=int(ip_header[5].hex(),16)\n print(\"Time to live:\",time_to_live)\n\n protocol=ip_header[6].hex()\n print(\"protocol:0x\",protocol)\n\n header_check=ip_header[7].hex()\n print(\"header checksum:0x\",header_check)\n\n source_addr=convert_ip_address(ip_header[8:12])\n print(\"source_ip_address:\",source_addr)\n\n dest_addr=convert_ip_address(ip_header[12:16])\n print(\"dest_ip_address:\",dest_addr)\n\ndef ch_UDP_TCP(data):\n temp=struct.unpack(\"1c\",data)\n 
result=int(temp[0].hex(),16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr=list()\n for i in data:\n ip_addr.append(str(int(i.hex(),16)) ) \n ip_addr=\".\".join(ip_addr)\n return ip_addr\n\ndef parsing_TCP_header(data):\n print(\"=============tcp header==============\")\n TCP_header=struct.unpack(\"!2s2s1I1I2s2s2s2s\",data)\n\n src_port=int(TCP_header[0].hex(),16)\n print(\"src_port:\",src_port)\n\n dec_port=int(TCP_header[1].hex(),16)\n print(\"dec_port:\",dec_port)\n\n seq_num=TCP_header[2]\n print(\"seq_num:\",seq_num)\n\n ack_num=TCP_header[3]\n print(\"ack_num:\",ack_num)\n\n header_len=(int(TCP_header[4].hex(),16)>>12)&0x000f\n print(\"header_len:\",header_len)\n\n flags=int(TCP_header[4].hex(),16)&0x0fff\n print(\"flags:\",flags)\n\n reserved=flags>>9\n print(\">>>reserved\",reserved)\n\n nonce=(flags>>8)&0x001\n print(\">>>nonce:\",nonce)\n\n cwr=(flags>>7)&0x001\n print(\">>>cwr:\",cwr)\n\n urgent=(flags>>5)&0x001\n print(\">>>urgent:\",urgent)\n\n ack=(flags>>4)&0x001\n print(\">>>ack:\",ack)\n\n push=(flags>>3)&0x001\n print(\">>>push:\",push)\n\n reset=(flags>>2)&0x001\n print(\">>>reset:\",reset)\n\n syn=(flags>>1)&0x001\n print(\">>>syn:\",syn)\n\n fin=flags&0x001\n print(\">>>fin:\",fin)\n\n window_size=int(TCP_header[5].hex(),16)\n print(\"Window_size_value:\",window_size)\n\n checksum=int(TCP_header[6].hex(),16)\n print(\"checksum:\",checksum)\n\n urgent_pointer=int(TCP_header[7].hex(),16)\n print(\"urgent_pointer:\",urgent_pointer)\n\ndef parsing_UDP_header(data):\n UDP_header=struct.unpack(\"2s2s2s2s\",data)\n print(\"=============udp_header=============\")\n\n src_port=int(UDP_header[0].hex(),16)\n print(\"src_port:\",src_port)\n\n dst_port=int(UDP_header[1].hex(),16)\n print(\"dst_port:\",dst_port)\n\n leng=int(UDP_header[2].hex(),16)\n print(\"leng:\",leng)\n\n header_checksum=UDP_header[3].hex()\n print(\"header_checksum:0x\",header_checksum)\n\n\n\nrecv_socket = 
socket.socket(socket.AF_PACKET,socket.SOCK_RAW,socket.ntohs(0x0800))\n\nprint(\"<<<<<<Packet Capture Start>>>>>>>\")\n\nwhile True:\n \n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n\n flag =ch_UDP_TCP(data[0][23:24])\n \n if flag==6:\n parsing_TCP_header(data[0][34:54])\n\n elif flag==17:\n parsing_UDP_header(data[0][34:42])\n \n \n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
def main():
daily_signal_checker('china_stocks.csv', location='chineseStocks/')
<|reserved_special_token_0|>
def daily_signal_checker(stocks, location):
ndays = 6
stock_list = pd.read_csv(stocks)
for code in stock_list['Code']:
tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)
df_stock = tmp.read_csv(location=location)
open_price = float(tmp.get_today_open())
print(open_price)
df_stock = df_stock.append({'Open': open_price}, ignore_index=True)
sim = mAvgSim.movingAverageSim(df_stock)
signals = sim.produce_buy_sell(ndays=ndays)
print('Company:', code, 'Signals:', signals)
def scrape_data(stock_list, location, start, end):
for code in stock_list['Code']:
print('Got Code:', code)
tmp = backtest_database(code, start, end, 1)
tmp.create_csv(location=location)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
daily_signal_checker('china_stocks.csv', location='chineseStocks/')
<|reserved_special_token_0|>
def daily_signal_checker(stocks, location):
ndays = 6
stock_list = pd.read_csv(stocks)
for code in stock_list['Code']:
tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)
df_stock = tmp.read_csv(location=location)
open_price = float(tmp.get_today_open())
print(open_price)
df_stock = df_stock.append({'Open': open_price}, ignore_index=True)
sim = mAvgSim.movingAverageSim(df_stock)
signals = sim.produce_buy_sell(ndays=ndays)
print('Company:', code, 'Signals:', signals)
def scrape_data(stock_list, location, start, end):
for code in stock_list['Code']:
print('Got Code:', code)
tmp = backtest_database(code, start, end, 1)
tmp.create_csv(location=location)
def test_stock_list(stock_list, location, ndays):
returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',
'Test Error'])
for code in stock_list['Code']:
print(code)
df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1
).read_csv(location=location)
sim = mAvgSim.movingAverageSim(df_stock)
net, num_trades, test_error = sim.run_simulation(ndays=ndays)
if num_trades == 0:
continue
returns = returns.append({'Company': code, 'No. Trades': num_trades,
'Net return': net, 'Test Error': test_error}, ignore_index=True)
print('Mean Test Error = ', np.mean(returns['Test Error']))
net_profit = np.sum(returns['Net return'])
companies_traded = len(returns)
mean = stat.mean(returns['Net return'])
std = stat.stdev(returns['Net return'])
print('Net Profit =', net_profit,
'\n Total number of companies traded =', companies_traded,
'\n Mean Profit =', mean, """
Standard Deviation""", std)
print(returns)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
daily_signal_checker('china_stocks.csv', location='chineseStocks/')
def update_portfolio():
portfolio = pd.read_csv(portfolio)
def daily_signal_checker(stocks, location):
ndays = 6
stock_list = pd.read_csv(stocks)
for code in stock_list['Code']:
tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)
df_stock = tmp.read_csv(location=location)
open_price = float(tmp.get_today_open())
print(open_price)
df_stock = df_stock.append({'Open': open_price}, ignore_index=True)
sim = mAvgSim.movingAverageSim(df_stock)
signals = sim.produce_buy_sell(ndays=ndays)
print('Company:', code, 'Signals:', signals)
def scrape_data(stock_list, location, start, end):
for code in stock_list['Code']:
print('Got Code:', code)
tmp = backtest_database(code, start, end, 1)
tmp.create_csv(location=location)
def test_stock_list(stock_list, location, ndays):
returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',
'Test Error'])
for code in stock_list['Code']:
print(code)
df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1
).read_csv(location=location)
sim = mAvgSim.movingAverageSim(df_stock)
net, num_trades, test_error = sim.run_simulation(ndays=ndays)
if num_trades == 0:
continue
returns = returns.append({'Company': code, 'No. Trades': num_trades,
'Net return': net, 'Test Error': test_error}, ignore_index=True)
print('Mean Test Error = ', np.mean(returns['Test Error']))
net_profit = np.sum(returns['Net return'])
companies_traded = len(returns)
mean = stat.mean(returns['Net return'])
std = stat.stdev(returns['Net return'])
print('Net Profit =', net_profit,
'\n Total number of companies traded =', companies_traded,
'\n Mean Profit =', mean, """
Standard Deviation""", std)
print(returns)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from datareader import *
import matplotlib.pyplot as plt
from plotting import *
from misc import *
import leastSquares as lsModel
import masim as mAvgSim
import numpy as np
import pandas as pd
import statistics as stat
from datetime import datetime as dt
from time import mktime
def main():
daily_signal_checker('china_stocks.csv', location='chineseStocks/')
def update_portfolio():
portfolio = pd.read_csv(portfolio)
def daily_signal_checker(stocks, location):
ndays = 6
stock_list = pd.read_csv(stocks)
for code in stock_list['Code']:
tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)
df_stock = tmp.read_csv(location=location)
open_price = float(tmp.get_today_open())
print(open_price)
df_stock = df_stock.append({'Open': open_price}, ignore_index=True)
sim = mAvgSim.movingAverageSim(df_stock)
signals = sim.produce_buy_sell(ndays=ndays)
print('Company:', code, 'Signals:', signals)
def scrape_data(stock_list, location, start, end):
for code in stock_list['Code']:
print('Got Code:', code)
tmp = backtest_database(code, start, end, 1)
tmp.create_csv(location=location)
def test_stock_list(stock_list, location, ndays):
returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',
'Test Error'])
for code in stock_list['Code']:
print(code)
df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1
).read_csv(location=location)
sim = mAvgSim.movingAverageSim(df_stock)
net, num_trades, test_error = sim.run_simulation(ndays=ndays)
if num_trades == 0:
continue
returns = returns.append({'Company': code, 'No. Trades': num_trades,
'Net return': net, 'Test Error': test_error}, ignore_index=True)
print('Mean Test Error = ', np.mean(returns['Test Error']))
net_profit = np.sum(returns['Net return'])
companies_traded = len(returns)
mean = stat.mean(returns['Net return'])
std = stat.stdev(returns['Net return'])
print('Net Profit =', net_profit,
'\n Total number of companies traded =', companies_traded,
'\n Mean Profit =', mean, """
Standard Deviation""", std)
print(returns)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from datareader import *
import matplotlib.pyplot as plt
from plotting import *
from misc import *
import leastSquares as lsModel
import masim as mAvgSim
import numpy as np
import pandas as pd
import statistics as stat
from datetime import datetime as dt
from time import mktime
def main():
    """Entry point: run the daily buy/sell signal check over the Chinese
    stock list.

    The commented-out lines below are earlier experiments (bulk scraping,
    single-ticker backtests, crypto data pulls) kept for reference; only
    the daily_signal_checker call is live.
    """
    # scrape_data(pd.read_csv('china_stocks.csv'),location='chineseStocks/',
    # start='2019-09-16',end='2020-11-12')
    # cypt_scrape = backtest_database('LINK-USD','2019-09-16','2020-11-12',1)
    # cypt_scrape.create_csv('/Users/jimmylin/Desktop/Quant_Trading/Trading/')
    # df_stock = pd.read_csv('603131.csv')
    # df_cypt = pd.read_csv('LINK-USD.csv')
    # df_stock = backtest_database('603993.SS','2019-09-16','2020-11-17',1).read_csv(location='chineseStocks/')
    # sim = mAvgSim.movingAverageSim(df_stock)
    # sim = mAvgSim.movingAverageSim(df_cypt)
    # net,num_trades,test_error = sim.run_simulation(ndays=15)
    # sim.plot_graph()
    # test_stock_list(stock_list=pd.read_csv('china_stocks.csv'),location='chineseStocks/',ndays=4)
    daily_signal_checker('china_stocks.csv',location='chineseStocks/')
    # update_open_close('china_stocks.csv',location='chineseStocks/')
    # tmp = backtest_database('300261.SZ','2019-09-16','2020-02-16',1)
    # df_stock = tmp.read_csv('chineseStocks/')
    # open_price = tmp.get_today_open()
    # df_stock = df_stock.append({'Open' : open_price},ignore_index=True)
    # sim = mAvgSim.movingAverageSim(df_stock)
    # sim.run_simulation(ndays=5)
    # signals = sim.produce_buy_sell(ndays=1)
    # print(signals)
def update_portfolio(portfolio='portfolio.csv'):
    """Load the current portfolio from a CSV file and return it.

    Bug fix: the original body was ``portfolio = pd.read_csv(portfolio)``
    with no ``portfolio`` name in scope, so every call raised NameError.
    The path is now a parameter (defaulting to 'portfolio.csv' —
    TODO confirm the intended default filename) and the loaded DataFrame
    is returned so callers can actually use it. Calling with no argument
    remains valid, so the original signature is still accepted.
    """
    return pd.read_csv(portfolio)
def daily_signal_checker(stocks, location):
    """For each ticker in the `stocks` CSV, append today's open price to
    its cached history and print the moving-average buy/sell signal.

    stocks   -- path to a CSV with a 'Code' column of ticker symbols.
    location -- directory holding the per-ticker price-history CSVs.

    NOTE(review): backtest_database and mAvgSim are project modules —
    presumably they fetch price history / produce moving-average
    signals; confirm against their definitions. get_today_open()
    presumably performs network I/O per ticker.
    """
    # Lookback window for the moving-average signal.
    ndays=6
    # Get updated stock prices (whole csv)
    # scrape_data(pd.read_csv(stocks),location='chineseStocks/',
    # start='2019-09-16',end='2020-11-24')
    # Run through stock list to get opens and predict
    stock_list = pd.read_csv(stocks)
    for code in stock_list['Code']:
        tmp = backtest_database(code,'2019-09-16','2020-11-18',1)
        df_stock = tmp.read_csv(location=location)
        open_price = float(tmp.get_today_open())
        # print(code)
        print(open_price)
        # NOTE(review): DataFrame.append was removed in pandas 2.0 —
        # this line pins the project to pandas < 2; verify.
        df_stock = df_stock.append({'Open' : open_price},ignore_index=True)
        sim = mAvgSim.movingAverageSim(df_stock)
        signals = sim.produce_buy_sell(ndays=ndays)
        print("Company:",code,
              "Signals:",signals)
def scrape_data(stock_list, location, start, end):
    """Download price history for every ticker in `stock_list` and write
    one CSV per ticker under `location`.

    stock_list -- DataFrame with a 'Code' column of ticker symbols.
    location   -- output directory for the per-ticker CSV files.
    start, end -- date-range strings passed through to backtest_database.

    NOTE(review): backtest_database is a project class — presumably its
    create_csv() fetches remote data and writes it; confirm.
    """
    for code in stock_list['Code']:
        print("Got Code:",code)
        tmp = backtest_database(code,start,end,1)
        tmp.create_csv(location=location)
def test_stock_list(stock_list, location, ndays):
    """Backtest the moving-average strategy over every ticker in
    `stock_list` and print aggregate profit statistics.

    stock_list -- DataFrame with a 'Code' column of ticker symbols.
    location   -- directory holding the per-ticker price-history CSVs.
    ndays      -- moving-average lookback window passed to the simulator.

    Tickers whose simulation produced zero trades are excluded from the
    aggregate statistics.

    NOTE(review): backtest_database and mAvgSim are project modules;
    run_simulation presumably returns (net % return, trade count,
    test error) — confirm against their definitions.
    """
    # One row per ticker that actually traded.
    returns = pd.DataFrame(columns=['Company','No. Trades','Net return','Test Error'])
    for code in stock_list['Code']:
        print(code)
        df_stock = backtest_database(code,'2019-09-16','2020-02-17',1).read_csv(location=location)
        sim = mAvgSim.movingAverageSim(df_stock)
        net,num_trades,test_error = sim.run_simulation(ndays=ndays)
        if num_trades == 0:
            continue
        # NOTE(review): DataFrame.append was removed in pandas 2.0 —
        # this pins the project to pandas < 2; verify.
        returns = returns.append({
            'Company' : code,
            'No. Trades' : num_trades,
            'Net return' : net,
            'Test Error' : test_error
        },ignore_index=True)
        # print('Company:',code,'\n Number of Trades',num_trades,'\n Net % return',net)
    print("Mean Test Error = ", np.mean(returns['Test Error']))
    net_profit = np.sum(returns['Net return'])
    companies_traded = len(returns)
    mean = stat.mean(returns['Net return'])
    std = stat.stdev(returns['Net return'])
    print("Net Profit =",net_profit,
          '\n Total number of companies traded =',companies_traded,
          '\n Mean Profit =',mean,
          '\n Standard Deviation',std)
    print(returns)

if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "8d5e652fda3fb172e6faab4153bca8f78c114cd1",
"index": 7973,
"step-1": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\n<mask token>\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\n<mask token>\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\ndef update_portfolio():\n portfolio = pd.read_csv(portfolio)\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from datareader import *\nimport matplotlib.pyplot as plt\nfrom plotting import *\nfrom misc import *\nimport leastSquares as lsModel\nimport masim as mAvgSim\nimport numpy as np\nimport pandas as pd\nimport statistics as stat\nfrom datetime import datetime as dt\nfrom time import mktime\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\ndef update_portfolio():\n portfolio = pd.read_csv(portfolio)\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. 
Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from datareader import *\nimport matplotlib.pyplot as plt\nfrom plotting import *\nfrom misc import *\nimport leastSquares as lsModel\nimport masim as mAvgSim\nimport numpy as np\nimport pandas as pd\nimport statistics as stat\nfrom datetime import datetime as dt\nfrom time import mktime\n\ndef main():\n\t# scrape_data(pd.read_csv('china_stocks.csv'),location='chineseStocks/',\n\t# \t\t\t\t\t\tstart='2019-09-16',end='2020-11-12')\n\t# cypt_scrape = backtest_database('LINK-USD','2019-09-16','2020-11-12',1)\n\t# cypt_scrape.create_csv('/Users/jimmylin/Desktop/Quant_Trading/Trading/')\n\t# df_stock = pd.read_csv('603131.csv')\n\t# df_cypt = pd.read_csv('LINK-USD.csv')\n\t# df_stock = backtest_database('603993.SS','2019-09-16','2020-11-17',1).read_csv(location='chineseStocks/')\n\t# sim = mAvgSim.movingAverageSim(df_stock)\n\t# sim = mAvgSim.movingAverageSim(df_cypt)\n\t# net,num_trades,test_error = sim.run_simulation(ndays=15)\n\t# sim.plot_graph()\n\t# test_stock_list(stock_list=pd.read_csv('china_stocks.csv'),location='chineseStocks/',ndays=4)\n\tdaily_signal_checker('china_stocks.csv',location='chineseStocks/')\n\t# update_open_close('china_stocks.csv',location='chineseStocks/')\n\t# tmp = backtest_database('300261.SZ','2019-09-16','2020-02-16',1)\n\t# df_stock = tmp.read_csv('chineseStocks/')\n\t# open_price = tmp.get_today_open()\n\t# df_stock = df_stock.append({'Open' : open_price},ignore_index=True)\n\t# sim = mAvgSim.movingAverageSim(df_stock)\n\t# sim.run_simulation(ndays=5)\n\t# signals = sim.produce_buy_sell(ndays=1)\n\t# print(signals)\n\ndef update_portfolio():\n\tportfolio = pd.read_csv(portfolio)\n\ndef daily_signal_checker(stocks,location):\n\tndays=6\n\t# Get updated stock prices (whole csv)\n\t# scrape_data(pd.read_csv(stocks),location='chineseStocks/',\n\t# \t\t\t\t\t\tstart='2019-09-16',end='2020-11-24')\n\t# Run through stock list to get opens and predict\n\tstock_list = pd.read_csv(stocks)\n\tfor code in stock_list['Code']:\n\t\ttmp = 
backtest_database(code,'2019-09-16','2020-11-18',1)\n\t\tdf_stock = tmp.read_csv(location=location)\n\t\topen_price = float(tmp.get_today_open())\n\t\t# print(code)\n\t\tprint(open_price)\n\t\tdf_stock = df_stock.append({'Open' : open_price},ignore_index=True)\n\t\tsim = mAvgSim.movingAverageSim(df_stock)\n\t\tsignals = sim.produce_buy_sell(ndays=ndays)\n\t\tprint(\"Company:\",code,\n\t\t\t\"Signals:\",signals)\n\ndef scrape_data(stock_list,location,start,end):\n\tfor code in stock_list['Code']:\n\t\tprint(\"Got Code:\",code)\n\t\ttmp = backtest_database(code,start,end,1)\n\t\ttmp.create_csv(location=location)\n\ndef test_stock_list(stock_list,location,ndays):\n\treturns = pd.DataFrame(columns=['Company','No. Trades','Net return','Test Error'])\n\tfor code in stock_list['Code']:\n\t\tprint(code)\n\t\tdf_stock = backtest_database(code,'2019-09-16','2020-02-17',1).read_csv(location=location)\n\t\tsim = mAvgSim.movingAverageSim(df_stock)\n\t\tnet,num_trades,test_error = sim.run_simulation(ndays=ndays)\n\t\tif num_trades == 0:\n\t\t\tcontinue\n\t\treturns = returns.append({\n\t\t\t'Company' : code,\n\t\t\t'No. Trades' : num_trades,\n\t\t\t'Net return' : net,\n\t\t\t'Test Error' : test_error\n\t\t},ignore_index=True)\n\t\t# print('Company:',code,'\\n Number of Trades',num_trades,'\\n Net % return',net)\n\tprint(\"Mean Test Error = \", np.mean(returns['Test Error']))\n\tnet_profit = np.sum(returns['Net return'])\n\tcompanies_traded = len(returns)\n\tmean = stat.mean(returns['Net return'])\n\tstd = stat.stdev(returns['Net return'])\n\tprint(\"Net Profit =\",net_profit,\n\t\t'\\n Total number of companies traded =',companies_traded,\n\t\t'\\n Mean Profit =',mean,\n\t\t'\\n Standard Deviation',std)\n\tprint(returns)\n\n\nif __name__ == \"__main__\":\n\tmain()\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def somaSerie(valor):
soma = 0
for i in range(valor):
soma += (i ** 2 + 1) / (i + 3)
return soma
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def somaSerie(valor):
soma = 0
for i in range(valor):
soma += (i ** 2 + 1) / (i + 3)
return soma
<|reserved_special_token_0|>
print(result)
<|reserved_special_token_1|>
def somaSerie(valor):
soma = 0
for i in range(valor):
soma += (i ** 2 + 1) / (i + 3)
return soma
a = int(input('Digite o 1º Numero :-> '))
result = somaSerie(a)
print(result)
<|reserved_special_token_1|>
def somaSerie(valor):
soma = 0
for i in range(valor):
soma += ((i**2)+1)/(i+3)
return soma
a = int(input("Digite o 1º Numero :-> "))
result = somaSerie(a)
print(result)
|
flexible
|
{
"blob_id": "8114d8162bab625854804d1df2b4a9c11818d35e",
"index": 3747,
"step-1": "<mask token>\n",
"step-2": "def somaSerie(valor):\n soma = 0\n for i in range(valor):\n soma += (i ** 2 + 1) / (i + 3)\n return soma\n\n\n<mask token>\n",
"step-3": "def somaSerie(valor):\n soma = 0\n for i in range(valor):\n soma += (i ** 2 + 1) / (i + 3)\n return soma\n\n\n<mask token>\nprint(result)\n",
"step-4": "def somaSerie(valor):\n soma = 0\n for i in range(valor):\n soma += (i ** 2 + 1) / (i + 3)\n return soma\n\n\na = int(input('Digite o 1º Numero :-> '))\nresult = somaSerie(a)\nprint(result)\n",
"step-5": "def somaSerie(valor):\n soma = 0\n for i in range(valor):\n soma += ((i**2)+1)/(i+3)\n return soma\n\na = int(input(\"Digite o 1º Numero :-> \"))\nresult = somaSerie(a)\nprint(result)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
n=int(input("Enter the number of votes : "))
print()
path="C:\\Program Files\\chromedriver.exe"
driver=webdriver.Chrome(path)
driver.get("https://strawpoll.com/jhzd6qwjw")
for i in range(0,n+1):
driver.delete_all_cookies()
try:
button=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//input[@value='9c1zz2ugv55r']")))
driver.execute_script("arguments[0].click();", button)
buttons=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//button[@class='button is-primary is-fullwidth']")))
driver.execute_script("arguments[0].click();", buttons)
except:
print()
try:
c=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//h1[@class='title']")))
driver.back()
print("Vote Successful")
print()
except:
print()
if i==n-1:
driver.quit()
|
normal
|
{
"blob_id": "0e2b4e8e8c5a728e5123dfa704007b0f6adaf1e1",
"index": 4561,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint()\n<mask token>\ndriver.get('https://strawpoll.com/jhzd6qwjw')\nfor i in range(0, n + 1):\n driver.delete_all_cookies()\n try:\n button = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//input[@value='9c1zz2ugv55r']\")))\n driver.execute_script('arguments[0].click();', button)\n buttons = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//button[@class='button is-primary is-fullwidth']\")))\n driver.execute_script('arguments[0].click();', buttons)\n except:\n print()\n try:\n c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, \"//h1[@class='title']\")))\n driver.back()\n print('Vote Successful')\n print()\n except:\n print()\n if i == n - 1:\n driver.quit()\n",
"step-3": "<mask token>\nn = int(input('Enter the number of votes : '))\nprint()\npath = 'C:\\\\Program Files\\\\chromedriver.exe'\ndriver = webdriver.Chrome(path)\ndriver.get('https://strawpoll.com/jhzd6qwjw')\nfor i in range(0, n + 1):\n driver.delete_all_cookies()\n try:\n button = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//input[@value='9c1zz2ugv55r']\")))\n driver.execute_script('arguments[0].click();', button)\n buttons = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//button[@class='button is-primary is-fullwidth']\")))\n driver.execute_script('arguments[0].click();', buttons)\n except:\n print()\n try:\n c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, \"//h1[@class='title']\")))\n driver.back()\n print('Vote Successful')\n print()\n except:\n print()\n if i == n - 1:\n driver.quit()\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nn = int(input('Enter the number of votes : '))\nprint()\npath = 'C:\\\\Program Files\\\\chromedriver.exe'\ndriver = webdriver.Chrome(path)\ndriver.get('https://strawpoll.com/jhzd6qwjw')\nfor i in range(0, n + 1):\n driver.delete_all_cookies()\n try:\n button = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//input[@value='9c1zz2ugv55r']\")))\n driver.execute_script('arguments[0].click();', button)\n buttons = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//button[@class='button is-primary is-fullwidth']\")))\n driver.execute_script('arguments[0].click();', buttons)\n except:\n print()\n try:\n c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, \"//h1[@class='title']\")))\n driver.back()\n print('Vote Successful')\n print()\n except:\n print()\n if i == n - 1:\n driver.quit()\n",
"step-5": "from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By \r\nfrom selenium.webdriver.support.ui import WebDriverWait \r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport time\r\nn=int(input(\"Enter the number of votes : \"))\r\nprint()\r\npath=\"C:\\\\Program Files\\\\chromedriver.exe\"\r\ndriver=webdriver.Chrome(path)\r\ndriver.get(\"https://strawpoll.com/jhzd6qwjw\")\r\nfor i in range(0,n+1):\r\n driver.delete_all_cookies()\r\n try:\r\n button=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,\"//input[@value='9c1zz2ugv55r']\")))\r\n driver.execute_script(\"arguments[0].click();\", button)\r\n buttons=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,\"//button[@class='button is-primary is-fullwidth']\")))\r\n driver.execute_script(\"arguments[0].click();\", buttons)\r\n except:\r\n print()\r\n try:\r\n c=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,\"//h1[@class='title']\")))\r\n driver.back()\r\n print(\"Vote Successful\")\r\n print()\r\n except:\r\n print()\r\n if i==n-1:\r\n driver.quit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
import copy
random.seed(42)
import csv
import torch
import time
import statistics
import wandb
from model import Net, LinearRegression, LogisticRegression
def byGuide(data, val=None, test=None):
    """Split *data* into [train, val, test] lists keyed on the gRNA sequence.

    Every record is tagged with record['off'] = tensor([1., 0.]) as it is
    routed.  When *val*/*test* are None, hard-coded default guide sets apply.
    """
    default_val_guides = {
        "GGGTGGGGGGAGTTTGCTCCTGG",
        "GACCCCCTCCACCCCGCCTCCGG",
        "GGCCTCCCCAAAGCCTGGCCAGG",
        "GAACACAAAGCATAGACTGCGGG",
    }
    default_test_guides = {
        "GCAAAACTCAACCCTACCCCAGG",
        "GGCCCAGACTGAGCACGTGATGG",
        "GGGAAAGACCCAGCATCCGTGGG",
        "GGAATCCCTTCTGCAGCACCTGG",
        "GTGAGTGAGTGTGTGCGTGTGGG",
        "GATGATGATGCCCCGGGCGTTGG",
        "GCCGGAGGGGTTTGCACAGAAGG",
    }
    val_guides = default_val_guides if val is None else val
    test_guides = default_test_guides if test is None else test
    train_set, val_set, test_set = [], [], []
    for record in data:
        record['off'] = torch.tensor([1., 0.])
        guide = record['grna_target_sequence']
        if guide in val_guides:
            val_set.append(record)
        elif guide in test_guides:
            test_set.append(record)
        else:
            train_set.append(record)
    return [train_set, val_set, test_set]
def byTarget(data, train=.7, val=.1, test=.2):
random.shuffle(data)
train_set = []
val_set = []
test_set = []
for i in range(len(data)):
if i <= len(data) * train:
train_set.append(data[i])
elif i <= len(data) * (train + val):
val_set.append(data[i])
else:
test_set.append(data[i])
return [train_set, val_set, test_set]
def byStudy(data, val=None, test=None):
    """Split *data* into [train, val, test] lists keyed on the study name.

    Every record is tagged with record['off'] = tensor([1., 0.]) as it is
    routed.  When *val*/*test* are None, hard-coded default study sets apply.
    """
    val_studies = {'Anderson', 'Ran'} if val is None else val
    test_studies = {'Kim', 'Tsai', 'Cho'} if test is None else test
    buckets = {'train': [], 'val': [], 'test': []}
    for record in data:
        record['off'] = torch.tensor([1., 0.])
        study = record['study_name']
        if study in val_studies:
            buckets['val'].append(record)
        elif study in test_studies:
            buckets['test'].append(record)
        else:
            buckets['train'].append(record)
    return [buckets['train'], buckets['val'], buckets['test']]
def one_hot(data, sign='+'):
    """One-hot encode a nucleotide sequence into a (23, 4) float tensor.

    Columns are ordered A, C, G, T; characters outside "acgt"
    (case-insensitive) become an all-zero row.  Sequences shorter than 23
    are zero-padded, longer ones are truncated to the first 23 positions.
    For sign == '-' each row's 4 channels are reversed (torch.flip along
    dim 1), matching the original implementation.

    Improvements over the original: no `== None` comparisons, no quadratic
    torch.cat-in-a-loop (the (23, 4) buffer is preallocated), and an empty
    input now returns an all-zero tensor instead of raising.

    Args:
        data: nucleotide string, any case.
        sign: strand indicator; '-' reverses the channel order per row.

    Returns:
        torch.Tensor of shape (23, 4).
    """
    base_column = {'a': 0, 'c': 1, 'g': 2, 't': 3}  # one-hot column per base
    sequence = torch.zeros((23, 4))
    for pos, nucleotide in enumerate(data[:23].lower()):
        col = base_column.get(nucleotide)
        if col is not None:
            sequence[pos][col] = 1
    if sign == '-':
        sequence = torch.flip(sequence, [1])
    return sequence
# import numpy as np
def dataLoader(file="crisprsql.csv", batch=64, mode="target"):
    """Load guide/target pairs from *file*, targeting raw cleavage frequency.

    Args:
        file: CSV with crisprsql-style columns.
        batch: batch size for both the DataLoaders and the manual batches.
        mode: split strategy -- "study", "guide", or anything else for byTarget.

    Returns:
        [data, dl] where, for each split t in (train, val, test), data[t] is a
        list of manually assembled [[guide_batch, target_batch], label_batch]
        triples and dl[t] is a torch DataLoader over the raw item list.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    # choose the train/val/test split strategy
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True  # only the first (training) split gets shuffle=True
    for t in range(3):
        average_value = list()
        thisdata = list()
        for line in loadData[t]:
            # keep only rows with a non-empty, non-negative cleavage frequency
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                thisdata.append([
                    [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                    one_hot(line['target_sequence'], line["target_strand"])],
                    torch.tensor([float(line['cleavage_freq'])])])
                average_value.append(float(line['cleavage_freq']))
        # NOTE(review): num_workers evaluates to 4 on both branches of the
        # conditional; presumably the non-CUDA branch was meant to differ --
        # confirm the intended worker counts.
        if train == True:
            dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(4 if torch.cuda.is_available() else 4)))
            print(thisdata[0][0][0].size())
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(4 if torch.cuda.is_available() else 4)))
        # Manually assemble full batches (any trailing partial batch is
        # dropped); unsqueeze_ adds batch/channel dims in place before cat.
        thisdata1 = list()
        for i in range(int(len(thisdata)/batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)
                    twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)
                    threes = thisdata[(i * batch) + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
# from scipy.stats import rankdata
class CRISPRDataset(torch.utils.data.Dataset):
    """Wraps pre-encoded (guide, target, cleavage-frequency) triples for a DataLoader."""

    def __init__(self, thisdata):
        # Each element is [[guide_tensor, target_tensor], cfd_tensor].
        self.thisdata = thisdata

    def __len__(self):
        return len(self.thisdata)

    def __getitem__(self, idx):
        pair, cfd = self.thisdata[idx]
        guide, target = pair
        return {
            # squeeze away any singleton dims, then add a leading channel dim
            'target': torch.squeeze(target).unsqueeze_(dim=0),
            'guide': torch.squeeze(guide).unsqueeze_(dim=0),
            'cfd': torch.squeeze(cfd).unsqueeze_(dim=0),
        }
def collate_fn(batch):
    """Stack a list of sample dicts into batched tensors, skipping bad items.

    An item is kept only when its 'cfd', 'target' and 'guide' tensors all
    have a non-empty shape; otherwise diagnostic lines are printed and the
    item is dropped.  Keys whose collected list ends up empty map to an
    empty tensor.
    """
    collected = {key: [] for key in batch[0].keys()}
    for sample in batch:
        usable = (sum(list(sample['cfd'].shape)) > 0
                  and sum(list(sample['target'].shape)) > 0
                  and sum(list(sample['guide'].shape)) > 0)
        if usable:
            for key in sample.keys():
                collected[key].append(sample[key])
        else:
            print('1', sum(list(sample['cfd'].shape)), sample['cfd'])
            print('2', sum(list(sample['target'].shape)), len(sample['target'].shape), sample['target'].tolist())
            print('3', sum(list(sample['guide'].shape)), len(sample['guide'].shape))
    output = {}
    for key, values in collected.items():
        output[key] = torch.stack(values) if len(values) > 0 else torch.tensor([])
    return output
import pandas as pd
def rankDataLoader(file="crisprsql.csv", batch=64, mode="target"):
    """Like dataLoader, but the label is each cleavage frequency's global
    rank (its index in the sorted list of all valid frequencies, divided by
    the list length, so roughly in [0, 1)).

    Returns:
        [data, dl] -- manual batches and CRISPRDataset-backed DataLoaders,
        one entry per split (train, val, test).
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True  # only the first (training) split gets shuffle=True
    # sorted list of every valid cleavage frequency across the whole file;
    # a value's index here is its global rank
    ranks = list()
    for line in d:
        if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
            ranks.append(float(line['cleavage_freq']))
    ranks.sort()
    for t in range(3):
        df = pd.DataFrame(loadData[t])
        # NOTE(review): pd.to_numeric returns a new Series that is discarded
        # here, so df.cleavage_freq stays a string column and the dropna
        # below only removes genuinely missing values -- confirm intended.
        pd.to_numeric(df.cleavage_freq, errors='coerce')
        df.dropna(subset=['cleavage_freq'], inplace=True)
        print(df.head())
        average_value = list()
        thisdata = list()
        for line in df.to_dict("records"):
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                # label = normalised global rank of this cleavage frequency;
                # note ranks.index() is an O(n) scan per row
                thisdata.append([
                    [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                    one_hot(line['target_sequence'], line["target_strand"])],
                    torch.tensor(ranks.index(float(line['cleavage_freq'])) / len(ranks))])
                average_value.append(float(line['cleavage_freq']))
        if train == True:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))
        # manual fixed-size batching, same scheme as dataLoader (trailing
        # partial batch is dropped)
        thisdata1 = list()
        for i in range(int(len(thisdata)/batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)
                    twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)
                    threes = thisdata[(i * batch) + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
def fullDataLoader(file="augmentcrisprsql.csv", batch=64, mode="target", target='rank'):
    """Load the augmented dataset with a selectable label type.

    Args:
        file: augmented CSV (must contain ranked/threshold columns when the
            corresponding *target* is requested).
        batch: batch size for DataLoaders and manual batches.
        mode: split strategy -- "study", "guide", or anything else for byTarget.
        target: 'regular' -> raw cleavage frequency (scalar);
            'rank' -> precomputed normalised-rank column;
            anything else -> two-class one-hot from the thresholded column.

    Returns:
        [data, dl] as in dataLoader.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    random.shuffle(d)
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True  # only the first (training) split gets shuffle=True
    for t in range(3):
        average_value = list()
        thisdata = list()
        q = 0  # count of rows dropped because some tensor came back empty
        for line in loadData[t]:
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                if target == 'regular':
                    label = float(line['cleavage_freq'])
                elif target == 'rank':
                    label = [float(line['ranked_cleavage_freq'])]
                else:
                    # binary one-hot from the thresholded column
                    label = [0, 1] if float(line['threshhold_cleavage_freq']) == 0 else [1, 0]
                # keep the row only if label and both encodings are non-empty
                if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)) > 0 and sum(list(one_hot(line['target_sequence'], line["target_strand"]).shape)) > 0:
                    thisdata.append([
                        [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                        one_hot(line['target_sequence'], line["target_strand"])],
                        torch.tensor(label)])
                    average_value.append(label)
                else:
                    q+=1
                    print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line["target_strand"]).shape)))
        print(q)
        if train == True:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=4))
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=4))
        # manual fixed-size batching, same scheme as dataLoader (trailing
        # partial batch is dropped)
        thisdata1 = list()
        for i in range(int(len(thisdata)/batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)
                    twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)
                    threes = thisdata[(i * batch) + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.metrics import auc
def roc(labels, outputs):
    """Binarise labels/outputs at a cleavage threshold and score the result.

    Returns a dict mapping a threshold key to
    [ROC AUC, precision-recall AUC, plain agreement accuracy].
    """
    llabels = labels.flatten().tolist()
    loutputs = outputs.flatten().tolist()
    average_values = dict()
    # NOTE(review): this loop runs exactly once (i == 1); it looks like a
    # multi-threshold sweep was planned but never extended.
    for i in range(1, 2):
        thislabel = list()
        thisoutput = list()
        pres = 0       # count of positions where binarised label == output
        totalpres = 0  # total positions compared
        for j in range(len(llabels)):
            # binarise both series at the threshold .01 / i
            if llabels[j] <= .01 / i:
                thislabel.append(0)
            else:
                thislabel.append(1)
            if loutputs[j] <= .01 / i:
                thisoutput.append(0)
            else:
                thisoutput.append(1)
            if thislabel[-1] == thisoutput[-1]:
                pres += 1
            totalpres +=1
        lr_precision, lr_recall, _ = precision_recall_curve(thislabel, thisoutput)
        # NOTE(review): the dict key uses .1 / i while the binarisation above
        # uses .01 / i -- confirm which threshold the key should record.
        average_values[.1/i] = [roc_auc_score(thislabel, thisoutput), auc(lr_recall, lr_precision), pres/totalpres]
    return average_values
def accuracy(labels, outputs, percent=.10):
    """Fraction of predictions falling within +/- *percent* of their label.

    Both tensors are flattened and compared element-wise.
    """
    label_values = labels.flatten().tolist()
    prediction_values = outputs.flatten().tolist()
    hits = 0
    for label, prediction in zip(label_values, prediction_values):
        lower = label * (1 - percent)
        upper = label * (1 + percent)
        if lower <= prediction and upper >= prediction:
            hits += 1
    return hits / len(label_values)
def percentError(outputs, labels):
    """Mean error of *outputs* relative to *labels*, element-wise.

    Divides by the raw labels, so labels must be nonzero (and the sign of a
    negative label affects the result), exactly as before.
    """
    relative_error = torch.abs(labels - outputs) / labels
    return torch.mean(relative_error)
def Test(net, dataset, device, crit, logpath=None):
    """Evaluate *net* on *dataset* and return its accuracy as a percentage.

    Args:
        net: model; switched to eval mode and called as net(inputs).
        dataset: iterable of (inputs, labels) batches.
        device: device the labels are moved to before comparison.
        crit: loss function applied to (outputs, labels) per batch.
        logpath: optional path; when given, the summary is also written there.

    Returns:
        float: 100 * correct / total over all samples.

    Fixes over the original: the log file is managed with a context manager
    (it previously relied on manual close), log lines now end with newlines,
    and the hard-coded "10000 test images" message (left over from a
    tutorial) now reports the real sample count.
    """
    net.eval()
    correct = 0
    total = 0
    totalloss = 0  # number of batches, used to average the loss
    loss = 0
    with torch.no_grad():
        for i, data in enumerate(dataset, 0):
            inputs, labels = data[0], data[1].to(device)
            outputs = net(inputs)
            # class prediction = argmax over the output logits
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            totalloss += 1
            correct += (predicted == labels).sum().item()
            loss += crit(outputs, labels)
    summary = (
        'Accuracy of the network on the %d test samples: %d %%\n' % (total, 100 * correct / total)
        + f'total: {total} correct: {correct}\n'
        + f'loss: {loss / totalloss}\n'
    )
    if logpath is not None:
        # context manager guarantees the log file is closed even on error
        with open(logpath, 'w') as f:
            f.write(summary)
    print(summary, end='')
    return 100 * correct / total
def getAllStudy():
    """Print, for every study in crisprsql.csv, its set of distinct guide
    sequences and the set's size.

    Rows whose guide column is a single base letter are treated as noise and
    skipped, mirroring the original filter.

    Bug fix: the first sequence seen for a study used to be stored as
    set(<string>) -- a set of the string's *characters* -- instead of a set
    containing the string itself.
    """
    with open("crisprsql.csv") as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"]:
                # setdefault replaces the try/except KeyError dance and
                # guarantees the bucket starts as an empty set
                alls.setdefault(row['study_name'], set()).add(row['grna_target_sequence'])
    for study, guides in alls.items():
        print(study)
        print(guides)
        print(len(guides))
def getallGuide():
    """Print, for every guide sequence in crisprsql.csv, its set of distinct
    target sequences and the set's size.

    Rows whose guide column is a single base letter are treated as noise and
    skipped, mirroring the original filter.

    Bug fix: the first target seen for a guide used to be stored as
    set(<string>) -- a set of the string's *characters* -- instead of a set
    containing the string itself.
    """
    with open("crisprsql.csv") as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"]:
                # setdefault replaces the try/except KeyError dance and
                # guarantees the bucket starts as an empty set
                alls.setdefault(row['grna_target_sequence'], set()).add(row['target_sequence'])
    for guide, targets in alls.items():
        print(guide)
        print(targets)
        print(len(targets))
def aboveandbelow(threshold):
    """Report the fraction of cleavage frequencies above `threshold`.

    Reads crisprsql.csv; rows with an empty cleavage_freq or a degenerate
    single-base guide sequence are ignored.

    Returns:
        Fraction in [0, 1] of counted rows with cleavage_freq > threshold.
        Raises ZeroDivisionError if no rows are counted (unchanged).
    """
    with open("crisprsql.csv") as f:
        data = csv.DictReader(f)
        above = 0
        total = 0
        for row in data:
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"] and row['cleavage_freq'] != '':
                if float(row['cleavage_freq']) > threshold:
                    above += 1
                total += 1
    # BUG FIX: the original printed raw fractions followed by '%'
    # (e.g. "0.5%"); scale to true percentages.
    print(f'Above: {100 * above / total}%. Below: {100 * (total - above) / total}%')
    return above / total
def NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net, device, optim_time=None, logpath=None):
    """Training loop with per-batch wandb logging, validation, and checkpointing.

    Args:
        epochs: unused in the body; epoch counts come from `optim_time`.
        optim: initial optimizer (immediately replaced by optim_time entries).
        crit: loss function called as crit(outputs, labels).
        batch_per: interval (in batches) for mid-epoch loss/ROC summaries.
        train_data: list of (inputs, labels) batches; shuffled each epoch.
        val_data: list of (inputs, labels) validation batches.
        net: model to train; moved to `device`.
        device: torch device labels and model are moved to.
        optim_time: iterable of (num_epochs, optimizer) pairs acting as a
            simple optimizer/learning-rate schedule.
        logpath: optional plain-text log file path.

    Side effects: logs metrics to wandb, saves checkpoints (.net.pth,
    .accuracynet.pth, .PRnet.pth, .ROCnet.pth), prints progress.
    """
    net.to(device)
    #def optim, loss, and init graph data
    criterion = crit
    optimizer = optim
    # get all labels for ROC
    full_full_labels = None
    for i, data in enumerate(train_data, 0):
        if full_full_labels == None:
            full_full_labels = data[1].to(device)
        else:
            full_full_labels = torch.cat((full_full_labels, data[1].to(device)), 0)
    full_val_labels = None
    for i, data in enumerate(val_data, 0):
        if full_val_labels == None:
            full_val_labels = data[1].to(device)
        else:
            full_val_labels = torch.cat((full_val_labels, data[1].to(device)), 0)
    print("begin training")
    if logpath!= None:
        # NOTE(review): file handle stays open for the whole run and is only
        # closed at the end; an exception mid-training leaks it.
        f = open(logpath, 'w')
    #these go down, and random loss is ~2.303 so 15 will be replaced
    best = 15
    bestval = 15
    bestepoch = 0
    e = 0  # global epoch counter across all schedule segments
    # begin training loop, larget loop is for lr scedule
    times = list()
    # bestnet = LogisticRegression()
    # bestnet.load_state_dict(copy.deepcopy(net.state_dict()))
    # each schedule entry q = (num_epochs, optimizer)
    for q in optim_time:
        optimizer = q[1]
        print(q[0])
        # net.load_state_dict(copy.deepcopy(bestnet.state_dict())
        # print(
        #     'params', [p for p in net.parameters()],
        #     '\ngrads', [p.grad for p in net.parameters()]
        # )
        # epoch loop
        for epoch in range(q[0]):  # loop over the dataset multiple times
            ftime = time.monotonic()
            random.shuffle(train_data)
            correct = 0
            total = 0
            running_loss = 0.0
            # train mode
            net.train()
            # full_output/full_labels accumulate since the last mid-epoch
            # summary; full_full_output accumulates over the whole epoch.
            full_output = None
            full_labels = None
            full_full_output = None
            for i, data in enumerate(train_data, 0):
                # train step
                inputs, labels = data[0], data[1].to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                # t = time.monotonic()
                outputs = net(inputs)
                # print(time.monotonic - t, " seconds for 512 outputs")
                loss = criterion(outputs, labels)
                loss.backward()
                # import pdb; pdb.set_trace()
                # things to look at:
                # - loss
                # - parameters
                # - inputs
                # - grads
                # if e % 300 == 299:
                #     print(
                #         'loss', loss,
                #         # '\ninputs', inputs,
                #         '\nlabels', labels,
                #         '\noutputs', outputs
                #     )
                optimizer.step()
                _, predicted = torch.max(outputs.data, 1)
                total+= labels.size(0)
                correct += (predicted == labels).sum().item()
                # print()
                running_loss += loss.item()
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
                if full_labels == None:
                    full_labels = labels
                else:
                    full_labels = torch.cat((full_labels, labels), 0)
                # w = {f'output {i}': outputs.flatten()[i] for i in range(outputs.flatten().size(0))}
                # w.update({
                #     f'label {i}': labels.flatten()[i] for i in range(labels.flatten().size(0))
                # })
                w = ({'loss': loss.item(),
                'accuracy': accuracy(labels, outputs),
                'percent error': percentError(outputs, labels)})
                wandb.log(
                    # {
                    #     'loss': loss.item(),
                    #     # 'params': [p for p in net.parameters()],
                    #     # 'grads': [p.grad for p in net.parameters()],
                    #     # 'inputs': inputs,
                    #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                    #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                    #     'accuracy': accuracy(labels, outputs)
                    # }
                    w
                )
                # print statistics
                # mid-epoch summary every `batch_per` batches
                if i % batch_per == batch_per - 1:    # print every 2000 mini-batches
                    print('[%d, %5d] loss: %.3f' %
                        (e + 1, i + 1, running_loss / batch_per))
                    # best = min(best, running_loss / batch_per)
                    # print('Accuracy of the network on the ' + str(batch_per) + 'th update: %d %%' % (
                    #     100 * correct / total))
                    wl = roc(full_labels, full_output)
                    wandlog = {}
                    # NOTE(review): this `q` shadows the schedule variable of
                    # the outer `for q in optim_time:` loop — confirm intended.
                    for q in wl:
                        wandlog[f"midepoch ROC_AUC"] = wl[q][0]
                        wandlog[f"midepoch PR_AUC"] = wl[q][1]
                        wandlog[f"midepoch threshhold accuracy"] = wl[q][2]
                    # wandlog.update({
                    #     "LOSS": running_loss / batch_per,
                    #     "TYPE": "TRAIN",
                    #     'EPOCH': e+1,
                    #     'UPDATE': (e*len(train_data)) + i + 1})
                    w.update({'midepoch loss': loss.item(),
                    'midepoch accuracy': accuracy(labels, outputs),
                    'midepoch percent error': percentError(outputs, labels)})
                    wandb.log(
                        # {
                        #     'loss': loss.item(),
                        #     # 'params': [p for p in net.parameters()],
                        #     # 'grads': [p.grad for p in net.parameters()],
                        #     # 'inputs': inputs,
                        #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                        #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                        #     'accuracy': accuracy(labels, outputs)
                        # }
                        w
                    )
                    wandb.log(wandlog)
                    # fold the mid-epoch window into the epoch-wide buffers
                    # and reset the window accumulators
                    if full_full_output == None:
                        full_full_output = full_output
                    else:
                        full_full_output = torch.cat((full_full_output, full_output), 0)
                    full_output = None
                    full_labels = None
                    running_loss = 0
                    correct = 0
                    total = 0
            # print('[%d] loss: %.20f' %
            #     (epoch + 1, running_loss / total))
            # if logpath != None:
            #     f.write('[%d] loss: %.20f' %
            #     (epoch + 1, running_loss / total))
            # append the leftover window (batches since the last summary)
            if full_full_output == None:
                full_full_output = full_output
            else:
                full_full_output = torch.cat((full_full_output, full_output), 0)
            # ROC is commented out when training on 10 samples
            wl = roc(full_full_labels, full_full_output)
            w = {}
            for q in wl:
                w[f"epoch ROC_AUC"] = wl[q][0]
                w[f"epoch PR_AUC"] = wl[q][1]
                w[f"epoch threshhold accuracy"] = wl[q][2]
            # wandlog.update({
            #     "LOSS": running_loss / batch_per,
            #     "TYPE": "TRAIN",
            #     'EPOCH': e+1,
            #     'UPDATE': (e + 1) *len(train_data)})
            w.update({'epoch loss': loss.item(),
                    'epoch accuracy': accuracy(full_full_labels, full_full_output),
                    'epoch percent error': percentError(full_full_output, full_full_labels),
                    'label': labels.flatten()[0],
                    'output': outputs.flatten()[0]})
            wandb.log(
                # {
                #     'loss': loss.item(),
                #     # 'params': [p for p in net.parameters()],
                #     # 'grads': [p.grad for p in net.parameters()],
                #     # 'inputs': inputs,
                #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                #     'accuracy': accuracy(labels, outputs)
                # }
                w
            )
            # snapshot the model whenever a metric saturates at 1.0
            if w['epoch accuracy'] == 1:
                PATH = f'.accuracynet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch PR_AUC'] == 1:
                PATH = f'.PRnet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch ROC_AUC'] == 1:
                PATH = f'.ROCnet.pth'
                torch.save(net.state_dict(), PATH)
            # wandb.log(wandlog)
            full_output = None
            full_full_output = None
            running_loss = 0
            correct = 0
            total = 0
            running_loss = 0
            net.eval()
            correct = 0
            total = 0
            if e % 10 == 9:
                PATH = f'.net.pth'
                torch.save(net.state_dict(), PATH)
            #check val set
            for i, data in enumerate(val_data, 0):
                inputs, labels = data[0], data[1].to(device)
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                # NOTE(review): backward() during validation accumulates
                # gradients without an optimizer step — confirm intended.
                loss.backward()
                running_loss += loss.item()
                total+= labels.size(0)
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
            # if e % 300 == 299:
            print(f'Validation loss for Epoch [{e +1}]: {running_loss/total}')
            # if logpath != None:
            #     f.write(f'Validation loss for Epoch [{epoch}]: {running_loss/total}')
            # wl = roc(full_val_labels, full_output)
            wandlog = {}
            # for q in wl:
            #     wandlog[f"{q} ROC_AUC"] = wl[q][0]
            #     wandlog[f"{q} PR_AUC"] = wl[q][1]
            #     wandlog[f"{q} ACCURACY"] = wl[q][2]
            # wandlog.update({
            #     "LOSS": running_loss / len(val_data),
            #     "TYPE": "VAL",
            #     'EPOCH': e+1,
            #     'UPDATE': (e + 1)*len(train_data)})
            # wandb.log(wandlog)
            # best = min(best, running_loss / total)
            # early stop just goes to the next lr change checkpoint
            # early-stop check is currently disabled: the body is a no-op
            if bestval <= running_loss / total:
                # if epoch >= 5:
                # print('Early Stop')
                # print(f"Best Validation loss: {bestval}")
                # print(f"Current Validation loss: {running_loss / total}")
                e = e
                # break
                # continue
                # return
            else:
                # bestnet.load_state_dict(copy.deepcopy(net.state_dict()))
                bestepoch = e
                bestval = running_loss / total
            running_loss = 0
            correct = 0
            total = 0
            times.append(time.monotonic() - ftime)
            PATH = f'.net.pth'
            torch.save(net.state_dict(), PATH)
            # if e % 300 == 299:
            print('time for epoch: ', times[-1], 'seconds')
            if logpath != None:
                f.write(f'time for epoch: {times[-1]}, seconds')
            e+=1
    # finish training. in future dont plot and save here just return them
    print('Finished Training')
    print('average time per epoch: ', sum(times)/len(times), 'seconds')
    if logpath != None:
        f.write('Finished Training')
        f.write(f'average time per epoch: {sum(times)/len(times)} seconds')
        f.close()
    return
# def compute_dataframe(df: pd.DataFrame, checkpoint_path: str):
# model = LogisticRegression().load_state_dict(torch.load(checkpoint_path, map_location=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")))
# targets, targets_s, guides, guides_s = df.target_sequence.tolist(), df.target_strand.tolist(), df.grna_target_sequence.tolist(), df.grna_target_strand.tolist()
# preds = []
# for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):
# pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])
# preds.append(pred.item())
# df['pred'] = preds
# return df
def compute_dataframe(df: pd.DataFrame, checkpoint_path):
    """Score every guide/target pair in *df* and attach a ``pred`` column.

    Args:
        df: DataFrame with columns ``target_sequence``, ``target_strand``,
            ``grna_target_sequence`` and ``grna_target_strand``.
        checkpoint_path: despite the name, this is the model itself — a
            callable taking ``[guide_onehot, target_onehot]`` and returning a
            scalar tensor — not a filesystem path (see the commented-out
            state-dict loader above for the original intent).

    Returns:
        pd.DataFrame: the same *df*, mutated in place with a new float
        column ``pred`` holding one score per row.
    """
    model = checkpoint_path
    preds = []
    # Inference only: disable autograd so each forward pass does not retain
    # its computation graph — otherwise memory grows with the dataframe size.
    with torch.no_grad():
        for guide, guide_s, target, target_s in zip(
                df.grna_target_sequence.tolist(),
                df.grna_target_strand.tolist(),
                df.target_sequence.tolist(),
                df.target_strand.tolist()):
            pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])
            preds.append(pred.item())
    df['pred'] = preds
    return df
|
normal
|
{
"blob_id": "a0059563b2eed4ca185a8e0971e8e0c80f5fb8f8",
"index": 6668,
"step-1": "<mask token>\n\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',\n 'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']\n test_guides = test\n if test == None:\n test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',\n 'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',\n 'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',\n 'GCCGGAGGGGTTTGCACAGAAGG']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef byTarget(data, train=0.7, val=0.1, test=0.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set]\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = ['Anderson', 'Ran']\n test_studies = test\n if test == None:\n test_studies = ['Kim', 'Tsai', 'Cho']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = data.lower()\n for n in data:\n one_hot = torch.zeros((1, 4))\n if n == 'a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = 
copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)\n if list(sequence.size())[0] > 23:\n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1])\n return sequence\n\n\n<mask token>\n\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n\n def __init__(self, thisdata):\n self.thisdata = thisdata\n\n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':\n torch.squeeze(item[1]).unsqueeze_(dim=0)}\n return sample\n\n\ndef collate_fn(batch):\n output = {}\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)\n ) > 0 and sum(list(i['guide'].shape)) > 0:\n for key in i.keys():\n b[key].append(i[key])\n else:\n print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape),\n i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n for key in b.keys():\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n return output\n\n\n<mask token>\n\n\ndef rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n df = pd.DataFrame(loadData[t])\n 
pd.to_numeric(df.cleavage_freq, errors='coerce')\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in df.to_dict('records'):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(ranks.index(float(line['cleavage_freq'])) / len(\n ranks))])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=1 if torch.\n cuda.is_available() else 0))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=1 if torch\n .cuda.is_available() else 0))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\ndef fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',\n target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n 
else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']\n ) == 0 else [1, 0]\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)) > 0 and sum(list(one_hot\n (line['target_sequence'], line['target_strand']).shape)\n ) > 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(label)])\n average_value.append(label)\n else:\n q += 1\n print(sum(list(torch.tensor([label]).shape)), sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)), sum(list(one_hot(\n line['target_sequence'], line['target_strand']).shape))\n )\n print(q)\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=4))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n 
threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\n<mask token>\n\n\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n average_values = dict()\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n if llabels[j] <= 0.01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1)\n if loutputs[j] <= 0.01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres += 1\n lr_precision, lr_recall, _ = precision_recall_curve(thislabel,\n thisoutput)\n average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),\n auc(lr_recall, lr_precision), pres / totalpres]\n return average_values\n\n\ndef accuracy(labels, outputs, percent=0.1):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +\n percent) >= loutputs[i]:\n correct += 1\n total += 1\n return correct / total\n\n\n<mask token>\n\n\ndef Test(net, dataset, device, crit, logpath=None):\n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss += 1\n correct += (predicted == labels).sum().item()\n loss += crit(outputs, labels)\n if logpath != None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' %\n (100 * correct / total))\n f.write(f'total: {total} correct: {correct}')\n f.write(f'loss: {loss / totalloss}')\n f.close()\n 
print('Accuracy of the network on the 10000 test images: %d %%' % (100 *\n correct / total))\n print(f'total: {total} correct: {correct}')\n print(f'loss: {loss / totalloss}')\n return 100 * correct / total\n\n\ndef getAllStudy():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['study_name']].add(row['grna_target_sequence'])\n except KeyError:\n alls[row['study_name']] = set(row['grna_target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',\n 'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']\n test_guides = test\n if test == None:\n test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',\n 'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',\n 'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',\n 'GCCGGAGGGGTTTGCACAGAAGG']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef byTarget(data, train=0.7, val=0.1, test=0.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set]\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = ['Anderson', 'Ran']\n test_studies = test\n if test == None:\n test_studies = ['Kim', 'Tsai', 'Cho']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = data.lower()\n for n in data:\n one_hot = torch.zeros((1, 4))\n if n == 'a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = 
copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)\n if list(sequence.size())[0] > 23:\n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1])\n return sequence\n\n\n<mask token>\n\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n\n def __init__(self, thisdata):\n self.thisdata = thisdata\n\n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':\n torch.squeeze(item[1]).unsqueeze_(dim=0)}\n return sample\n\n\ndef collate_fn(batch):\n output = {}\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)\n ) > 0 and sum(list(i['guide'].shape)) > 0:\n for key in i.keys():\n b[key].append(i[key])\n else:\n print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape),\n i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n for key in b.keys():\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n return output\n\n\n<mask token>\n\n\ndef rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n df = pd.DataFrame(loadData[t])\n 
pd.to_numeric(df.cleavage_freq, errors='coerce')\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in df.to_dict('records'):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(ranks.index(float(line['cleavage_freq'])) / len(\n ranks))])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=1 if torch.\n cuda.is_available() else 0))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=1 if torch\n .cuda.is_available() else 0))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\ndef fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',\n target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n 
else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']\n ) == 0 else [1, 0]\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)) > 0 and sum(list(one_hot\n (line['target_sequence'], line['target_strand']).shape)\n ) > 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(label)])\n average_value.append(label)\n else:\n q += 1\n print(sum(list(torch.tensor([label]).shape)), sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)), sum(list(one_hot(\n line['target_sequence'], line['target_strand']).shape))\n )\n print(q)\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=4))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n 
threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\n<mask token>\n\n\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n average_values = dict()\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n if llabels[j] <= 0.01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1)\n if loutputs[j] <= 0.01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres += 1\n lr_precision, lr_recall, _ = precision_recall_curve(thislabel,\n thisoutput)\n average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),\n auc(lr_recall, lr_precision), pres / totalpres]\n return average_values\n\n\ndef accuracy(labels, outputs, percent=0.1):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +\n percent) >= loutputs[i]:\n correct += 1\n total += 1\n return correct / total\n\n\n<mask token>\n\n\ndef Test(net, dataset, device, crit, logpath=None):\n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss += 1\n correct += (predicted == labels).sum().item()\n loss += crit(outputs, labels)\n if logpath != None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' %\n (100 * correct / total))\n f.write(f'total: {total} correct: {correct}')\n f.write(f'loss: {loss / totalloss}')\n f.close()\n 
print('Accuracy of the network on the 10000 test images: %d %%' % (100 *\n correct / total))\n print(f'total: {total} correct: {correct}')\n print(f'loss: {loss / totalloss}')\n return 100 * correct / total\n\n\ndef getAllStudy():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['study_name']].add(row['grna_target_sequence'])\n except KeyError:\n alls[row['study_name']] = set(row['grna_target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\n<mask token>\n\n\ndef aboveandbelow(threshold):\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n above = 0\n total = 0\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[\n 'cleavage_freq'] != '':\n if float(row['cleavage_freq']) > threshold:\n above += 1\n total += 1\n print(f'Above: {above / total}%. Below: {(total - above) / total}')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',\n 'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']\n test_guides = test\n if test == None:\n test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',\n 'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',\n 'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',\n 'GCCGGAGGGGTTTGCACAGAAGG']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef byTarget(data, train=0.7, val=0.1, test=0.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set]\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = ['Anderson', 'Ran']\n test_studies = test\n if test == None:\n test_studies = ['Kim', 'Tsai', 'Cho']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = data.lower()\n for n in data:\n one_hot = torch.zeros((1, 4))\n if n == 'a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = 
copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)\n if list(sequence.size())[0] > 23:\n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1])\n return sequence\n\n\n<mask token>\n\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n\n def __init__(self, thisdata):\n self.thisdata = thisdata\n\n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':\n torch.squeeze(item[1]).unsqueeze_(dim=0)}\n return sample\n\n\ndef collate_fn(batch):\n output = {}\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)\n ) > 0 and sum(list(i['guide'].shape)) > 0:\n for key in i.keys():\n b[key].append(i[key])\n else:\n print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape),\n i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n for key in b.keys():\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n return output\n\n\n<mask token>\n\n\ndef rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n df = pd.DataFrame(loadData[t])\n 
pd.to_numeric(df.cleavage_freq, errors='coerce')\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in df.to_dict('records'):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(ranks.index(float(line['cleavage_freq'])) / len(\n ranks))])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=1 if torch.\n cuda.is_available() else 0))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=1 if torch\n .cuda.is_available() else 0))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\ndef fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',\n target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n 
else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']\n ) == 0 else [1, 0]\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)) > 0 and sum(list(one_hot\n (line['target_sequence'], line['target_strand']).shape)\n ) > 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(label)])\n average_value.append(label)\n else:\n q += 1\n print(sum(list(torch.tensor([label]).shape)), sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)), sum(list(one_hot(\n line['target_sequence'], line['target_strand']).shape))\n )\n print(q)\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=4))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n 
threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\n<mask token>\n\n\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n average_values = dict()\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n if llabels[j] <= 0.01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1)\n if loutputs[j] <= 0.01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres += 1\n lr_precision, lr_recall, _ = precision_recall_curve(thislabel,\n thisoutput)\n average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),\n auc(lr_recall, lr_precision), pres / totalpres]\n return average_values\n\n\ndef accuracy(labels, outputs, percent=0.1):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +\n percent) >= loutputs[i]:\n correct += 1\n total += 1\n return correct / total\n\n\ndef percentError(outputs, labels):\n return torch.mean(torch.abs(labels - outputs) / labels)\n\n\ndef Test(net, dataset, device, crit, logpath=None):\n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss += 1\n correct += (predicted == labels).sum().item()\n loss += crit(outputs, labels)\n if logpath != None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' %\n (100 * correct / total))\n f.write(f'total: {total} 
correct: {correct}')\n f.write(f'loss: {loss / totalloss}')\n f.close()\n print('Accuracy of the network on the 10000 test images: %d %%' % (100 *\n correct / total))\n print(f'total: {total} correct: {correct}')\n print(f'loss: {loss / totalloss}')\n return 100 * correct / total\n\n\ndef getAllStudy():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['study_name']].add(row['grna_target_sequence'])\n except KeyError:\n alls[row['study_name']] = set(row['grna_target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\ndef getallGuide():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['grna_target_sequence']].add(row[\n 'target_sequence'])\n except KeyError:\n alls[row['grna_target_sequence']] = set(row[\n 'target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\ndef aboveandbelow(threshold):\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n above = 0\n total = 0\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[\n 'cleavage_freq'] != '':\n if float(row['cleavage_freq']) > threshold:\n above += 1\n total += 1\n print(f'Above: {above / total}%. 
Below: {(total - above) / total}')\n\n\ndef NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net,\n device, optim_time=None, logpath=None):\n net.to(device)\n criterion = crit\n optimizer = optim\n full_full_labels = None\n for i, data in enumerate(train_data, 0):\n if full_full_labels == None:\n full_full_labels = data[1].to(device)\n else:\n full_full_labels = torch.cat((full_full_labels, data[1].to(\n device)), 0)\n full_val_labels = None\n for i, data in enumerate(val_data, 0):\n if full_val_labels == None:\n full_val_labels = data[1].to(device)\n else:\n full_val_labels = torch.cat((full_val_labels, data[1].to(device\n )), 0)\n print('begin training')\n if logpath != None:\n f = open(logpath, 'w')\n best = 15\n bestval = 15\n bestepoch = 0\n e = 0\n times = list()\n for q in optim_time:\n optimizer = q[1]\n print(q[0])\n for epoch in range(q[0]):\n ftime = time.monotonic()\n random.shuffle(train_data)\n correct = 0\n total = 0\n running_loss = 0.0\n net.train()\n full_output = None\n full_labels = None\n full_full_output = None\n for i, data in enumerate(train_data, 0):\n inputs, labels = data[0], data[1].to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n running_loss += loss.item()\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n if full_labels == None:\n full_labels = labels\n else:\n full_labels = torch.cat((full_labels, labels), 0)\n w = {'loss': loss.item(), 'accuracy': accuracy(labels,\n outputs), 'percent error': percentError(outputs, labels)}\n wandb.log(w)\n if i % batch_per == batch_per - 1:\n print('[%d, %5d] loss: %.3f' % (e + 1, i + 1, \n running_loss / batch_per))\n wl = roc(full_labels, full_output)\n wandlog = {}\n for q in wl:\n wandlog[f'midepoch ROC_AUC'] = wl[q][0]\n 
wandlog[f'midepoch PR_AUC'] = wl[q][1]\n wandlog[f'midepoch threshhold accuracy'] = wl[q][2]\n w.update({'midepoch loss': loss.item(),\n 'midepoch accuracy': accuracy(labels, outputs),\n 'midepoch percent error': percentError(outputs,\n labels)})\n wandb.log(w)\n wandb.log(wandlog)\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output,\n full_output), 0)\n full_output = None\n full_labels = None\n running_loss = 0\n correct = 0\n total = 0\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output, full_output), 0\n )\n wl = roc(full_full_labels, full_full_output)\n w = {}\n for q in wl:\n w[f'epoch ROC_AUC'] = wl[q][0]\n w[f'epoch PR_AUC'] = wl[q][1]\n w[f'epoch threshhold accuracy'] = wl[q][2]\n w.update({'epoch loss': loss.item(), 'epoch accuracy': accuracy\n (full_full_labels, full_full_output), 'epoch percent error':\n percentError(full_full_output, full_full_labels), 'label':\n labels.flatten()[0], 'output': outputs.flatten()[0]})\n wandb.log(w)\n if w['epoch accuracy'] == 1:\n PATH = f'.accuracynet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch PR_AUC'] == 1:\n PATH = f'.PRnet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch ROC_AUC'] == 1:\n PATH = f'.ROCnet.pth'\n torch.save(net.state_dict(), PATH)\n full_output = None\n full_full_output = None\n running_loss = 0\n correct = 0\n total = 0\n running_loss = 0\n net.eval()\n correct = 0\n total = 0\n if e % 10 == 9:\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n for i, data in enumerate(val_data, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n running_loss += loss.item()\n total += labels.size(0)\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n print(\n f'Validation loss for Epoch [{e + 1}]: {running_loss / 
total}')\n wandlog = {}\n if bestval <= running_loss / total:\n e = e\n else:\n bestepoch = e\n bestval = running_loss / total\n running_loss = 0\n correct = 0\n total = 0\n times.append(time.monotonic() - ftime)\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n print('time for epoch: ', times[-1], 'seconds')\n if logpath != None:\n f.write(f'time for epoch: {times[-1]}, seconds')\n e += 1\n print('Finished Training')\n print('average time per epoch: ', sum(times) / len(times), 'seconds')\n if logpath != None:\n f.write('Finished Training')\n f.write(f'average time per epoch: {sum(times) / len(times)} seconds')\n f.close()\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',\n 'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']\n test_guides = test\n if test == None:\n test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',\n 'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',\n 'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',\n 'GCCGGAGGGGTTTGCACAGAAGG']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef byTarget(data, train=0.7, val=0.1, test=0.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set]\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = ['Anderson', 'Ran']\n test_studies = test\n if test == None:\n test_studies = ['Kim', 'Tsai', 'Cho']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = data.lower()\n for n in data:\n one_hot = torch.zeros((1, 4))\n if n == 'a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = 
copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)\n if list(sequence.size())[0] > 23:\n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1])\n return sequence\n\n\ndef dataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor([float(line['cleavage_freq'])])])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(thisdata, batch, True,\n num_workers=4 if torch.cuda.is_available() else 4))\n print(thisdata[0][0][0].size())\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(thisdata, batch, False,\n num_workers=4 if torch.cuda.is_available() else 4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = 
torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n\n def __init__(self, thisdata):\n self.thisdata = thisdata\n\n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':\n torch.squeeze(item[1]).unsqueeze_(dim=0)}\n return sample\n\n\ndef collate_fn(batch):\n output = {}\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)\n ) > 0 and sum(list(i['guide'].shape)) > 0:\n for key in i.keys():\n b[key].append(i[key])\n else:\n print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape),\n i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n for key in b.keys():\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n return output\n\n\n<mask token>\n\n\ndef rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n df = pd.DataFrame(loadData[t])\n pd.to_numeric(df.cleavage_freq, errors='coerce')\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in 
df.to_dict('records'):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(ranks.index(float(line['cleavage_freq'])) / len(\n ranks))])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=1 if torch.\n cuda.is_available() else 0))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=1 if torch\n .cuda.is_available() else 0))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\ndef fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',\n target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if 
line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']\n ) == 0 else [1, 0]\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)) > 0 and sum(list(one_hot\n (line['target_sequence'], line['target_strand']).shape)\n ) > 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(label)])\n average_value.append(label)\n else:\n q += 1\n print(sum(list(torch.tensor([label]).shape)), sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)), sum(list(one_hot(\n line['target_sequence'], line['target_strand']).shape))\n )\n print(q)\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=4))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', 
time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\n<mask token>\n\n\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n average_values = dict()\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n if llabels[j] <= 0.01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1)\n if loutputs[j] <= 0.01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres += 1\n lr_precision, lr_recall, _ = precision_recall_curve(thislabel,\n thisoutput)\n average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),\n auc(lr_recall, lr_precision), pres / totalpres]\n return average_values\n\n\ndef accuracy(labels, outputs, percent=0.1):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +\n percent) >= loutputs[i]:\n correct += 1\n total += 1\n return correct / total\n\n\ndef percentError(outputs, labels):\n return torch.mean(torch.abs(labels - outputs) / labels)\n\n\ndef Test(net, dataset, device, crit, logpath=None):\n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss += 1\n correct += (predicted == labels).sum().item()\n loss += crit(outputs, labels)\n if logpath != None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' %\n (100 * correct / total))\n f.write(f'total: {total} correct: {correct}')\n f.write(f'loss: {loss / totalloss}')\n f.close()\n print('Accuracy of the network on the 10000 test images: %d %%' % (100 *\n correct / total))\n print(f'total: 
{total} correct: {correct}')\n print(f'loss: {loss / totalloss}')\n return 100 * correct / total\n\n\ndef getAllStudy():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['study_name']].add(row['grna_target_sequence'])\n except KeyError:\n alls[row['study_name']] = set(row['grna_target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\ndef getallGuide():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['grna_target_sequence']].add(row[\n 'target_sequence'])\n except KeyError:\n alls[row['grna_target_sequence']] = set(row[\n 'target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\ndef aboveandbelow(threshold):\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n above = 0\n total = 0\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[\n 'cleavage_freq'] != '':\n if float(row['cleavage_freq']) > threshold:\n above += 1\n total += 1\n print(f'Above: {above / total}%. 
Below: {(total - above) / total}')\n\n\ndef NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net,\n device, optim_time=None, logpath=None):\n net.to(device)\n criterion = crit\n optimizer = optim\n full_full_labels = None\n for i, data in enumerate(train_data, 0):\n if full_full_labels == None:\n full_full_labels = data[1].to(device)\n else:\n full_full_labels = torch.cat((full_full_labels, data[1].to(\n device)), 0)\n full_val_labels = None\n for i, data in enumerate(val_data, 0):\n if full_val_labels == None:\n full_val_labels = data[1].to(device)\n else:\n full_val_labels = torch.cat((full_val_labels, data[1].to(device\n )), 0)\n print('begin training')\n if logpath != None:\n f = open(logpath, 'w')\n best = 15\n bestval = 15\n bestepoch = 0\n e = 0\n times = list()\n for q in optim_time:\n optimizer = q[1]\n print(q[0])\n for epoch in range(q[0]):\n ftime = time.monotonic()\n random.shuffle(train_data)\n correct = 0\n total = 0\n running_loss = 0.0\n net.train()\n full_output = None\n full_labels = None\n full_full_output = None\n for i, data in enumerate(train_data, 0):\n inputs, labels = data[0], data[1].to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n running_loss += loss.item()\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n if full_labels == None:\n full_labels = labels\n else:\n full_labels = torch.cat((full_labels, labels), 0)\n w = {'loss': loss.item(), 'accuracy': accuracy(labels,\n outputs), 'percent error': percentError(outputs, labels)}\n wandb.log(w)\n if i % batch_per == batch_per - 1:\n print('[%d, %5d] loss: %.3f' % (e + 1, i + 1, \n running_loss / batch_per))\n wl = roc(full_labels, full_output)\n wandlog = {}\n for q in wl:\n wandlog[f'midepoch ROC_AUC'] = wl[q][0]\n 
wandlog[f'midepoch PR_AUC'] = wl[q][1]\n wandlog[f'midepoch threshhold accuracy'] = wl[q][2]\n w.update({'midepoch loss': loss.item(),\n 'midepoch accuracy': accuracy(labels, outputs),\n 'midepoch percent error': percentError(outputs,\n labels)})\n wandb.log(w)\n wandb.log(wandlog)\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output,\n full_output), 0)\n full_output = None\n full_labels = None\n running_loss = 0\n correct = 0\n total = 0\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output, full_output), 0\n )\n wl = roc(full_full_labels, full_full_output)\n w = {}\n for q in wl:\n w[f'epoch ROC_AUC'] = wl[q][0]\n w[f'epoch PR_AUC'] = wl[q][1]\n w[f'epoch threshhold accuracy'] = wl[q][2]\n w.update({'epoch loss': loss.item(), 'epoch accuracy': accuracy\n (full_full_labels, full_full_output), 'epoch percent error':\n percentError(full_full_output, full_full_labels), 'label':\n labels.flatten()[0], 'output': outputs.flatten()[0]})\n wandb.log(w)\n if w['epoch accuracy'] == 1:\n PATH = f'.accuracynet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch PR_AUC'] == 1:\n PATH = f'.PRnet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch ROC_AUC'] == 1:\n PATH = f'.ROCnet.pth'\n torch.save(net.state_dict(), PATH)\n full_output = None\n full_full_output = None\n running_loss = 0\n correct = 0\n total = 0\n running_loss = 0\n net.eval()\n correct = 0\n total = 0\n if e % 10 == 9:\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n for i, data in enumerate(val_data, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n running_loss += loss.item()\n total += labels.size(0)\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n print(\n f'Validation loss for Epoch [{e + 1}]: {running_loss / 
total}')\n wandlog = {}\n if bestval <= running_loss / total:\n e = e\n else:\n bestepoch = e\n bestval = running_loss / total\n running_loss = 0\n correct = 0\n total = 0\n times.append(time.monotonic() - ftime)\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n print('time for epoch: ', times[-1], 'seconds')\n if logpath != None:\n f.write(f'time for epoch: {times[-1]}, seconds')\n e += 1\n print('Finished Training')\n print('average time per epoch: ', sum(times) / len(times), 'seconds')\n if logpath != None:\n f.write('Finished Training')\n f.write(f'average time per epoch: {sum(times) / len(times)} seconds')\n f.close()\n return\n\n\ndef compute_dataframe(df: pd.DataFrame, checkpoint_path):\n model = checkpoint_path\n targets, targets_s, guides, guides_s = df.target_sequence.tolist(\n ), df.target_strand.tolist(), df.grna_target_sequence.tolist(\n ), df.grna_target_strand.tolist()\n preds = []\n for guide, target, guide_s, target_s in zip(guides, targets, guides_s,\n targets_s):\n pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])\n preds.append(pred.item())\n df['pred'] = preds\n return df\n",
"step-5": "import random\nimport copy\nrandom.seed(42)\nimport csv\nimport torch\nimport time\nimport statistics\nimport wandb\nfrom model import Net, LinearRegression, LogisticRegression\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = [\n \"GGGTGGGGGGAGTTTGCTCCTGG\",\n \"GACCCCCTCCACCCCGCCTCCGG\",\n \"GGCCTCCCCAAAGCCTGGCCAGG\",\n \"GAACACAAAGCATAGACTGCGGG\"\n \n ]\n test_guides = test\n if test==None:\n test_guides = [\n \"GCAAAACTCAACCCTACCCCAGG\",\n \"GGCCCAGACTGAGCACGTGATGG\",\n \"GGGAAAGACCCAGCATCCGTGGG\",\n \"GGAATCCCTTCTGCAGCACCTGG\",\n \"GTGAGTGAGTGTGTGCGTGTGGG\",\n \"GATGATGATGCCCCGGGCGTTGG\",\n \"GCCGGAGGGGTTTGCACAGAAGG\"\n ]\n \n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1., 0.])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else: \n train_set.append(pair)\n return [train_set, val_set, test_set] \n\ndef byTarget(data, train=.7, val=.1, test=.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set] \n\n\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = [\n 'Anderson',\n 'Ran',\n \n ]\n test_studies = test\n if test==None:\n test_studies = [\n 'Kim',\n 'Tsai',\n 'Cho',\n ]\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1., 0.])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else: \n train_set.append(pair)\n return [train_set, val_set, test_set] \n\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = 
data.lower()\n for n in data:\n \n one_hot = torch.zeros((1, 4))\n if n =='a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0) \n if list(sequence.size())[0] > 23: \n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1]) \n return sequence \n\n \n# import numpy as np\n\ndef dataLoader(file=\"crisprsql.csv\", batch=64, mode=\"target\"):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == \"study\":\n loadData = byStudy(d)\n elif mode == \"guide\":\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n thisdata.append([\n [one_hot(line['grna_target_sequence'], line['grna_target_strand']), \n one_hot(line['target_sequence'], line[\"target_strand\"])],\n torch.tensor([float(line['cleavage_freq'])])])\n average_value.append(float(line['cleavage_freq'])) \n # if line \n\n\n # mode = 0\n # zero = 0\n # for p in average_value:\n # if p == statistics.mode(average_value):\n # mode+=1\n # if p <0:\n # zero+=1 \n # print(f\"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\\nMedian: {statistics.median(average_value)}.\\nMode: {statistics.mode(average_value)} with {mode} datapoint.\\nstandard deviation: {statistics.pstdev(average_value)}.\\nlowest value: {min(average_value)}.\\nHighest value: {max(average_value)}\\n{zero} datapoints below zero\\n\\n\")\n if train == True:\n 
dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(4 if torch.cuda.is_available() else 4)))\n print(thisdata[0][0][0].size())\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(4 if torch.cuda.is_available() else 4)))\n \n thisdata1 = list() \n for i in range(int(len(thisdata)/batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n \n if ones == None:\n ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)\n twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)\n threes = thisdata[(i * batch) + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0) \n twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0) \n threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0) \n \n thisdata1.append([[ones, twos], threes]) \n\n\n data.append(thisdata1) \n \n print('time to load data: ', time.monotonic() - ftime, 'seconds') \n \n\n return [data, dl]\n\n# from scipy.stats import rankdata\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n def __init__(self, thisdata):\n self.thisdata = thisdata\n \n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {\n # (23, 4)\n 'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0),\n # (1)\n 'cfd': torch.squeeze(item[1]).unsqueeze_(dim=0)\n }\n return sample\n\n \ndef collate_fn(batch):\n # (256, 23, 4)\n # (256, 1)\n # print(sum(list(batch[0]['cfd'].shape)), sum(list(batch[0]['target'].shape, sum(list(batch[0]['guide'].shape)))))\n\n output = {}\n\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)) > 0 and sum(list(i['guide'].shape)) > 0 :\n for key in i.keys():\n b[key].append(i[key])\n else:\n 
print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape), i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n\n for key in b.keys():\n # print(b[key])s\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n\n\n\n\n\n\n # output = {\n # key: torch.stack([batch[i][key] for i in range(len(batch)) \\\n # if all( len(batch[i][k].shape) > 0 for k in batch[0].keys() )\n # ])\n # for key in batch[0].keys()\n # }\n\n return output\nimport pandas as pd\n\ndef rankDataLoader(file=\"crisprsql.csv\", batch=64, mode=\"target\"):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == \"study\":\n loadData = byStudy(d)\n elif mode == \"guide\":\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n \n df = pd.DataFrame(loadData[t])\n\n # df.drop(df.columns.difference(['cleavage_freq']), 1, inplace=True)\n # pd.to_numeric(df['cleavage_freq']\n pd.to_numeric(df.cleavage_freq, errors='coerce')\n # cleave = df.cleavage_freq\n \n # df_ = pd.DataFrame(loadData[t]).drop(['cleavage_freq'], 1, inplace=True)\n # df_.join(cleave)\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in df.to_dict(\"records\"):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n thisdata.append([\n [one_hot(line['grna_target_sequence'], line['grna_target_strand']), \n one_hot(line['target_sequence'], line[\"target_strand\"])],\n torch.tensor(ranks.index(float(line['cleavage_freq'])) / len(ranks))])\n average_value.append(float(line['cleavage_freq'])) \n # if line \n\n\n # mode = 0\n # zero = 0\n # for p in 
average_value:\n # if p == statistics.mode(average_value):\n # mode+=1\n # if p <0:\n # zero+=1 \n # print(f\"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\\nMedian: {statistics.median(average_value)}.\\nMode: {statistics.mode(average_value)} with {mode} datapoint.\\nstandard deviation: {statistics.pstdev(average_value)}.\\nlowest value: {min(average_value)}.\\nHighest value: {max(average_value)}\\n{zero} datapoints below zero\\n\\n\")\n if train == True:\n # dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(1 if torch.cuda.is_available() else 0)))\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))\n \n # print(thisdata[0][0][0])\n train = False\n else:\n # dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(1 if torch.cuda.is_available() else 0)))\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))\n # import pdb; pdb.set_trace()\n thisdata1 = list() \n for i in range(int(len(thisdata)/batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n \n if ones == None:\n ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)\n twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)\n threes = thisdata[(i * batch) + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0) \n twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0) \n threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0) \n \n thisdata1.append([[ones, twos], threes]) \n\n\n data.append(thisdata1) \n \n print('time to load data: ', time.monotonic() - ftime, 'seconds') \n \n return [data, dl]\n\n\n\n\n\n\n\ndef 
fullDataLoader(file=\"augmentcrisprsql.csv\", batch=64, mode=\"target\", target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == \"study\":\n loadData = byStudy(d)\n elif mode == \"guide\":\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n \n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']) == 0 else [1, 0]\n\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)) > 0 and sum(list(one_hot(line['target_sequence'], line[\"target_strand\"]).shape)) > 0:\n thisdata.append([\n [one_hot(line['grna_target_sequence'], line['grna_target_strand']), \n one_hot(line['target_sequence'], line[\"target_strand\"])],\n torch.tensor(label)])\n average_value.append(label)\n # print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line[\"target_strand\"]).shape)))\n \n else:\n q+=1\n print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line[\"target_strand\"]).shape)))\n # print(torch.tensor([label), len(torch.tensor([label]).shape))\n print(q)\n # if line \n\n\n # mode = 0\n # zero = 0\n # for p in average_value:\n # if p == statistics.mode(average_value):\n # mode+=1\n # if p <0:\n # zero+=1 \n # print(f\"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\\nMedian: 
{statistics.median(average_value)}.\\nMode: {statistics.mode(average_value)} with {mode} datapoint.\\nstandard deviation: {statistics.pstdev(average_value)}.\\nlowest value: {min(average_value)}.\\nHighest value: {max(average_value)}\\n{zero} datapoints below zero\\n\\n\")\n if train == True:\n # dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(1 if torch.cuda.is_available() else 0)))\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=4))\n \n # print(thisdata[0][0][0])\n train = False\n else:\n # dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(1 if torch.cuda.is_available() else 0)))\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=4))\n # import pdb; pdb.set_trace()\n thisdata1 = list() \n for i in range(int(len(thisdata)/batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n \n if ones == None:\n ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)\n twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)\n threes = thisdata[(i * batch) + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0) \n twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0) \n threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0) \n \n thisdata1.append([[ones, twos], threes]) \n\n\n data.append(thisdata1) \n\n print('time to load data: ', time.monotonic() - ftime, 'seconds') \n\n return [data, dl]\n\n\n\n\n\n\n\n\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import auc\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n 
average_values = dict()\n # print(len(llabels), len(loutputs))\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n\n if llabels[j] <= .01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1) \n if loutputs[j] <= .01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres +=1 \n lr_precision, lr_recall, _ = precision_recall_curve(thislabel, thisoutput)\n average_values[.1/i] = [roc_auc_score(thislabel, thisoutput), auc(lr_recall, lr_precision), pres/totalpres]\n return average_values \n\n\ndef accuracy(labels, outputs, percent=.10):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n # print(llabels)\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 + percent) >= loutputs[i]:\n correct +=1\n total += 1\n\n return correct / total \n\n\ndef percentError(outputs, labels):\n return torch.mean(torch.abs(labels - outputs) / labels)\n\n\n \n\n\n \n\n\ndef Test(net, dataset, device, crit, logpath=None):\n \n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device) \n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss+=1\n correct += (predicted == labels).sum().item()\n loss+=crit(outputs, labels)\n if logpath!= None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n f.write(f\"total: {total} correct: {correct}\")\n f.write(f'loss: {loss/totalloss}')\n f.close()\n print('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n print(f\"total: {total} correct: {correct}\") \n print(f'loss: {loss/totalloss}')\n return 100 * correct / total \n\ndef 
getAllStudy():\n with open(\"crisprsql.csv\") as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in [\"C\", 'G', 'A', \"T\"]:\n try:\n alls[row['study_name']].add(row['grna_target_sequence']) \n except KeyError:\n alls[row[\"study_name\"]] = set(row['grna_target_sequence']) \n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n \n\ndef getallGuide():\n with open(\"crisprsql.csv\") as f:\n data = csv.DictReader(f)\n alls = dict()\n\n for row in data:\n if row['grna_target_sequence'] not in [\"C\", 'G', 'A', \"T\"]:\n try:\n alls[row['grna_target_sequence']].add(row['target_sequence']) \n except KeyError:\n alls[row[\"grna_target_sequence\"]] = set(row['target_sequence']) \n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n \n\ndef aboveandbelow(threshold):\n with open(\"crisprsql.csv\") as f:\n data = csv.DictReader(f)\n alls = dict()\n above = 0\n total = 0\n for row in data:\n if row['grna_target_sequence'] not in [\"C\", 'G', 'A', \"T\"] and row['cleavage_freq'] != '':\n if float(row['cleavage_freq']) > threshold:\n above+=1\n total+=1\n \n\n print(f'Above: {above / total}%. 
Below: {(total - above) / total}')\n\n\n\n\n\n\n\ndef NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net, device, optim_time=None, logpath=None):\n net.to(device)\n #def optim, loss, and init graph data\n criterion = crit\n optimizer = optim\n # get all labels for ROC\n full_full_labels = None\n for i, data in enumerate(train_data, 0):\n if full_full_labels == None:\n full_full_labels = data[1].to(device) \n else:\n full_full_labels = torch.cat((full_full_labels, data[1].to(device)), 0) \n full_val_labels = None \n for i, data in enumerate(val_data, 0):\n if full_val_labels == None:\n full_val_labels = data[1].to(device) \n else:\n full_val_labels = torch.cat((full_val_labels, data[1].to(device)), 0) \n print(\"begin training\")\n if logpath!= None:\n f = open(logpath, 'w')\n #these go down, and random loss is ~2.303 so 15 will be replaced\n best = 15\n bestval = 15\n bestepoch = 0\n e = 0\n # begin training loop, larget loop is for lr scedule\n times = list()\n # bestnet = LogisticRegression()\n # bestnet.load_state_dict(copy.deepcopy(net.state_dict()))\n for q in optim_time:\n optimizer = q[1]\n print(q[0])\n # net.load_state_dict(copy.deepcopy(bestnet.state_dict())\n # print(\n # 'params', [p for p in net.parameters()], \n # '\\ngrads', [p.grad for p in net.parameters()] \n # )\n # epoch loop\n for epoch in range(q[0]): # loop over the dataset multiple times\n ftime = time.monotonic()\n random.shuffle(train_data)\n correct = 0\n total = 0\n running_loss = 0.0\n # train mode\n net.train()\n full_output = None\n full_labels = None\n full_full_output = None\n \n for i, data in enumerate(train_data, 0):\n \n # train step\n inputs, labels = data[0], data[1].to(device) \n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n # t = time.monotonic()\n outputs = net(inputs)\n # print(time.monotonic - t, \" seconds for 512 outputs\")\n loss = criterion(outputs, labels)\n loss.backward()\n # import pdb; 
pdb.set_trace()\n # things to look at:\n # - loss\n # - parameters\n # - inputs\n # - grads\n # if e % 300 == 299:\n\n # print(\n # 'loss', loss, \n # # '\\ninputs', inputs,\n # '\\nlabels', labels,\n # '\\noutputs', outputs\n # )\n \n optimizer.step()\n _, predicted = torch.max(outputs.data, 1)\n total+= labels.size(0) \n correct += (predicted == labels).sum().item()\n # print()\n \n running_loss += loss.item()\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n\n if full_labels == None:\n full_labels = labels\n else:\n full_labels = torch.cat((full_labels, labels), 0) \n # w = {f'output {i}': outputs.flatten()[i] for i in range(outputs.flatten().size(0))}\n # w.update({\n # f'label {i}': labels.flatten()[i] for i in range(labels.flatten().size(0))\n # })\n w = ({'loss': loss.item(), \n 'accuracy': accuracy(labels, outputs),\n 'percent error': percentError(outputs, labels)})\n wandb.log(\n # {\n # 'loss': loss.item(), \n # # 'params': [p for p in net.parameters()], \n # # 'grads': [p.grad for p in net.parameters()], \n # # 'inputs': inputs,\n # f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),\n # f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),\n # 'accuracy': accuracy(labels, outputs)\n # }\n w\n )\n # print statistics\n if i % batch_per == batch_per - 1: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (e + 1, i + 1, running_loss / batch_per))\n # best = min(best, running_loss / batch_per)\n \n # print('Accuracy of the network on the ' + str(batch_per) + 'th update: %d %%' % (\n # 100 * correct / total))\n \n wl = roc(full_labels, full_output)\n wandlog = {}\n for q in wl:\n wandlog[f\"midepoch ROC_AUC\"] = wl[q][0]\n wandlog[f\"midepoch PR_AUC\"] = wl[q][1]\n wandlog[f\"midepoch threshhold accuracy\"] = wl[q][2]\n\n\n\n # wandlog.update({\n # \"LOSS\": running_loss / batch_per, \n # \"TYPE\": \"TRAIN\", \n # 'EPOCH': e+1, \n # 
'UPDATE': (e*len(train_data)) + i + 1})\n w.update({'midepoch loss': loss.item(), \n 'midepoch accuracy': accuracy(labels, outputs),\n 'midepoch percent error': percentError(outputs, labels)})\n wandb.log(\n # {\n # 'loss': loss.item(), \n # # 'params': [p for p in net.parameters()], \n # # 'grads': [p.grad for p in net.parameters()], \n # # 'inputs': inputs,\n # f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),\n # f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),\n # 'accuracy': accuracy(labels, outputs)\n # }\n w\n )\n wandb.log(wandlog)\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output, full_output), 0) \n \n full_output = None\n full_labels = None\n\n\n running_loss = 0\n correct = 0\n total = 0\n # print('[%d] loss: %.20f' %\n # (epoch + 1, running_loss / total))\n # if logpath != None:\n # f.write('[%d] loss: %.20f' %\n # (epoch + 1, running_loss / total)) \n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output, full_output), 0) \n # ROC is commented out when training on 10 samples\n wl = roc(full_full_labels, full_full_output)\n w = {}\n\n for q in wl:\n w[f\"epoch ROC_AUC\"] = wl[q][0]\n w[f\"epoch PR_AUC\"] = wl[q][1]\n w[f\"epoch threshhold accuracy\"] = wl[q][2]\n # wandlog.update({\n # \"LOSS\": running_loss / batch_per, \n # \"TYPE\": \"TRAIN\", \n # 'EPOCH': e+1, \n # 'UPDATE': (e + 1) *len(train_data)}) \n w.update({'epoch loss': loss.item(), \n 'epoch accuracy': accuracy(full_full_labels, full_full_output),\n 'epoch percent error': percentError(full_full_output, full_full_labels),\n 'label': labels.flatten()[0],\n 'output': outputs.flatten()[0]})\n wandb.log(\n # {\n # 'loss': loss.item(), \n # # 'params': [p for p in net.parameters()], \n # # 'grads': [p.grad for p in net.parameters()], \n # # 'inputs': inputs,\n # f'label {i}': labels.flatten()[i] for i in 
len(labels.flatten().size(0)),\n # f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),\n # 'accuracy': accuracy(labels, outputs)\n # }\n w\n ) \n if w['epoch accuracy'] == 1:\n\n PATH = f'.accuracynet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch PR_AUC'] == 1:\n\n PATH = f'.PRnet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch ROC_AUC'] == 1:\n\n PATH = f'.ROCnet.pth'\n torch.save(net.state_dict(), PATH)\n\n\n # wandb.log(wandlog) \n\n full_output = None\n full_full_output = None\n running_loss = 0\n correct = 0\n total = 0 \n running_loss = 0\n net.eval()\n correct = 0\n total = 0\n if e % 10 == 9:\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n #check val set\n for i, data in enumerate(val_data, 0):\n inputs, labels = data[0], data[1].to(device) \n outputs = net(inputs)\n loss = criterion(outputs, labels) \n loss.backward()\n running_loss += loss.item()\n total+= labels.size(0) \n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0) \n # if e % 300 == 299:\n print(f'Validation loss for Epoch [{e +1}]: {running_loss/total}') \n # if logpath != None:\n # f.write(f'Validation loss for Epoch [{epoch}]: {running_loss/total}') \n \n # wl = roc(full_val_labels, full_output)\n wandlog = {}\n # for q in wl:\n # wandlog[f\"{q} ROC_AUC\"] = wl[q][0]\n # wandlog[f\"{q} PR_AUC\"] = wl[q][1]\n # wandlog[f\"{q} ACCURACY\"] = wl[q][2]\n # wandlog.update({\n # \"LOSS\": running_loss / len(val_data), \n # \"TYPE\": \"VAL\", \n # 'EPOCH': e+1, \n # 'UPDATE': (e + 1)*len(train_data)}) \n # wandb.log(wandlog) \n # best = min(best, running_loss / total)\n # early stop just goes to the next lr change checkpoint\n \n if bestval <= running_loss / total:\n # if epoch >= 5:\n # print('Early Stop')\n # print(f\"Best Validation loss: {bestval}\")\n # print(f\"Current Validation loss: {running_loss / total}\")\n \n e = e\n # break\n # continue\n # return\n else:\n # 
bestnet.load_state_dict(copy.deepcopy(net.state_dict()))\n bestepoch = e\n bestval = running_loss / total\n\n running_loss = 0\n correct = 0\n total = 0\n times.append(time.monotonic() - ftime)\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n # if e % 300 == 299:\n print('time for epoch: ', times[-1], 'seconds')\n if logpath != None:\n f.write(f'time for epoch: {times[-1]}, seconds') \n e+=1\n \n\n\n\n\n # finish training. in future dont plot and save here just return them\n print('Finished Training')\n print('average time per epoch: ', sum(times)/len(times), 'seconds')\n if logpath != None:\n f.write('Finished Training')\n f.write(f'average time per epoch: {sum(times)/len(times)} seconds')\n f.close()\n \n return \n\n\n# def compute_dataframe(df: pd.DataFrame, checkpoint_path: str):\n# model = LogisticRegression().load_state_dict(torch.load(checkpoint_path, map_location=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")))\n# targets, targets_s, guides, guides_s = df.target_sequence.tolist(), df.target_strand.tolist(), df.grna_target_sequence.tolist(), df.grna_target_strand.tolist()\n# preds = []\n# for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):\n# pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])\n# preds.append(pred.item())\n# df['pred'] = preds\n# return df\n\ndef compute_dataframe(df: pd.DataFrame, checkpoint_path):\n model = checkpoint_path\n targets, targets_s, guides, guides_s = df.target_sequence.tolist(), df.target_strand.tolist(), df.grna_target_sequence.tolist(), df.grna_target_strand.tolist()\n preds = []\n for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):\n pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])\n preds.append(pred.item())\n df['pred'] = preds\n return df",
"step-ids": [
15,
16,
19,
21,
24
]
}
|
[
15,
16,
19,
21,
24
] |
# BotSetup.py
from websockets.exceptions import InvalidStatusCode
from dokbot.DokBotCog import DokBotCog
from events.EventCog import EventCog
from dotenv import load_dotenv
from datetime import datetime
from .DokBot import DokBot
import utils.Logger as Log
import logging
import os
import sys
import traceback
import discord
def run() -> None:
os.environ['TZ'] = 'Europe/Brussels'
if sys.platform != 'win32':
from time import tzset
tzset()
print(datetime.now())
load_dotenv()
Log.setup()
token = os.getenv('DISCORD_BOT_TOKEN')
assert token, "Could not find any dokbot bot token"
intents = discord.Intents.default()
intents.members = True
prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'
bot = DokBot(command_prefix=prefix, intents=intents)
bot.add_cog(DokBotCog(bot))
bot.add_cog(EventCog(bot))
@bot.event
async def on_ready():
logging.getLogger().info(f'{bot.user.name} has connected.')
#
# @discord_client.event
# async def on_message(message: discord.Message) -> None:
# if not discord_client.is_ready() or message.author == discord_client.user:
# return
# try:
# await command_runner.run_command_for_message(message)
# except Exception as ex:
# await handle_exception(ex, author=message.author, content=message.content)
#
# @discord_client.event
# async def on_raw_reaction_add(reaction_event: discord.RawReactionActionEvent) -> None:
# if not discord_client.is_ready() or reaction_event.user_id == discord_client.user.id or reaction_event.emoji.name not in EMOJI_SIGNUP_STATUS.keys():
# return
# try:
# await signup_character(client=discord_client, reaction_event=reaction_event)
# except Exception as ex:
# user = await discord_client.fetch_user(reaction_event.user_id)
# await handle_exception(ex, author=user, content="Raid signup failed")
#
# async def handle_exception(ex: Exception, author: discord.User, content: str) -> None:
# Log.error(f"{author}, {content}, {ex}\n{traceback.format_exc()}")
# if isinstance(ex, BotException) and not isinstance(ex, InternalBotException):
# await author.send(ex.message)
# else:
# global maintainer
# if maintainer is None:
# maintainer = await discord_client.fetch_user(MAINTAINER_ID)
# await author.send(f"There were internal difficulties. Sending a message to {maintainer.display_name}")
# await maintainer.send(f'{author.display_name}, {content}, {ex}')
#
try:
bot.run(token)
except InvalidStatusCode as e:
error_message = f"Could not start client {e}\n{traceback.format_exc()}"
Log.error(error_message)
|
normal
|
{
"blob_id": "a7123fa221555b15162dbab0d93a86965190b805",
"index": 4141,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run() ->None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n print(datetime.now())\n load_dotenv()\n Log.setup()\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, 'Could not find any dokbot bot token'\n intents = discord.Intents.default()\n intents.members = True\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f'Could not start client {e}\\n{traceback.format_exc()}'\n Log.error(error_message)\n",
"step-3": "from websockets.exceptions import InvalidStatusCode\nfrom dokbot.DokBotCog import DokBotCog\nfrom events.EventCog import EventCog\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nfrom .DokBot import DokBot\nimport utils.Logger as Log\nimport logging\nimport os\nimport sys\nimport traceback\nimport discord\n\n\ndef run() ->None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n print(datetime.now())\n load_dotenv()\n Log.setup()\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, 'Could not find any dokbot bot token'\n intents = discord.Intents.default()\n intents.members = True\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f'Could not start client {e}\\n{traceback.format_exc()}'\n Log.error(error_message)\n",
"step-4": "# BotSetup.py\nfrom websockets.exceptions import InvalidStatusCode\nfrom dokbot.DokBotCog import DokBotCog\nfrom events.EventCog import EventCog\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nfrom .DokBot import DokBot\n\nimport utils.Logger as Log\nimport logging\nimport os\nimport sys\nimport traceback\nimport discord\n\n\ndef run() -> None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n\n print(datetime.now())\n load_dotenv()\n Log.setup()\n\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, \"Could not find any dokbot bot token\"\n\n intents = discord.Intents.default()\n intents.members = True\n\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n\n #\n # @discord_client.event\n # async def on_message(message: discord.Message) -> None:\n # if not discord_client.is_ready() or message.author == discord_client.user:\n # return\n # try:\n # await command_runner.run_command_for_message(message)\n # except Exception as ex:\n # await handle_exception(ex, author=message.author, content=message.content)\n #\n # @discord_client.event\n # async def on_raw_reaction_add(reaction_event: discord.RawReactionActionEvent) -> None:\n # if not discord_client.is_ready() or reaction_event.user_id == discord_client.user.id or reaction_event.emoji.name not in EMOJI_SIGNUP_STATUS.keys():\n # return\n # try:\n # await signup_character(client=discord_client, reaction_event=reaction_event)\n # except Exception as ex:\n # user = await discord_client.fetch_user(reaction_event.user_id)\n # await handle_exception(ex, author=user, content=\"Raid signup failed\")\n #\n # async def handle_exception(ex: Exception, author: discord.User, content: str) -> None:\n # 
Log.error(f\"{author}, {content}, {ex}\\n{traceback.format_exc()}\")\n # if isinstance(ex, BotException) and not isinstance(ex, InternalBotException):\n # await author.send(ex.message)\n # else:\n # global maintainer\n # if maintainer is None:\n # maintainer = await discord_client.fetch_user(MAINTAINER_ID)\n # await author.send(f\"There were internal difficulties. Sending a message to {maintainer.display_name}\")\n # await maintainer.send(f'{author.display_name}, {content}, {ex}')\n #\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f\"Could not start client {e}\\n{traceback.format_exc()}\"\n Log.error(error_message)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
import torch
from torch.utils.data import DataLoader
from nn_model import NNModel
def train(dataset: 'Dataset', epochs: int=10):
loader = DataLoader(dataset, batch_size=2, shuffle=True)
model = NNModel(n_input=2, n_output=3)
# model.to(device='cpu')
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
start_tm = time.time()
for epoch in range(1, epochs+1):
train_loss = 0.0
train_acc = 0
for x, y in loader:
optimizer.zero_grad()
y_pred = model(x)
y = torch.max(torch.squeeze(y, dim=1), dim=1).indices
loss = criterion(y_pred, y)
loss.backward()
optimizer.step()
train_loss += loss.item()
train_acc += (y_pred.argmax(1) == y).sum().item()
print(f'[epoch {epoch:02d}]\tloss:{train_loss}\taccuracy:{train_acc}')
finish_tm = time.time()
print(f'train finished.({finish_tm-start_tm}sec)')
|
normal
|
{
"blob_id": "68bcb76a9c736e21cc1f54c6343c72b11e575b5d",
"index": 5093,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef train(dataset: 'Dataset', epochs: int=10):\n loader = DataLoader(dataset, batch_size=2, shuffle=True)\n model = NNModel(n_input=2, n_output=3)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n criterion = torch.nn.CrossEntropyLoss()\n start_tm = time.time()\n for epoch in range(1, epochs + 1):\n train_loss = 0.0\n train_acc = 0\n for x, y in loader:\n optimizer.zero_grad()\n y_pred = model(x)\n y = torch.max(torch.squeeze(y, dim=1), dim=1).indices\n loss = criterion(y_pred, y)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n train_acc += (y_pred.argmax(1) == y).sum().item()\n print(f'[epoch {epoch:02d}]\\tloss:{train_loss}\\taccuracy:{train_acc}')\n finish_tm = time.time()\n print(f'train finished.({finish_tm - start_tm}sec)')\n",
"step-3": "import time\nimport torch\nfrom torch.utils.data import DataLoader\nfrom nn_model import NNModel\n\n\ndef train(dataset: 'Dataset', epochs: int=10):\n loader = DataLoader(dataset, batch_size=2, shuffle=True)\n model = NNModel(n_input=2, n_output=3)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n criterion = torch.nn.CrossEntropyLoss()\n start_tm = time.time()\n for epoch in range(1, epochs + 1):\n train_loss = 0.0\n train_acc = 0\n for x, y in loader:\n optimizer.zero_grad()\n y_pred = model(x)\n y = torch.max(torch.squeeze(y, dim=1), dim=1).indices\n loss = criterion(y_pred, y)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n train_acc += (y_pred.argmax(1) == y).sum().item()\n print(f'[epoch {epoch:02d}]\\tloss:{train_loss}\\taccuracy:{train_acc}')\n finish_tm = time.time()\n print(f'train finished.({finish_tm - start_tm}sec)')\n",
"step-4": "import time\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom nn_model import NNModel\n\n\ndef train(dataset: 'Dataset', epochs: int=10):\n loader = DataLoader(dataset, batch_size=2, shuffle=True)\n\n model = NNModel(n_input=2, n_output=3)\n # model.to(device='cpu')\n\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n criterion = torch.nn.CrossEntropyLoss()\n \n start_tm = time.time()\n for epoch in range(1, epochs+1):\n train_loss = 0.0\n train_acc = 0\n for x, y in loader:\n optimizer.zero_grad()\n\n y_pred = model(x)\n y = torch.max(torch.squeeze(y, dim=1), dim=1).indices\n \n loss = criterion(y_pred, y)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n train_acc += (y_pred.argmax(1) == y).sum().item()\n print(f'[epoch {epoch:02d}]\\tloss:{train_loss}\\taccuracy:{train_acc}')\n finish_tm = time.time()\n print(f'train finished.({finish_tm-start_tm}sec)')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.1.2 on 2018-10-26 05:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('candidate', '0004_remove_candidate_corrected_loc'),
]
operations = [
migrations.AlterField(
model_name='candidate',
name='analytics_exp',
field=models.FloatField(blank=True, default=0.0),
),
]
|
normal
|
{
"blob_id": "eb75f6e959e9153e6588a0322d1ebc75e21e73ef",
"index": 8153,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('candidate', '0004_remove_candidate_corrected_loc')]\n operations = [migrations.AlterField(model_name='candidate', name=\n 'analytics_exp', field=models.FloatField(blank=True, default=0.0))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('candidate', '0004_remove_candidate_corrected_loc')]\n operations = [migrations.AlterField(model_name='candidate', name=\n 'analytics_exp', field=models.FloatField(blank=True, default=0.0))]\n",
"step-5": "# Generated by Django 2.1.2 on 2018-10-26 05:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('candidate', '0004_remove_candidate_corrected_loc'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='candidate',\n name='analytics_exp',\n field=models.FloatField(blank=True, default=0.0),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture(scope='session')
def my_setup(request):
print('\nDoing setup')
def fin():
print('\nDoing teardown')
if os.path.exists(test_generated_dir):
rmtree(test_generated_dir)
kdlc.cleanup()
request.addfinalizer(fin)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
test_generated_dir = os.path.dirname(__file__) + '/generated/'
@pytest.fixture(scope='session')
def my_setup(request):
print('\nDoing setup')
def fin():
print('\nDoing teardown')
if os.path.exists(test_generated_dir):
rmtree(test_generated_dir)
kdlc.cleanup()
request.addfinalizer(fin)
<|reserved_special_token_1|>
import pytest
import kdlc
from shutil import rmtree
import os
test_generated_dir = os.path.dirname(__file__) + '/generated/'
@pytest.fixture(scope='session')
def my_setup(request):
print('\nDoing setup')
def fin():
print('\nDoing teardown')
if os.path.exists(test_generated_dir):
rmtree(test_generated_dir)
kdlc.cleanup()
request.addfinalizer(fin)
<|reserved_special_token_1|>
import pytest
import kdlc
from shutil import rmtree
import os
# from .context import kdlc
test_generated_dir = os.path.dirname(__file__) + "/generated/"
@pytest.fixture(scope="session")
def my_setup(request):
print("\nDoing setup")
def fin():
print("\nDoing teardown")
if os.path.exists(test_generated_dir):
rmtree(test_generated_dir)
kdlc.cleanup()
request.addfinalizer(fin)
|
flexible
|
{
"blob_id": "7ff029e2f0054146e438f4e4f13269e83e28c469",
"index": 8727,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture(scope='session')\ndef my_setup(request):\n print('\\nDoing setup')\n\n def fin():\n print('\\nDoing teardown')\n if os.path.exists(test_generated_dir):\n rmtree(test_generated_dir)\n kdlc.cleanup()\n request.addfinalizer(fin)\n",
"step-3": "<mask token>\ntest_generated_dir = os.path.dirname(__file__) + '/generated/'\n\n\n@pytest.fixture(scope='session')\ndef my_setup(request):\n print('\\nDoing setup')\n\n def fin():\n print('\\nDoing teardown')\n if os.path.exists(test_generated_dir):\n rmtree(test_generated_dir)\n kdlc.cleanup()\n request.addfinalizer(fin)\n",
"step-4": "import pytest\nimport kdlc\nfrom shutil import rmtree\nimport os\ntest_generated_dir = os.path.dirname(__file__) + '/generated/'\n\n\n@pytest.fixture(scope='session')\ndef my_setup(request):\n print('\\nDoing setup')\n\n def fin():\n print('\\nDoing teardown')\n if os.path.exists(test_generated_dir):\n rmtree(test_generated_dir)\n kdlc.cleanup()\n request.addfinalizer(fin)\n",
"step-5": "import pytest\nimport kdlc\nfrom shutil import rmtree\nimport os\n\n# from .context import kdlc\n\ntest_generated_dir = os.path.dirname(__file__) + \"/generated/\"\n\n\n@pytest.fixture(scope=\"session\")\ndef my_setup(request):\n print(\"\\nDoing setup\")\n\n def fin():\n print(\"\\nDoing teardown\")\n\n if os.path.exists(test_generated_dir):\n rmtree(test_generated_dir)\n\n kdlc.cleanup()\n\n request.addfinalizer(fin)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class RectInsetTest(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class RectCloneAndMagic(TestCase):
def test_clone_and_compare(self):
rect1 = Rect(left=10, bottom=30, width=100, height=410)
rect2 = rect1.clone()
self.assertEqual(rect1, rect2)
rect2 = rect1.clone().inset(10)
self.assertNotEqual(rect1, rect2)
def test_string_representation(self):
"""
Тест, проверяющий наглядность и однозначность строкового представления
объекта прямоугольника.
Данный тест создан поздно ночью, поэтому весьма оправданы могут быть
сомнения как в корректности метода лежащего в его основе так и в
адекватности автора данного теста в момент его (теста) написания.
"""
rect = Rect(left=432548, right=876945, bottom=129543, top=410666)
srepr = repr(rect)
self.assertTrue('Rect' in srepr)
self.assertTrue('432548' in srepr)
self.assertTrue('876945' in srepr)
self.assertTrue('129543' in srepr)
self.assertTrue('410666' in srepr)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RectResizeTest(TestCase):
<|reserved_special_token_0|>
def test_resize_center(self):
self.rect.resize(width=200, height=50, origin='center-center')
self.assertEqual(self.rect.width, 200)
self.assertEqual(self.rect.height, 50)
self.assertEqual(self.rect.top, 175)
self.assertEqual(self.rect.right, 550)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_scale(self):
self.rect.scale(0.5, origin='center-center')
self.assertEqual(self.rect.width, 50)
self.assertEqual(self.rect.height, 50)
self.assertEqual(self.rect.left, 425)
self.assertEqual(self.rect.bottom, 125)
class RectMoveTest(TestCase):
def setUp(self):
self.rect = Rect(top=1, bottom=-1, left=-1, right=1)
def test_move_center(self):
self.rect.moveTo(20, 10, 'center-center')
self.assertEqual(self.rect.top, 11)
self.assertEqual(self.rect.right, 21)
self.assertEqual(self.rect.width, 2)
self.assertEqual(self.rect.height, 2)
def test_move_bottom_left(self):
self.rect.moveTo(30, 40, 'bottom-left')
self.assertEqual(self.rect.bottom, 40)
self.assertEqual(self.rect.left, 30)
self.assertEqual(self.rect.width, 2)
self.assertEqual(self.rect.height, 2)
class RectInsetTest(TestCase):
def setUp(self):
self.rect = Rect(bottom=100, top=200, left=10, right=20)
def test_inset_separate_values(self):
self.rect.inset(1, 10)
self.assertEqual(self.rect.bottom, 110)
self.assertEqual(self.rect.top, 190)
self.assertEqual(self.rect.left, 11)
self.assertEqual(self.rect.right, 19)
def test_inset_single_value(self):
self.rect.inset(2)
self.assertEqual(self.rect.bottom, 102)
self.assertEqual(self.rect.top, 198)
self.assertEqual(self.rect.left, 12)
self.assertEqual(self.rect.right, 18)
def test_inset_with_underflow(self):
self.rect.inset(51)
self.assertEqual(self.rect.bottom, 150)
self.assertEqual(self.rect.height, 0)
self.assertEqual(self.rect.left, 15)
self.assertEqual(self.rect.width, 0)
class RectCloneAndMagic(TestCase):
def test_clone_and_compare(self):
rect1 = Rect(left=10, bottom=30, width=100, height=410)
rect2 = rect1.clone()
self.assertEqual(rect1, rect2)
rect2 = rect1.clone().inset(10)
self.assertNotEqual(rect1, rect2)
def test_string_representation(self):
"""
Тест, проверяющий наглядность и однозначность строкового представления
объекта прямоугольника.
Данный тест создан поздно ночью, поэтому весьма оправданы могут быть
сомнения как в корректности метода лежащего в его основе так и в
адекватности автора данного теста в момент его (теста) написания.
"""
rect = Rect(left=432548, right=876945, bottom=129543, top=410666)
srepr = repr(rect)
self.assertTrue('Rect' in srepr)
self.assertTrue('432548' in srepr)
self.assertTrue('876945' in srepr)
self.assertTrue('129543' in srepr)
self.assertTrue('410666' in srepr)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RectSizeTest(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class RectResizeTest(TestCase):
def setUp(self):
self.rect = Rect(top=200, bottom=100, left=400, right=500)
def test_resize_center(self):
self.rect.resize(width=200, height=50, origin='center-center')
self.assertEqual(self.rect.width, 200)
self.assertEqual(self.rect.height, 50)
self.assertEqual(self.rect.top, 175)
self.assertEqual(self.rect.right, 550)
def test_resize_bottom_left(self):
self.rect.resize(width=253, height=68, origin='bottom-left')
self.assertEqual(self.rect.width, 253)
self.assertEqual(self.rect.height, 68)
self.assertEqual(self.rect.bottom, 100)
self.assertEqual(self.rect.left, 400)
def test_resize_top_right(self):
self.rect.resize(width=253, height=68, origin='top-right')
self.assertEqual(self.rect.width, 253)
self.assertEqual(self.rect.height, 68)
self.assertEqual(self.rect.top, 200)
self.assertEqual(self.rect.right, 500)
def test_scale(self):
self.rect.scale(0.5, origin='center-center')
self.assertEqual(self.rect.width, 50)
self.assertEqual(self.rect.height, 50)
self.assertEqual(self.rect.left, 425)
self.assertEqual(self.rect.bottom, 125)
class RectMoveTest(TestCase):
def setUp(self):
self.rect = Rect(top=1, bottom=-1, left=-1, right=1)
def test_move_center(self):
self.rect.moveTo(20, 10, 'center-center')
self.assertEqual(self.rect.top, 11)
self.assertEqual(self.rect.right, 21)
self.assertEqual(self.rect.width, 2)
self.assertEqual(self.rect.height, 2)
def test_move_bottom_left(self):
self.rect.moveTo(30, 40, 'bottom-left')
self.assertEqual(self.rect.bottom, 40)
self.assertEqual(self.rect.left, 30)
self.assertEqual(self.rect.width, 2)
self.assertEqual(self.rect.height, 2)
class RectInsetTest(TestCase):
def setUp(self):
self.rect = Rect(bottom=100, top=200, left=10, right=20)
def test_inset_separate_values(self):
self.rect.inset(1, 10)
self.assertEqual(self.rect.bottom, 110)
self.assertEqual(self.rect.top, 190)
self.assertEqual(self.rect.left, 11)
self.assertEqual(self.rect.right, 19)
def test_inset_single_value(self):
self.rect.inset(2)
self.assertEqual(self.rect.bottom, 102)
self.assertEqual(self.rect.top, 198)
self.assertEqual(self.rect.left, 12)
self.assertEqual(self.rect.right, 18)
def test_inset_with_underflow(self):
self.rect.inset(51)
self.assertEqual(self.rect.bottom, 150)
self.assertEqual(self.rect.height, 0)
self.assertEqual(self.rect.left, 15)
self.assertEqual(self.rect.width, 0)
class RectCloneAndMagic(TestCase):
def test_clone_and_compare(self):
rect1 = Rect(left=10, bottom=30, width=100, height=410)
rect2 = rect1.clone()
self.assertEqual(rect1, rect2)
rect2 = rect1.clone().inset(10)
self.assertNotEqual(rect1, rect2)
def test_string_representation(self):
"""
Тест, проверяющий наглядность и однозначность строкового представления
объекта прямоугольника.
Данный тест создан поздно ночью, поэтому весьма оправданы могут быть
сомнения как в корректности метода лежащего в его основе так и в
адекватности автора данного теста в момент его (теста) написания.
"""
rect = Rect(left=432548, right=876945, bottom=129543, top=410666)
srepr = repr(rect)
self.assertTrue('Rect' in srepr)
self.assertTrue('432548' in srepr)
self.assertTrue('876945' in srepr)
self.assertTrue('129543' in srepr)
self.assertTrue('410666' in srepr)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RectSizeTest(TestCase):
def test_sizes_from_coords(self):
rect = Rect(top=33, bottom=22, left=10, right=20)
self.assertEqual(rect.width, 10)
self.assertEqual(rect.height, 11)
<|reserved_special_token_0|>
class RectResizeTest(TestCase):
def setUp(self):
self.rect = Rect(top=200, bottom=100, left=400, right=500)
def test_resize_center(self):
self.rect.resize(width=200, height=50, origin='center-center')
self.assertEqual(self.rect.width, 200)
self.assertEqual(self.rect.height, 50)
self.assertEqual(self.rect.top, 175)
self.assertEqual(self.rect.right, 550)
def test_resize_bottom_left(self):
self.rect.resize(width=253, height=68, origin='bottom-left')
self.assertEqual(self.rect.width, 253)
self.assertEqual(self.rect.height, 68)
self.assertEqual(self.rect.bottom, 100)
self.assertEqual(self.rect.left, 400)
def test_resize_top_right(self):
self.rect.resize(width=253, height=68, origin='top-right')
self.assertEqual(self.rect.width, 253)
self.assertEqual(self.rect.height, 68)
self.assertEqual(self.rect.top, 200)
self.assertEqual(self.rect.right, 500)
def test_scale(self):
self.rect.scale(0.5, origin='center-center')
self.assertEqual(self.rect.width, 50)
self.assertEqual(self.rect.height, 50)
self.assertEqual(self.rect.left, 425)
self.assertEqual(self.rect.bottom, 125)
class RectMoveTest(TestCase):
def setUp(self):
self.rect = Rect(top=1, bottom=-1, left=-1, right=1)
def test_move_center(self):
self.rect.moveTo(20, 10, 'center-center')
self.assertEqual(self.rect.top, 11)
self.assertEqual(self.rect.right, 21)
self.assertEqual(self.rect.width, 2)
self.assertEqual(self.rect.height, 2)
def test_move_bottom_left(self):
self.rect.moveTo(30, 40, 'bottom-left')
self.assertEqual(self.rect.bottom, 40)
self.assertEqual(self.rect.left, 30)
self.assertEqual(self.rect.width, 2)
self.assertEqual(self.rect.height, 2)
class RectInsetTest(TestCase):
def setUp(self):
self.rect = Rect(bottom=100, top=200, left=10, right=20)
def test_inset_separate_values(self):
self.rect.inset(1, 10)
self.assertEqual(self.rect.bottom, 110)
self.assertEqual(self.rect.top, 190)
self.assertEqual(self.rect.left, 11)
self.assertEqual(self.rect.right, 19)
def test_inset_single_value(self):
self.rect.inset(2)
self.assertEqual(self.rect.bottom, 102)
self.assertEqual(self.rect.top, 198)
self.assertEqual(self.rect.left, 12)
self.assertEqual(self.rect.right, 18)
def test_inset_with_underflow(self):
self.rect.inset(51)
self.assertEqual(self.rect.bottom, 150)
self.assertEqual(self.rect.height, 0)
self.assertEqual(self.rect.left, 15)
self.assertEqual(self.rect.width, 0)
class RectCloneAndMagic(TestCase):
def test_clone_and_compare(self):
rect1 = Rect(left=10, bottom=30, width=100, height=410)
rect2 = rect1.clone()
self.assertEqual(rect1, rect2)
rect2 = rect1.clone().inset(10)
self.assertNotEqual(rect1, rect2)
def test_string_representation(self):
"""
Тест, проверяющий наглядность и однозначность строкового представления
объекта прямоугольника.
Данный тест создан поздно ночью, поэтому весьма оправданы могут быть
сомнения как в корректности метода лежащего в его основе так и в
адекватности автора данного теста в момент его (теста) написания.
"""
rect = Rect(left=432548, right=876945, bottom=129543, top=410666)
srepr = repr(rect)
self.assertTrue('Rect' in srepr)
self.assertTrue('432548' in srepr)
self.assertTrue('876945' in srepr)
self.assertTrue('129543' in srepr)
self.assertTrue('410666' in srepr)
<|reserved_special_token_1|>
# coding=UTF-8
from unittest import TestCase
from fwk.util.rect import Rect
class RectSizeTest(TestCase):
def test_sizes_from_coords(self):
rect = Rect(top=33,bottom=22,left=10,right=20)
self.assertEqual(rect.width,10)
self.assertEqual(rect.height,11)
def test_sizes_from_sizes(self):
rect = Rect(top=23,height=48,left=64,width=67)
self.assertEqual(rect.width,67)
self.assertEqual(rect.height,48)
class RectResizeTest(TestCase):
def setUp(self):
self.rect = Rect(top=200,bottom=100,left=400,right=500)
def test_resize_center(self):
self.rect.resize(width=200,height=50,origin='center-center')
self.assertEqual(self.rect.width,200)
self.assertEqual(self.rect.height,50)
self.assertEqual(self.rect.top,175)
self.assertEqual(self.rect.right,550)
def test_resize_bottom_left(self):
self.rect.resize(width=253,height=68,origin='bottom-left')
self.assertEqual(self.rect.width,253)
self.assertEqual(self.rect.height,68)
self.assertEqual(self.rect.bottom,100)
self.assertEqual(self.rect.left,400)
def test_resize_top_right(self):
self.rect.resize(width=253,height=68,origin='top-right')
self.assertEqual(self.rect.width,253)
self.assertEqual(self.rect.height,68)
self.assertEqual(self.rect.top,200)
self.assertEqual(self.rect.right,500)
def test_scale(self):
self.rect.scale(0.5,origin='center-center')
self.assertEqual(self.rect.width,50)
self.assertEqual(self.rect.height,50)
self.assertEqual(self.rect.left,425)
self.assertEqual(self.rect.bottom,125)
class RectMoveTest(TestCase):
def setUp(self):
self.rect = Rect(top=1,bottom=-1,left=-1,right=1)
def test_move_center(self):
self.rect.moveTo(20,10,'center-center')
self.assertEqual(self.rect.top,11)
self.assertEqual(self.rect.right,21)
self.assertEqual(self.rect.width,2)
self.assertEqual(self.rect.height,2)
def test_move_bottom_left(self):
self.rect.moveTo(30,40,'bottom-left')
self.assertEqual(self.rect.bottom,40)
self.assertEqual(self.rect.left,30)
self.assertEqual(self.rect.width,2)
self.assertEqual(self.rect.height,2)
class RectInsetTest(TestCase):
def setUp(self):
self.rect = Rect(bottom=100,top=200,left=10,right=20)
def test_inset_separate_values(self):
self.rect.inset(1,10)
self.assertEqual(self.rect.bottom,110)
self.assertEqual(self.rect.top,190)
self.assertEqual(self.rect.left,11)
self.assertEqual(self.rect.right,19)
def test_inset_single_value(self):
self.rect.inset(2)
self.assertEqual(self.rect.bottom,102)
self.assertEqual(self.rect.top,198)
self.assertEqual(self.rect.left,12)
self.assertEqual(self.rect.right,18)
def test_inset_with_underflow(self):
self.rect.inset(51)
self.assertEqual(self.rect.bottom,150)
self.assertEqual(self.rect.height,0)
self.assertEqual(self.rect.left,15)
self.assertEqual(self.rect.width,0)
class RectCloneAndMagic(TestCase):
def test_clone_and_compare(self):
rect1 = Rect(left=10,bottom=30,width=100,height=410)
rect2 = rect1.clone()
self.assertEqual(rect1,rect2)
rect2 = rect1.clone().inset(10)
self.assertNotEqual(rect1,rect2)
def test_string_representation(self):
'''
Тест, проверяющий наглядность и однозначность строкового представления
объекта прямоугольника.
Данный тест создан поздно ночью, поэтому весьма оправданы могут быть
сомнения как в корректности метода лежащего в его основе так и в
адекватности автора данного теста в момент его (теста) написания.
'''
rect = Rect(left=432548,right=876945,bottom=129543,top=410666)
srepr = repr(rect)
self.assertTrue('Rect' in srepr)
self.assertTrue('432548' in srepr)
self.assertTrue('876945' in srepr)
self.assertTrue('129543' in srepr)
self.assertTrue('410666' in srepr)
|
flexible
|
{
"blob_id": "ff65e92699c6c9379ac40397b3318c3f6bf7d49a",
"index": 3720,
"step-1": "<mask token>\n\n\nclass RectInsetTest(TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass RectCloneAndMagic(TestCase):\n\n def test_clone_and_compare(self):\n rect1 = Rect(left=10, bottom=30, width=100, height=410)\n rect2 = rect1.clone()\n self.assertEqual(rect1, rect2)\n rect2 = rect1.clone().inset(10)\n self.assertNotEqual(rect1, rect2)\n\n def test_string_representation(self):\n \"\"\"\n\t\tТест, проверяющий наглядность и однозначность строкового представления\n\t\t\tобъекта прямоугольника.\n\n\t\tДанный тест создан поздно ночью, поэтому весьма оправданы могут быть\n\t\t\tсомнения как в корректности метода лежащего в его основе так и в\n\t\t\tадекватности автора данного теста в момент его (теста) написания.\n\t\t\"\"\"\n rect = Rect(left=432548, right=876945, bottom=129543, top=410666)\n srepr = repr(rect)\n self.assertTrue('Rect' in srepr)\n self.assertTrue('432548' in srepr)\n self.assertTrue('876945' in srepr)\n self.assertTrue('129543' in srepr)\n self.assertTrue('410666' in srepr)\n",
"step-2": "<mask token>\n\n\nclass RectResizeTest(TestCase):\n <mask token>\n\n def test_resize_center(self):\n self.rect.resize(width=200, height=50, origin='center-center')\n self.assertEqual(self.rect.width, 200)\n self.assertEqual(self.rect.height, 50)\n self.assertEqual(self.rect.top, 175)\n self.assertEqual(self.rect.right, 550)\n <mask token>\n <mask token>\n\n def test_scale(self):\n self.rect.scale(0.5, origin='center-center')\n self.assertEqual(self.rect.width, 50)\n self.assertEqual(self.rect.height, 50)\n self.assertEqual(self.rect.left, 425)\n self.assertEqual(self.rect.bottom, 125)\n\n\nclass RectMoveTest(TestCase):\n\n def setUp(self):\n self.rect = Rect(top=1, bottom=-1, left=-1, right=1)\n\n def test_move_center(self):\n self.rect.moveTo(20, 10, 'center-center')\n self.assertEqual(self.rect.top, 11)\n self.assertEqual(self.rect.right, 21)\n self.assertEqual(self.rect.width, 2)\n self.assertEqual(self.rect.height, 2)\n\n def test_move_bottom_left(self):\n self.rect.moveTo(30, 40, 'bottom-left')\n self.assertEqual(self.rect.bottom, 40)\n self.assertEqual(self.rect.left, 30)\n self.assertEqual(self.rect.width, 2)\n self.assertEqual(self.rect.height, 2)\n\n\nclass RectInsetTest(TestCase):\n\n def setUp(self):\n self.rect = Rect(bottom=100, top=200, left=10, right=20)\n\n def test_inset_separate_values(self):\n self.rect.inset(1, 10)\n self.assertEqual(self.rect.bottom, 110)\n self.assertEqual(self.rect.top, 190)\n self.assertEqual(self.rect.left, 11)\n self.assertEqual(self.rect.right, 19)\n\n def test_inset_single_value(self):\n self.rect.inset(2)\n self.assertEqual(self.rect.bottom, 102)\n self.assertEqual(self.rect.top, 198)\n self.assertEqual(self.rect.left, 12)\n self.assertEqual(self.rect.right, 18)\n\n def test_inset_with_underflow(self):\n self.rect.inset(51)\n self.assertEqual(self.rect.bottom, 150)\n self.assertEqual(self.rect.height, 0)\n self.assertEqual(self.rect.left, 15)\n self.assertEqual(self.rect.width, 0)\n\n\nclass 
RectCloneAndMagic(TestCase):\n\n def test_clone_and_compare(self):\n rect1 = Rect(left=10, bottom=30, width=100, height=410)\n rect2 = rect1.clone()\n self.assertEqual(rect1, rect2)\n rect2 = rect1.clone().inset(10)\n self.assertNotEqual(rect1, rect2)\n\n def test_string_representation(self):\n \"\"\"\n\t\tТест, проверяющий наглядность и однозначность строкового представления\n\t\t\tобъекта прямоугольника.\n\n\t\tДанный тест создан поздно ночью, поэтому весьма оправданы могут быть\n\t\t\tсомнения как в корректности метода лежащего в его основе так и в\n\t\t\tадекватности автора данного теста в момент его (теста) написания.\n\t\t\"\"\"\n rect = Rect(left=432548, right=876945, bottom=129543, top=410666)\n srepr = repr(rect)\n self.assertTrue('Rect' in srepr)\n self.assertTrue('432548' in srepr)\n self.assertTrue('876945' in srepr)\n self.assertTrue('129543' in srepr)\n self.assertTrue('410666' in srepr)\n",
"step-3": "<mask token>\n\n\nclass RectSizeTest(TestCase):\n <mask token>\n <mask token>\n\n\nclass RectResizeTest(TestCase):\n\n def setUp(self):\n self.rect = Rect(top=200, bottom=100, left=400, right=500)\n\n def test_resize_center(self):\n self.rect.resize(width=200, height=50, origin='center-center')\n self.assertEqual(self.rect.width, 200)\n self.assertEqual(self.rect.height, 50)\n self.assertEqual(self.rect.top, 175)\n self.assertEqual(self.rect.right, 550)\n\n def test_resize_bottom_left(self):\n self.rect.resize(width=253, height=68, origin='bottom-left')\n self.assertEqual(self.rect.width, 253)\n self.assertEqual(self.rect.height, 68)\n self.assertEqual(self.rect.bottom, 100)\n self.assertEqual(self.rect.left, 400)\n\n def test_resize_top_right(self):\n self.rect.resize(width=253, height=68, origin='top-right')\n self.assertEqual(self.rect.width, 253)\n self.assertEqual(self.rect.height, 68)\n self.assertEqual(self.rect.top, 200)\n self.assertEqual(self.rect.right, 500)\n\n def test_scale(self):\n self.rect.scale(0.5, origin='center-center')\n self.assertEqual(self.rect.width, 50)\n self.assertEqual(self.rect.height, 50)\n self.assertEqual(self.rect.left, 425)\n self.assertEqual(self.rect.bottom, 125)\n\n\nclass RectMoveTest(TestCase):\n\n def setUp(self):\n self.rect = Rect(top=1, bottom=-1, left=-1, right=1)\n\n def test_move_center(self):\n self.rect.moveTo(20, 10, 'center-center')\n self.assertEqual(self.rect.top, 11)\n self.assertEqual(self.rect.right, 21)\n self.assertEqual(self.rect.width, 2)\n self.assertEqual(self.rect.height, 2)\n\n def test_move_bottom_left(self):\n self.rect.moveTo(30, 40, 'bottom-left')\n self.assertEqual(self.rect.bottom, 40)\n self.assertEqual(self.rect.left, 30)\n self.assertEqual(self.rect.width, 2)\n self.assertEqual(self.rect.height, 2)\n\n\nclass RectInsetTest(TestCase):\n\n def setUp(self):\n self.rect = Rect(bottom=100, top=200, left=10, right=20)\n\n def test_inset_separate_values(self):\n self.rect.inset(1, 10)\n 
self.assertEqual(self.rect.bottom, 110)\n self.assertEqual(self.rect.top, 190)\n self.assertEqual(self.rect.left, 11)\n self.assertEqual(self.rect.right, 19)\n\n def test_inset_single_value(self):\n self.rect.inset(2)\n self.assertEqual(self.rect.bottom, 102)\n self.assertEqual(self.rect.top, 198)\n self.assertEqual(self.rect.left, 12)\n self.assertEqual(self.rect.right, 18)\n\n def test_inset_with_underflow(self):\n self.rect.inset(51)\n self.assertEqual(self.rect.bottom, 150)\n self.assertEqual(self.rect.height, 0)\n self.assertEqual(self.rect.left, 15)\n self.assertEqual(self.rect.width, 0)\n\n\nclass RectCloneAndMagic(TestCase):\n\n def test_clone_and_compare(self):\n rect1 = Rect(left=10, bottom=30, width=100, height=410)\n rect2 = rect1.clone()\n self.assertEqual(rect1, rect2)\n rect2 = rect1.clone().inset(10)\n self.assertNotEqual(rect1, rect2)\n\n def test_string_representation(self):\n \"\"\"\n\t\tТест, проверяющий наглядность и однозначность строкового представления\n\t\t\tобъекта прямоугольника.\n\n\t\tДанный тест создан поздно ночью, поэтому весьма оправданы могут быть\n\t\t\tсомнения как в корректности метода лежащего в его основе так и в\n\t\t\tадекватности автора данного теста в момент его (теста) написания.\n\t\t\"\"\"\n rect = Rect(left=432548, right=876945, bottom=129543, top=410666)\n srepr = repr(rect)\n self.assertTrue('Rect' in srepr)\n self.assertTrue('432548' in srepr)\n self.assertTrue('876945' in srepr)\n self.assertTrue('129543' in srepr)\n self.assertTrue('410666' in srepr)\n",
"step-4": "<mask token>\n\n\nclass RectSizeTest(TestCase):\n\n def test_sizes_from_coords(self):\n rect = Rect(top=33, bottom=22, left=10, right=20)\n self.assertEqual(rect.width, 10)\n self.assertEqual(rect.height, 11)\n <mask token>\n\n\nclass RectResizeTest(TestCase):\n\n def setUp(self):\n self.rect = Rect(top=200, bottom=100, left=400, right=500)\n\n def test_resize_center(self):\n self.rect.resize(width=200, height=50, origin='center-center')\n self.assertEqual(self.rect.width, 200)\n self.assertEqual(self.rect.height, 50)\n self.assertEqual(self.rect.top, 175)\n self.assertEqual(self.rect.right, 550)\n\n def test_resize_bottom_left(self):\n self.rect.resize(width=253, height=68, origin='bottom-left')\n self.assertEqual(self.rect.width, 253)\n self.assertEqual(self.rect.height, 68)\n self.assertEqual(self.rect.bottom, 100)\n self.assertEqual(self.rect.left, 400)\n\n def test_resize_top_right(self):\n self.rect.resize(width=253, height=68, origin='top-right')\n self.assertEqual(self.rect.width, 253)\n self.assertEqual(self.rect.height, 68)\n self.assertEqual(self.rect.top, 200)\n self.assertEqual(self.rect.right, 500)\n\n def test_scale(self):\n self.rect.scale(0.5, origin='center-center')\n self.assertEqual(self.rect.width, 50)\n self.assertEqual(self.rect.height, 50)\n self.assertEqual(self.rect.left, 425)\n self.assertEqual(self.rect.bottom, 125)\n\n\nclass RectMoveTest(TestCase):\n\n def setUp(self):\n self.rect = Rect(top=1, bottom=-1, left=-1, right=1)\n\n def test_move_center(self):\n self.rect.moveTo(20, 10, 'center-center')\n self.assertEqual(self.rect.top, 11)\n self.assertEqual(self.rect.right, 21)\n self.assertEqual(self.rect.width, 2)\n self.assertEqual(self.rect.height, 2)\n\n def test_move_bottom_left(self):\n self.rect.moveTo(30, 40, 'bottom-left')\n self.assertEqual(self.rect.bottom, 40)\n self.assertEqual(self.rect.left, 30)\n self.assertEqual(self.rect.width, 2)\n self.assertEqual(self.rect.height, 2)\n\n\nclass RectInsetTest(TestCase):\n\n 
def setUp(self):\n self.rect = Rect(bottom=100, top=200, left=10, right=20)\n\n def test_inset_separate_values(self):\n self.rect.inset(1, 10)\n self.assertEqual(self.rect.bottom, 110)\n self.assertEqual(self.rect.top, 190)\n self.assertEqual(self.rect.left, 11)\n self.assertEqual(self.rect.right, 19)\n\n def test_inset_single_value(self):\n self.rect.inset(2)\n self.assertEqual(self.rect.bottom, 102)\n self.assertEqual(self.rect.top, 198)\n self.assertEqual(self.rect.left, 12)\n self.assertEqual(self.rect.right, 18)\n\n def test_inset_with_underflow(self):\n self.rect.inset(51)\n self.assertEqual(self.rect.bottom, 150)\n self.assertEqual(self.rect.height, 0)\n self.assertEqual(self.rect.left, 15)\n self.assertEqual(self.rect.width, 0)\n\n\nclass RectCloneAndMagic(TestCase):\n\n def test_clone_and_compare(self):\n rect1 = Rect(left=10, bottom=30, width=100, height=410)\n rect2 = rect1.clone()\n self.assertEqual(rect1, rect2)\n rect2 = rect1.clone().inset(10)\n self.assertNotEqual(rect1, rect2)\n\n def test_string_representation(self):\n \"\"\"\n\t\tТест, проверяющий наглядность и однозначность строкового представления\n\t\t\tобъекта прямоугольника.\n\n\t\tДанный тест создан поздно ночью, поэтому весьма оправданы могут быть\n\t\t\tсомнения как в корректности метода лежащего в его основе так и в\n\t\t\tадекватности автора данного теста в момент его (теста) написания.\n\t\t\"\"\"\n rect = Rect(left=432548, right=876945, bottom=129543, top=410666)\n srepr = repr(rect)\n self.assertTrue('Rect' in srepr)\n self.assertTrue('432548' in srepr)\n self.assertTrue('876945' in srepr)\n self.assertTrue('129543' in srepr)\n self.assertTrue('410666' in srepr)\n",
"step-5": "# coding=UTF-8\nfrom unittest import TestCase\n\nfrom fwk.util.rect import Rect\n\nclass RectSizeTest(TestCase):\n\tdef test_sizes_from_coords(self):\n\t\trect = Rect(top=33,bottom=22,left=10,right=20)\n\t\tself.assertEqual(rect.width,10)\n\t\tself.assertEqual(rect.height,11)\n\n\tdef test_sizes_from_sizes(self):\n\t\trect = Rect(top=23,height=48,left=64,width=67)\n\t\tself.assertEqual(rect.width,67)\n\t\tself.assertEqual(rect.height,48)\n\nclass RectResizeTest(TestCase):\n\tdef setUp(self):\n\t\tself.rect = Rect(top=200,bottom=100,left=400,right=500)\n\tdef test_resize_center(self):\n\t\tself.rect.resize(width=200,height=50,origin='center-center')\n\t\tself.assertEqual(self.rect.width,200)\n\t\tself.assertEqual(self.rect.height,50)\n\t\tself.assertEqual(self.rect.top,175)\n\t\tself.assertEqual(self.rect.right,550)\n\n\tdef test_resize_bottom_left(self):\n\t\tself.rect.resize(width=253,height=68,origin='bottom-left')\n\t\tself.assertEqual(self.rect.width,253)\n\t\tself.assertEqual(self.rect.height,68)\n\t\tself.assertEqual(self.rect.bottom,100)\n\t\tself.assertEqual(self.rect.left,400)\n\n\tdef test_resize_top_right(self):\n\t\tself.rect.resize(width=253,height=68,origin='top-right')\n\t\tself.assertEqual(self.rect.width,253)\n\t\tself.assertEqual(self.rect.height,68)\n\t\tself.assertEqual(self.rect.top,200)\n\t\tself.assertEqual(self.rect.right,500)\n\n\tdef test_scale(self):\n\t\tself.rect.scale(0.5,origin='center-center')\n\t\tself.assertEqual(self.rect.width,50)\n\t\tself.assertEqual(self.rect.height,50)\n\t\tself.assertEqual(self.rect.left,425)\n\t\tself.assertEqual(self.rect.bottom,125)\n\nclass RectMoveTest(TestCase):\n\tdef setUp(self):\n\t\tself.rect = Rect(top=1,bottom=-1,left=-1,right=1)\n\tdef test_move_center(self):\n\t\tself.rect.moveTo(20,10,'center-center')\n\t\tself.assertEqual(self.rect.top,11)\n\t\tself.assertEqual(self.rect.right,21)\n\t\tself.assertEqual(self.rect.width,2)\n\t\tself.assertEqual(self.rect.height,2)\n\n\tdef 
test_move_bottom_left(self):\n\t\tself.rect.moveTo(30,40,'bottom-left')\n\t\tself.assertEqual(self.rect.bottom,40)\n\t\tself.assertEqual(self.rect.left,30)\n\t\tself.assertEqual(self.rect.width,2)\n\t\tself.assertEqual(self.rect.height,2)\n\nclass RectInsetTest(TestCase):\n\tdef setUp(self):\n\t\tself.rect = Rect(bottom=100,top=200,left=10,right=20)\n\tdef test_inset_separate_values(self):\n\t\tself.rect.inset(1,10)\n\t\tself.assertEqual(self.rect.bottom,110)\n\t\tself.assertEqual(self.rect.top,190)\n\t\tself.assertEqual(self.rect.left,11)\n\t\tself.assertEqual(self.rect.right,19)\n\n\tdef test_inset_single_value(self):\n\t\tself.rect.inset(2)\n\t\tself.assertEqual(self.rect.bottom,102)\n\t\tself.assertEqual(self.rect.top,198)\n\t\tself.assertEqual(self.rect.left,12)\n\t\tself.assertEqual(self.rect.right,18)\n\n\tdef test_inset_with_underflow(self):\n\t\tself.rect.inset(51)\n\t\tself.assertEqual(self.rect.bottom,150)\n\t\tself.assertEqual(self.rect.height,0)\n\t\tself.assertEqual(self.rect.left,15)\n\t\tself.assertEqual(self.rect.width,0)\n\nclass RectCloneAndMagic(TestCase):\n\tdef test_clone_and_compare(self):\n\t\trect1 = Rect(left=10,bottom=30,width=100,height=410)\n\t\trect2 = rect1.clone()\n\t\tself.assertEqual(rect1,rect2)\n\t\trect2 = rect1.clone().inset(10)\n\t\tself.assertNotEqual(rect1,rect2)\n\n\tdef test_string_representation(self):\n\t\t'''\n\t\tТест, проверяющий наглядность и однозначность строкового представления\n\t\t\tобъекта прямоугольника.\n\n\t\tДанный тест создан поздно ночью, поэтому весьма оправданы могут быть\n\t\t\tсомнения как в корректности метода лежащего в его основе так и в\n\t\t\tадекватности автора данного теста в момент его (теста) написания.\n\t\t'''\n\t\trect = Rect(left=432548,right=876945,bottom=129543,top=410666)\n\t\tsrepr = repr(rect)\n\t\tself.assertTrue('Rect' in srepr)\n\t\tself.assertTrue('432548' in srepr)\n\t\tself.assertTrue('876945' in srepr)\n\t\tself.assertTrue('129543' in srepr)\n\t\tself.assertTrue('410666' in 
srepr)\n",
"step-ids": [
4,
15,
19,
20,
23
]
}
|
[
4,
15,
19,
20,
23
] |
<|reserved_special_token_0|>
class RandomProjectionsFeature(PipelineNode):
    """Project per-channel peak-to-peak amplitudes onto random projection vectors.

    Restored from the identical complete copies of this class later in the
    file (the original occurrence here had its members elided).
    """

    def __init__(self, recording, name='random_projections_feature',
                 return_output=True, parents=None, projections=None,
                 local_radius_um=150.0, min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.projections = projections
        self.local_radius_um = local_radius_um
        self.min_values = min_values
        # Channels closer than local_radius_um count as neighbours.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(projections=projections,
                                 local_radius_um=local_radius_um,
                                 min_values=min_values))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for main_chan in np.unique(peaks['channel_ind']):
            (idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan])
            local_projections = self.projections[chan_inds, :]
            # Peak-to-peak amplitude over time on each neighbouring channel.
            wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)
            if self.min_values is not None:
                # Nonlinear re-weighting relative to min_values
                # (presumably per-channel thresholds -- TODO confirm).
                wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4
            denom = np.sum(wf_ptp, axis=1)
            mask = denom != 0
            all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections) / denom[mask][:, np.newaxis]
        return all_projections
class RandomProjectionsEnergyFeature(PipelineNode):
    """Project per-channel waveform energies onto random projection vectors.

    For each peak, the L2 norm over time of the waveform on every
    neighbouring channel is computed, optionally re-weighted by
    ``min_values``, and used to combine the rows of ``projections``
    restricted to those channels.
    """

    def __init__(self, recording, name='random_projections_energy_feature',
                 return_output=True, parents=None, projections=None,
                 local_radius_um=150.0, min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        # Neighbourhood structure: channels closer than local_radius_um.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self.projections = projections
        self.min_values = min_values
        self.local_radius_um = local_radius_um
        self._kwargs.update(projections=projections, min_values=min_values,
                            local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            basis = self.projections[neighbours, :]
            # Energy per neighbouring channel: L2 norm over the time axis.
            weights = np.linalg.norm(waveforms[rows][:, :, neighbours], axis=1)
            if self.min_values is not None:
                # Nonlinear re-weighting relative to min_values
                # (presumably per-channel thresholds -- TODO confirm).
                weights = (weights / self.min_values[neighbours]) ** 4
            totals = weights.sum(axis=1)
            valid = totals != 0
            features[rows[valid]] = np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
        return features
class StdPeakToPeakFeature(PipelineNode):
    """Per-peak standard deviation of the peak-to-peak amplitudes over
    the channels neighbouring each peak's main channel."""

    def __init__(self, recording, name='std_ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        # Channels closer than local_radius_um are considered neighbours.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # One value per peak: std of the ptp across neighbouring channels.
        features = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            features[rows] = np.ptp(local_wfs, axis=1).std(axis=1)
        return features
class GlobalPeakToPeakFeature(PipelineNode):
    """Per-peak global peak-to-peak amplitude over the neighbouring channels.

    The feature is max - min taken jointly over time and the channels
    within ``local_radius_um`` of the peak's main channel.
    """

    def __init__(self, recording, name='global_ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            features[rows] = local_wfs.max(axis=(1, 2)) - local_wfs.min(axis=(1, 2))
        return features
class KurtosisPeakToPeakFeature(PipelineNode):
    """Per-peak kurtosis of the peak-to-peak amplitudes across the
    channels neighbouring each peak's main channel."""

    def __init__(self, recording, name='kurtosis_ptp_feature',
                 return_output=True, parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # Import the submodule explicitly: a bare `import scipy` does not
        # guarantee `scipy.stats` is available on all scipy versions.
        import scipy.stats

        all_ptps = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            (idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan])
            wfs = waveforms[idx][:, :, chan_inds]
            # Kurtosis of the per-channel ptp distribution for each peak.
            all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class EnergyFeature(PipelineNode):
    """Per-peak waveform energy: L2 norm over time and neighbouring
    channels, normalized by the number of neighbouring channels."""

    def __init__(self, recording, name='energy_feature', return_output=True,
                 parents=None, local_radius_um=50.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)

    def get_dtype(self):
        return np.dtype('float32')

    def compute(self, traces, peaks, waveforms):
        energies = np.zeros(peaks.size, dtype='float32')
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            energies[rows] = np.linalg.norm(local_wfs, axis=(1, 2)) / neighbours.size
        return energies
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RandomProjectionsFeature(PipelineNode):
    """Project per-channel peak-to-peak amplitudes onto random projection vectors."""

    def __init__(self, recording, name='random_projections_feature',
                 return_output=True, parents=None, projections=None,
                 local_radius_um=150.0, min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.projections = projections
        self.local_radius_um = local_radius_um
        self.min_values = min_values
        # Channels closer than local_radius_um count as neighbours.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(projections=projections, local_radius_um=local_radius_um,
                            min_values=min_values)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            basis = self.projections[neighbours, :]
            # Peak-to-peak amplitude over time on each neighbouring channel.
            weights = np.ptp(waveforms[rows][:, :, neighbours], axis=1)
            if self.min_values is not None:
                # Nonlinear re-weighting relative to min_values
                # (presumably per-channel thresholds -- TODO confirm).
                weights = (weights / self.min_values[neighbours]) ** 4
            totals = weights.sum(axis=1)
            valid = totals != 0
            features[rows[valid]] = np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
        return features
class RandomProjectionsEnergyFeature(PipelineNode):
    """Project per-channel waveform energies onto random projection vectors.

    For each peak, the L2 norm over time of the waveform on every
    neighbouring channel is computed, optionally re-weighted by
    ``min_values``, and used to combine the rows of ``projections``
    restricted to those channels.
    """

    def __init__(self, recording, name='random_projections_energy_feature',
                 return_output=True, parents=None, projections=None,
                 local_radius_um=150.0, min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        # Neighbourhood structure: channels closer than local_radius_um.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self.projections = projections
        self.min_values = min_values
        self.local_radius_um = local_radius_um
        self._kwargs.update(projections=projections, min_values=min_values,
                            local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            basis = self.projections[neighbours, :]
            # Energy per neighbouring channel: L2 norm over the time axis.
            weights = np.linalg.norm(waveforms[rows][:, :, neighbours], axis=1)
            if self.min_values is not None:
                # Nonlinear re-weighting relative to min_values
                # (presumably per-channel thresholds -- TODO confirm).
                weights = (weights / self.min_values[neighbours]) ** 4
            totals = weights.sum(axis=1)
            valid = totals != 0
            features[rows[valid]] = np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
        return features
class StdPeakToPeakFeature(PipelineNode):
    """Per-peak standard deviation of the peak-to-peak amplitudes over
    the channels neighbouring each peak's main channel."""

    def __init__(self, recording, name='std_ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        # Channels closer than local_radius_um are considered neighbours.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # One value per peak: std of the ptp across neighbouring channels.
        features = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            features[rows] = np.ptp(local_wfs, axis=1).std(axis=1)
        return features
class GlobalPeakToPeakFeature(PipelineNode):
    """Per-peak global peak-to-peak amplitude over the neighbouring channels.

    The feature is max - min taken jointly over time and the channels
    within ``local_radius_um`` of the peak's main channel.
    """

    def __init__(self, recording, name='global_ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            features[rows] = local_wfs.max(axis=(1, 2)) - local_wfs.min(axis=(1, 2))
        return features
class KurtosisPeakToPeakFeature(PipelineNode):
    """Per-peak kurtosis of the peak-to-peak amplitudes across the
    channels neighbouring each peak's main channel."""

    def __init__(self, recording, name='kurtosis_ptp_feature',
                 return_output=True, parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # Import the submodule explicitly: a bare `import scipy` does not
        # guarantee `scipy.stats` is available on all scipy versions.
        import scipy.stats

        all_ptps = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            (idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan])
            wfs = waveforms[idx][:, :, chan_inds]
            # Kurtosis of the per-channel ptp distribution for each peak.
            all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class EnergyFeature(PipelineNode):
    """Per-peak waveform energy: L2 norm over time and neighbouring
    channels, normalized by the number of neighbouring channels."""

    def __init__(self, recording, name='energy_feature', return_output=True,
                 parents=None, local_radius_um=50.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)

    def get_dtype(self):
        return np.dtype('float32')

    def compute(self, traces, peaks, waveforms):
        energies = np.zeros(peaks.size, dtype='float32')
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            energies[rows] = np.linalg.norm(local_wfs, axis=(1, 2)) / neighbours.size
        return energies
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PeakToPeakLagsFeature(PipelineNode):
    """Per-peak lag (in samples) between the waveform maximum and minimum.

    With ``all_channels=True`` the lag is returned for every channel;
    otherwise a single lag per peak is returned, taken on the
    neighbouring channel with the largest peak-to-peak amplitude.
    """

    def __init__(self, recording, name='ptp_lag_feature', return_output=True,
                 parents=None, local_radius_um=150.0, all_channels=True):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.all_channels = all_channels
        self.local_radius_um = local_radius_um
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um, all_channels=all_channels)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        if self.all_channels:
            # Lag per channel: argmax - argmin along the time axis.
            return np.argmax(waveforms, axis=1) - np.argmin(waveforms, axis=1)
        out = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            lags = np.argmax(local_wfs, axis=1) - np.argmin(local_wfs, axis=1)
            # Keep the lag from the neighbouring channel with the largest ptp.
            best = np.argmax(np.ptp(local_wfs, axis=1), axis=1)
            out[rows] = lags[np.arange(len(rows)), best]
        return out
class RandomProjectionsFeature(PipelineNode):
    """Project per-channel peak-to-peak amplitudes onto random projection vectors."""

    def __init__(self, recording, name='random_projections_feature',
                 return_output=True, parents=None, projections=None,
                 local_radius_um=150.0, min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.projections = projections
        self.local_radius_um = local_radius_um
        self.min_values = min_values
        # Channels closer than local_radius_um count as neighbours.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(projections=projections, local_radius_um=local_radius_um,
                            min_values=min_values)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            basis = self.projections[neighbours, :]
            # Peak-to-peak amplitude over time on each neighbouring channel.
            weights = np.ptp(waveforms[rows][:, :, neighbours], axis=1)
            if self.min_values is not None:
                # Nonlinear re-weighting relative to min_values
                # (presumably per-channel thresholds -- TODO confirm).
                weights = (weights / self.min_values[neighbours]) ** 4
            totals = weights.sum(axis=1)
            valid = totals != 0
            features[rows[valid]] = np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
        return features
class RandomProjectionsEnergyFeature(PipelineNode):
    """Project per-channel waveform energies onto random projection vectors.

    For each peak, the L2 norm over time of the waveform on every
    neighbouring channel is computed, optionally re-weighted by
    ``min_values``, and used to combine the rows of ``projections``
    restricted to those channels.
    """

    def __init__(self, recording, name='random_projections_energy_feature',
                 return_output=True, parents=None, projections=None,
                 local_radius_um=150.0, min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        # Neighbourhood structure: channels closer than local_radius_um.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self.projections = projections
        self.min_values = min_values
        self.local_radius_um = local_radius_um
        self._kwargs.update(projections=projections, min_values=min_values,
                            local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            basis = self.projections[neighbours, :]
            # Energy per neighbouring channel: L2 norm over the time axis.
            weights = np.linalg.norm(waveforms[rows][:, :, neighbours], axis=1)
            if self.min_values is not None:
                # Nonlinear re-weighting relative to min_values
                # (presumably per-channel thresholds -- TODO confirm).
                weights = (weights / self.min_values[neighbours]) ** 4
            totals = weights.sum(axis=1)
            valid = totals != 0
            features[rows[valid]] = np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
        return features
class StdPeakToPeakFeature(PipelineNode):
    """Per-peak standard deviation of the peak-to-peak amplitudes over
    the channels neighbouring each peak's main channel."""

    def __init__(self, recording, name='std_ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        # Channels closer than local_radius_um are considered neighbours.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # One value per peak: std of the ptp across neighbouring channels.
        features = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            features[rows] = np.ptp(local_wfs, axis=1).std(axis=1)
        return features
class GlobalPeakToPeakFeature(PipelineNode):
    """Per-peak global peak-to-peak amplitude over the neighbouring channels.

    The feature is max - min taken jointly over time and the channels
    within ``local_radius_um`` of the peak's main channel.
    """

    def __init__(self, recording, name='global_ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            features[rows] = local_wfs.max(axis=(1, 2)) - local_wfs.min(axis=(1, 2))
        return features
class KurtosisPeakToPeakFeature(PipelineNode):
    """Per-peak kurtosis of the peak-to-peak amplitudes across the
    channels neighbouring each peak's main channel."""

    def __init__(self, recording, name='kurtosis_ptp_feature',
                 return_output=True, parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # Import the submodule explicitly: a bare `import scipy` does not
        # guarantee `scipy.stats` is available on all scipy versions.
        import scipy.stats

        all_ptps = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            (idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan])
            wfs = waveforms[idx][:, :, chan_inds]
            # Kurtosis of the per-channel ptp distribution for each peak.
            all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class EnergyFeature(PipelineNode):
    """Per-peak waveform energy: L2 norm over time and neighbouring
    channels, normalized by the number of neighbouring channels."""

    def __init__(self, recording, name='energy_feature', return_output=True,
                 parents=None, local_radius_um=50.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)

    def get_dtype(self):
        return np.dtype('float32')

    def compute(self, traces, peaks, waveforms):
        energies = np.zeros(peaks.size, dtype='float32')
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            energies[rows] = np.linalg.norm(local_wfs, axis=(1, 2)) / neighbours.size
        return energies
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PeakToPeakFeature(PipelineNode):
    """Per-peak peak-to-peak amplitude.

    With ``all_channels=True`` the ptp is returned for every channel of
    every waveform; otherwise a single value per peak is returned: the
    maximum ptp over the channels neighbouring the peak's main channel.
    (The elided ``get_dtype`` is restored from the identical complete
    copy of this class later in the file.)
    """

    def __init__(self, recording, name='ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0, all_channels=True):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self.all_channels = all_channels
        self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        if self.all_channels:
            all_ptps = np.ptp(waveforms, axis=1)
        else:
            all_ptps = np.zeros(peaks.size)
            for main_chan in np.unique(peaks['channel_ind']):
                (idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
                (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan])
                wfs = waveforms[idx][:, :, chan_inds]
                # BUG FIX: reduce per waveform (axis=1), not over the whole
                # batch -- the original assigned one scalar to every peak.
                all_ptps[idx] = np.max(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class PeakToPeakLagsFeature(PipelineNode):
    """Per-peak lag (in samples) between the waveform maximum and minimum.

    With ``all_channels=True`` the lag is returned for every channel;
    otherwise a single lag per peak is returned, taken on the
    neighbouring channel with the largest peak-to-peak amplitude.
    """

    def __init__(self, recording, name='ptp_lag_feature', return_output=True,
                 parents=None, local_radius_um=150.0, all_channels=True):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.all_channels = all_channels
        self.local_radius_um = local_radius_um
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um, all_channels=all_channels)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        if self.all_channels:
            # Lag per channel: argmax - argmin along the time axis.
            return np.argmax(waveforms, axis=1) - np.argmin(waveforms, axis=1)
        out = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            lags = np.argmax(local_wfs, axis=1) - np.argmin(local_wfs, axis=1)
            # Keep the lag from the neighbouring channel with the largest ptp.
            best = np.argmax(np.ptp(local_wfs, axis=1), axis=1)
            out[rows] = lags[np.arange(len(rows)), best]
        return out
class RandomProjectionsFeature(PipelineNode):
    """Project per-channel peak-to-peak amplitudes onto random projection vectors."""

    def __init__(self, recording, name='random_projections_feature',
                 return_output=True, parents=None, projections=None,
                 local_radius_um=150.0, min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.projections = projections
        self.local_radius_um = local_radius_um
        self.min_values = min_values
        # Channels closer than local_radius_um count as neighbours.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(projections=projections, local_radius_um=local_radius_um,
                            min_values=min_values)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            basis = self.projections[neighbours, :]
            # Peak-to-peak amplitude over time on each neighbouring channel.
            weights = np.ptp(waveforms[rows][:, :, neighbours], axis=1)
            if self.min_values is not None:
                # Nonlinear re-weighting relative to min_values
                # (presumably per-channel thresholds -- TODO confirm).
                weights = (weights / self.min_values[neighbours]) ** 4
            totals = weights.sum(axis=1)
            valid = totals != 0
            features[rows[valid]] = np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
        return features
class RandomProjectionsEnergyFeature(PipelineNode):
    """Project per-channel waveform energies onto random projection vectors.

    For each peak, the L2 norm over time of the waveform on every
    neighbouring channel is computed, optionally re-weighted by
    ``min_values``, and used to combine the rows of ``projections``
    restricted to those channels.
    """

    def __init__(self, recording, name='random_projections_energy_feature',
                 return_output=True, parents=None, projections=None,
                 local_radius_um=150.0, min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        # Neighbourhood structure: channels closer than local_radius_um.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self.projections = projections
        self.min_values = min_values
        self.local_radius_um = local_radius_um
        self._kwargs.update(projections=projections, min_values=min_values,
                            local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            basis = self.projections[neighbours, :]
            # Energy per neighbouring channel: L2 norm over the time axis.
            weights = np.linalg.norm(waveforms[rows][:, :, neighbours], axis=1)
            if self.min_values is not None:
                # Nonlinear re-weighting relative to min_values
                # (presumably per-channel thresholds -- TODO confirm).
                weights = (weights / self.min_values[neighbours]) ** 4
            totals = weights.sum(axis=1)
            valid = totals != 0
            features[rows[valid]] = np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
        return features
class StdPeakToPeakFeature(PipelineNode):
    """Per-peak standard deviation of the peak-to-peak amplitudes over
    the channels neighbouring each peak's main channel."""

    def __init__(self, recording, name='std_ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        # Channels closer than local_radius_um are considered neighbours.
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # One value per peak: std of the ptp across neighbouring channels.
        features = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            features[rows] = np.ptp(local_wfs, axis=1).std(axis=1)
        return features
class GlobalPeakToPeakFeature(PipelineNode):
    """Per-peak global peak-to-peak amplitude over the neighbouring channels.

    The feature is max - min taken jointly over time and the channels
    within ``local_radius_um`` of the peak's main channel.
    """

    def __init__(self, recording, name='global_ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros(peaks.size)
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            features[rows] = local_wfs.max(axis=(1, 2)) - local_wfs.min(axis=(1, 2))
        return features
class KurtosisPeakToPeakFeature(PipelineNode):
    """Per-peak kurtosis of the peak-to-peak amplitudes across the
    channels neighbouring each peak's main channel."""

    def __init__(self, recording, name='kurtosis_ptp_feature',
                 return_output=True, parents=None, local_radius_um=150.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # Import the submodule explicitly: a bare `import scipy` does not
        # guarantee `scipy.stats` is available on all scipy versions.
        import scipy.stats

        all_ptps = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            (idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan])
            wfs = waveforms[idx][:, :, chan_inds]
            # Kurtosis of the per-channel ptp distribution for each peak.
            all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class EnergyFeature(PipelineNode):
    """Per-peak waveform energy: L2 norm over time and neighbouring
    channels, normalized by the number of neighbouring channels."""

    def __init__(self, recording, name='energy_feature', return_output=True,
                 parents=None, local_radius_um=50.0):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(local_radius_um=local_radius_um)

    def get_dtype(self):
        return np.dtype('float32')

    def compute(self, traces, peaks, waveforms):
        energies = np.zeros(peaks.size, dtype='float32')
        for chan in np.unique(peaks['channel_ind']):
            (rows,) = np.nonzero(peaks['channel_ind'] == chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[chan])
            local_wfs = waveforms[rows][:, :, neighbours]
            energies[rows] = np.linalg.norm(local_wfs, axis=(1, 2)) / neighbours.size
        return energies
return energy
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""Sorting components: peak waveform features."""
import numpy as np
from spikeinterface.core.job_tools import fix_job_kwargs
from spikeinterface.core import get_channel_distances
from spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeMonopolarTriangulation
from spikeinterface.sortingcomponents.peak_pipeline import run_peak_pipeline, PipelineNode, ExtractDenseWaveforms
def compute_features_from_peaks(
    recording,
    peaks,
    feature_list=None,
    feature_params=None,
    ms_before=1.,
    ms_after=1.,
    **job_kwargs,
):
    """Extract features on the fly from the recording given a list of peaks.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor object.
    peaks: array
        Peaks array, as returned by detect_peaks() in "compact_numpy" way.
    feature_list: list of str, default ["ptp"]
        Names of features to be computed (keys of the module-level
        ``_features_class`` registry), e.g. amplitude, ptp, com, energy.
    feature_params: dict, default {}
        Optional per-feature keyword arguments, keyed by feature name.
    ms_before: float
        The duration in ms before the peak for extracting the features (default 1 ms)
    ms_after: float
        The duration in ms after the peak for extracting the features (default 1 ms)

    {}

    Returns
    -------
    A tuple of features. Even if there is one feature.
    Every feature have shape[0] == peaks.shape[0].
    dtype and other dim depends on features.
    """
    job_kwargs = fix_job_kwargs(job_kwargs)

    # Avoid mutable default arguments: the previous defaults (a list and a
    # dict in the signature) were shared across calls.
    feature_list = ["ptp"] if feature_list is None else feature_list
    feature_params = {} if feature_params is None else feature_params

    # All feature nodes share a single dense-waveform extraction step.
    extract_dense_waveforms = ExtractDenseWaveforms(
        recording, ms_before=ms_before, ms_after=ms_after, return_output=False)

    nodes = [extract_dense_waveforms]
    for feature_name in feature_list:
        Class = _features_class[feature_name]
        # Copy so the caller's params dict is never mutated downstream.
        params = feature_params.get(feature_name, {}).copy()
        nodes.append(Class(recording, parents=[extract_dense_waveforms], **params))

    features = run_peak_pipeline(recording, peaks, nodes, job_kwargs,
                                 job_name='features_from_peaks', squeeze_output=False)

    return features
class AmplitudeFeature(PipelineNode):
    """Per-peak amplitude feature.

    With ``all_channels=True`` the signed extremum over time is returned
    for every channel; otherwise a single scalar per peak is returned,
    reduced over both time and channels. ``peak_sign`` selects min
    ('neg'), max ('pos') or max of the absolute value ('both').
    """

    def __init__(self, recording, name='amplitude_feature', return_output=True,
                 parents=None, all_channels=False, peak_sign='neg'):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.all_channels = all_channels
        self.peak_sign = peak_sign
        self._kwargs.update(dict(all_channels=all_channels, peak_sign=peak_sign))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        if self.all_channels:
            if self.peak_sign == 'neg':
                amplitudes = np.min(waveforms, axis=1)
            elif self.peak_sign == 'pos':
                amplitudes = np.max(waveforms, axis=1)
            elif self.peak_sign == 'both':
                # BUG FIX: np.abs() takes no `axis` argument -- the original
                # `np.abs(waveforms, axis=1)` raised a TypeError. Take the
                # absolute value first, then reduce over the time axis.
                amplitudes = np.max(np.abs(waveforms), axis=1)
        else:
            if self.peak_sign == 'neg':
                amplitudes = np.min(waveforms, axis=(1, 2))
            elif self.peak_sign == 'pos':
                amplitudes = np.max(waveforms, axis=(1, 2))
            elif self.peak_sign == 'both':
                amplitudes = np.max(np.abs(waveforms), axis=(1, 2))
        return amplitudes
class PeakToPeakFeature(PipelineNode):
    """Per-peak peak-to-peak amplitude.

    With ``all_channels=True`` the ptp is returned for every channel of
    every waveform; otherwise a single value per peak is returned: the
    maximum ptp over the channels neighbouring the peak's main channel.
    """

    def __init__(self, recording, name='ptp_feature', return_output=True,
                 parents=None, local_radius_um=150.0, all_channels=True):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self.all_channels = all_channels
        self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        if self.all_channels:
            all_ptps = np.ptp(waveforms, axis=1)
        else:
            all_ptps = np.zeros(peaks.size)
            for main_chan in np.unique(peaks['channel_ind']):
                (idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
                (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan])
                wfs = waveforms[idx][:, :, chan_inds]
                # BUG FIX: reduce per waveform (axis=1), not over the whole
                # batch -- the original assigned one scalar to every peak.
                all_ptps[idx] = np.max(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class PeakToPeakLagsFeature(PipelineNode):
    """Pipeline node returning, per peak, the sample lag between the waveform
    maximum and minimum (argmax - argmin along the sample axis).

    With ``all_channels=True`` the lag is computed independently on every
    channel; otherwise a single lag per peak is kept, taken from the
    neighbour channel (within ``local_radius_um``) that has the largest
    peak-to-peak amplitude.
    """

    def __init__(self, recording, name='ptp_lag_feature', return_output=True, parents=None,
                 local_radius_um=150., all_channels=True):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.all_channels = all_channels
        self.local_radius_um = local_radius_um
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        if self.all_channels:
            # One lag per (peak, channel).
            return np.argmax(waveforms, axis=1) - np.argmin(waveforms, axis=1)

        all_lags = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            (peak_idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[main_chan])
            local_wfs = waveforms[peak_idx][:, :, neighbours]
            # Lag on every neighbour channel...
            per_chan_lags = np.argmax(local_wfs, axis=1) - np.argmin(local_wfs, axis=1)
            # ...then keep only the lag of the channel with the biggest ptp.
            best_chan = np.argmax(np.ptp(local_wfs, axis=1), axis=1)
            all_lags[peak_idx] = per_chan_lags[np.arange(len(peak_idx)), best_chan]
        return all_lags
class RandomProjectionsFeature(PipelineNode):
    """Pipeline node projecting each peak's per-channel ptp profile onto a
    set of projection vectors.

    For every peak, the ptp amplitude is computed on each neighbour channel
    (within ``local_radius_um`` of the main channel), optionally normalised
    by ``min_values`` and raised to the 4th power, then projected onto
    ``projections`` and divided by the summed ptp amplitudes.
    """

    def __init__(self, recording, name='random_projections_feature', return_output=True, parents=None,
                 projections=None, local_radius_um=150., min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.projections = projections
        self.local_radius_um = local_radius_um
        self.min_values = min_values
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(projections=projections, local_radius_um=local_radius_um,
                                 min_values=min_values))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        num_projections = self.projections.shape[1]
        features = np.zeros((peaks.size, num_projections), dtype=self._dtype)
        for main_chan in np.unique(peaks['channel_ind']):
            (peak_idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[main_chan])
            basis = self.projections[neighbours, :]
            ptp = np.ptp(waveforms[peak_idx][:, :, neighbours], axis=1)
            if self.min_values is not None:
                # Normalise by the per-channel reference values and sharpen.
                ptp = (ptp / self.min_values[neighbours]) ** 4
            totals = np.sum(ptp, axis=1)
            valid = totals != 0
            # Peaks with an all-zero profile keep their zero feature row.
            features[peak_idx[valid]] = np.dot(ptp[valid], basis) / totals[valid][:, np.newaxis]
        return features
class RandomProjectionsEnergyFeature(PipelineNode):
    """Pipeline node projecting each peak's per-channel energy profile onto
    a set of projection vectors.

    For every peak, the L2 norm over samples is computed on each neighbour
    channel (within ``local_radius_um`` of the main channel), optionally
    normalised by ``min_values`` and raised to the 4th power, then projected
    onto ``projections`` and divided by the summed energies.
    """

    def __init__(self, recording, name='random_projections_energy_feature', return_output=True, parents=None,
                 projections=None, local_radius_um=150., min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self.projections = projections
        self.min_values = min_values
        self.local_radius_um = local_radius_um
        self._kwargs.update(dict(projections=projections, min_values=min_values,
                                 local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        features = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)
        for main_chan in np.unique(peaks['channel_ind']):
            (peak_idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[main_chan])
            basis = self.projections[neighbours, :]
            # L2 norm over the sample axis -> one energy per neighbour channel.
            energies = np.linalg.norm(waveforms[peak_idx][:, :, neighbours], axis=1)
            if self.min_values is not None:
                energies = (energies / self.min_values[neighbours]) ** 4
            totals = np.sum(energies, axis=1)
            valid = totals != 0
            # Peaks with an all-zero profile keep their zero feature row.
            features[peak_idx[valid]] = np.dot(energies[valid], basis) / totals[valid][:, np.newaxis]
        return features
class StdPeakToPeakFeature(PipelineNode):
    """Pipeline node returning, per peak, the standard deviation of the ptp
    amplitudes across the neighbour channels (channels closer than
    ``local_radius_um`` to the peak's main channel).
    """

    def __init__(self, recording, name='std_ptp_feature', return_output=True, parents=None,
                 local_radius_um=150.):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        result = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            (peak_idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[main_chan])
            # ptp per neighbour channel, then its spread across channels.
            ptps = np.ptp(waveforms[peak_idx][:, :, neighbours], axis=1)
            result[peak_idx] = np.std(ptps, axis=1)
        return result
class GlobalPeakToPeakFeature(PipelineNode):
    """Pipeline node returning, per peak, the global extent (max minus min
    over all samples and all neighbour channels) of the waveform, where
    neighbour channels are those closer than ``local_radius_um`` to the
    peak's main channel.
    """

    def __init__(self, recording, name='global_ptp_feature', return_output=True, parents=None,
                 local_radius_um=150.):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        result = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            (peak_idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[main_chan])
            local_wfs = waveforms[peak_idx][:, :, neighbours]
            # max - min over samples and channels together == ptp over (1, 2).
            result[peak_idx] = np.ptp(local_wfs, axis=(1, 2))
        return result
class KurtosisPeakToPeakFeature(PipelineNode):
    """Pipeline node returning, per peak, the kurtosis of the ptp amplitudes
    across the neighbour channels (channels closer than ``local_radius_um``
    to the peak's main channel).
    """

    def __init__(self, recording, name='kurtosis_ptp_feature', return_output=True, parents=None,
                 local_radius_um=150.):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # Robustness fix: 'import scipy' alone only exposes scipy.stats on
        # SciPy versions with lazy subpackage loading (or if another module
        # imported it first); import the subpackage explicitly. Kept local
        # so scipy stays an optional dependency of this node.
        import scipy.stats
        all_ptps = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            idx, = np.nonzero(peaks['channel_ind'] == main_chan)
            chan_inds, = np.nonzero(self.neighbours_mask[main_chan])
            wfs = waveforms[idx][:, :, chan_inds]
            # Kurtosis of each peak's per-channel ptp profile.
            all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class EnergyFeature(PipelineNode):
    """Pipeline node returning, per peak, the waveform energy: the Frobenius
    norm over samples and neighbour channels (channels closer than
    ``local_radius_um`` to the main channel), normalised by the number of
    neighbour channels. Output is always float32.
    """

    def __init__(self, recording, name='energy_feature', return_output=True, parents=None,
                 local_radius_um=50.):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)
        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))

    def get_dtype(self):
        return np.dtype('float32')

    def compute(self, traces, peaks, waveforms):
        energy = np.zeros(peaks.size, dtype='float32')
        for main_chan in np.unique(peaks['channel_ind']):
            (peak_idx,) = np.nonzero(peaks['channel_ind'] == main_chan)
            (neighbours,) = np.nonzero(self.neighbours_mask[main_chan])
            local_wfs = waveforms[peak_idx][:, :, neighbours]
            # Frobenius norm over (samples, channels), scaled by the
            # neighbour count so energy is comparable across channel groups.
            energy[peak_idx] = np.linalg.norm(local_wfs, axis=(1, 2)) / neighbours.size
        return energy
# Registry mapping the feature-name strings accepted by the
# compute-features entry point to their PipelineNode implementation class.
# The localization classes are defined elsewhere in this package.
_features_class = {
    'amplitude': AmplitudeFeature,
    'ptp' : PeakToPeakFeature,
    'center_of_mass' : LocalizeCenterOfMass,
    'monopolar_triangulation' : LocalizeMonopolarTriangulation,
    'energy' : EnergyFeature,
    'std_ptp' : StdPeakToPeakFeature,
    'kurtosis_ptp' : KurtosisPeakToPeakFeature,
    'random_projections_ptp' : RandomProjectionsFeature,
    'random_projections_energy' : RandomProjectionsEnergyFeature,
    'ptp_lag' : PeakToPeakLagsFeature,
    'global_ptp' : GlobalPeakToPeakFeature
}
|
flexible
|
{
"blob_id": "6fe22b3f98bff1a9b775fce631ae94a4ee22b04c",
"index": 4371,
"step-1": "<mask token>\n\n\nclass RandomProjectionsFeature(PipelineNode):\n <mask token>\n\n def get_dtype(self):\n return self._dtype\n <mask token>\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n 
self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n 
self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < 
local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass 
GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def 
__init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_lag_feature', return_output=\n True, parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n 
self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n 
local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = 
np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n 
idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass PeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_feature', return_output=True,\n parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.all_channels = all_channels\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = recording.get_dtype()\n <mask token>\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_ptps = np.ptp(waveforms, axis=1)\n else:\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(np.ptp(wfs, axis=1))\n return all_ptps\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_lag_feature', return_output=\n True, parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = 
np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, 
name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return 
self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = 
np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n",
"step-5": "\"\"\"Sorting components: peak waveform features.\"\"\"\nimport numpy as np\n\nfrom spikeinterface.core.job_tools import fix_job_kwargs\nfrom spikeinterface.core import get_channel_distances\nfrom spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeMonopolarTriangulation\nfrom spikeinterface.sortingcomponents.peak_pipeline import run_peak_pipeline, PipelineNode, ExtractDenseWaveforms\n\n\n\ndef compute_features_from_peaks(\n recording,\n peaks,\n feature_list=[\"ptp\", ],\n feature_params={},\n ms_before=1.,\n ms_after=1.,\n **job_kwargs,\n):\n \"\"\"Extract features on the fly from the recording given a list of peaks. \n\n Parameters\n ----------\n recording: RecordingExtractor\n The recording extractor object.\n peaks: array\n Peaks array, as returned by detect_peaks() in \"compact_numpy\" way.\n feature_list: List of features to be computed.\n - amplitude\n - ptp\n - com\n - energy\n ms_before: float\n The duration in ms before the peak for extracting the features (default 1 ms)\n ms_after: float\n The duration in ms after the peakfor extracting the features (default 1 ms)\n\n {}\n\n Returns\n -------\n A tuple of features. 
Even if there is one feature.\n Every feature have shape[0] == peaks.shape[0].\n dtype and other dim depends on features.\n\n \"\"\"\n job_kwargs = fix_job_kwargs(job_kwargs)\n\n extract_dense_waveforms = ExtractDenseWaveforms(recording, ms_before=ms_before, ms_after=ms_after, return_output=False)\n nodes = [\n extract_dense_waveforms,\n ]\n for feature_name in feature_list:\n Class = _features_class[feature_name]\n params = feature_params.get(feature_name, {}).copy()\n node = Class(recording, parents=[extract_dense_waveforms], **params)\n nodes.append(node)\n\n features = run_peak_pipeline(recording, peaks, nodes, job_kwargs, job_name='features_from_peaks', squeeze_output=False)\n\n return features\n\n\nclass AmplitudeFeature(PipelineNode):\n def __init__(self, recording, name='amplitude_feature', return_output=True, parents=None, \n all_channels=False, peak_sign='neg'):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.all_channels = all_channels\n self.peak_sign = peak_sign\n self._kwargs.update(dict(all_channels=all_channels, peak_sign=peak_sign))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n if self.peak_sign == 'neg':\n amplitudes = np.min(waveforms, axis=1)\n elif self.peak_sign == 'pos':\n amplitudes = np.max(waveforms, axis=1)\n elif self.peak_sign == 'both':\n amplitudes = np.max(np.abs(waveforms, axis=1))\n else:\n if self.peak_sign == 'neg':\n amplitudes = np.min(waveforms, axis=(1, 2))\n elif self.peak_sign == 'pos':\n amplitudes = np.max(waveforms, axis=(1, 2))\n elif self.peak_sign == 'both':\n amplitudes = np.max(np.abs(waveforms), axis=(1, 2))\n return amplitudes\n\n\nclass PeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='ptp_feature', return_output=True, parents=None,\n local_radius_um=150., all_channels=True):\n PipelineNode.__init__(self, recording, 
return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.all_channels = all_channels\n self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_ptps = np.ptp(waveforms, axis=1)\n else:\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(np.ptp(wfs, axis=1))\n return all_ptps\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n def __init__(self, recording, name='ptp_lag_feature', return_output=True, parents=None,\n local_radius_um=150., all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, 
chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature', return_output=True, parents=None,\n projections=None, local_radius_um=150., min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(projections=projections, local_radius_um=local_radius_um, min_values=min_values))\n \n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = (waveforms[idx][:, :, chan_inds]).ptp(axis=1)\n\n if self.min_values is not None:\n wf_ptp = (wf_ptp/self.min_values[chan_inds])**4\n\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections)/(denom[mask][:, np.newaxis])\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n def __init__(self, recording, name='random_projections_energy_feature', return_output=True, parents=None,\n projections=None, local_radius_um=150., min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output, 
parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n\n if self.min_values is not None:\n energies = (energies/self.min_values[chan_inds])**4\n\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n\n all_projections[idx[mask]] = np.dot(energies[mask], local_projections)/(denom[mask][:, np.newaxis])\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='std_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = 
np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='global_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='kurtosis_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = 
waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n def __init__(self, recording, name='energy_feature', return_output=True, parents=None,\n local_radius_um=50.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n_features_class = {\n 'amplitude': AmplitudeFeature,\n 'ptp' : PeakToPeakFeature,\n 'center_of_mass' : LocalizeCenterOfMass,\n 'monopolar_triangulation' : LocalizeMonopolarTriangulation,\n 'energy' : EnergyFeature,\n 'std_ptp' : StdPeakToPeakFeature,\n 'kurtosis_ptp' : KurtosisPeakToPeakFeature,\n 'random_projections_ptp' : RandomProjectionsFeature,\n 'random_projections_energy' : RandomProjectionsEnergyFeature,\n 'ptp_lag' : PeakToPeakLagsFeature,\n 'global_ptp' : GlobalPeakToPeakFeature\n}",
"step-ids": [
22,
24,
28,
31,
40
]
}
|
[
22,
24,
28,
31,
40
] |
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import classification_report
from BlogTutorials.pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from BlogTutorials.pyimagesearch.preprocessing.ROIpreprocessor import ROIPreprocessor
from BlogTutorials.pyimagesearch.datasets.simpledatasetloader import SimpleDatasetLoader
from BlogTutorials.pyimagesearch.nn.conv.minivggnet import MiniVGGNet
from keras.optimizers import SGD
from keras.utils import np_utils
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os

"""
    VGG16 net trained on depth maps for mushrooms/end tool in frame. Attempts binary classification of a depth map,
    with output softmax to classify a prediction of the pick will be successful.

    Outline of training approach (dogs_vs_cats classifier as template)

    320 x 240 input pixels
    (add mask layer of target mushroom)
    labels org: /pick_confidence_net/pick_fail_depth & /pick_success_depth

    /config/pick_confidence_config.py
        file paths
        num classes, training/val/test ratio of data
        HDF5 data locations
        outputs: model, normalization distance values, charts/training data

    /build_pick_conf_dataset.py
        # Get image paths
        # Sep training, test, and validation data
        # datasets list
        # Preprocessors images (crop to ROI (160:320,0:120), normalize distance points)

"""


dataset_path = r"/home/matthewlefort/Documents/gitRepos/bella_training/pick_confidence_net/Depth"

# Derive class labels from the directory layout: <dataset_path>/<class_name>/<image>
imagePaths = list(paths.list_images(dataset_path))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]


# Initialize preprocessors: crop each depth map to the region of interest,
# then convert to a Keras-ordered array.
roip = ROIPreprocessor(xmin=0, xmax=120, ymin=160, ymax=320)
iap = ImageToArrayPreprocessor()

# Load the dataset from disk (img_load="mat" -> depth maps stored as .mat files),
# logging progress every 250 images.
sdl = SimpleDatasetLoader(preprocessors=[roip, iap], img_load="mat")
(data, labels) = sdl.load(imagePaths=imagePaths, verbose=250)

# Integer-encode the string labels, then one-hot encode for the 2-class softmax.
labels = np.array(labels)
le = LabelEncoder()
labels = np_utils.to_categorical(le.fit_transform(labels), 2)

# Account for the skew in the label distribution: weight the minority class
# more heavily during training (Keras expects a {class_index: weight} mapping).
classTotals = labels.sum(axis=0)
classWeight = float(classTotals.max()) / classTotals

# Split data. Stratified sampling keeps the class ratio of the full dataset
# in both the training and test partitions.
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=.2,
                                                  stratify=labels, random_state=42)

# Initialize the optimizer and model.
print("[info] compile model...")
opt = SGD(lr=0.05)
model = MiniVGGNet.build(width=64, height=64, depth=3, classes=len(classNames))
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

epochs = 100
print("[info] Training model")
# BUG FIX: classWeight was computed above but never handed to fit(), so the
# skew correction silently had no effect. Pass it as a {class_index: weight}
# dict, which all Keras versions accept.
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              class_weight=dict(enumerate(classWeight)),
              batch_size=32, epochs=epochs, verbose=1)

# Evaluate on the held-out test split.
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=classNames))

# Plot the training/validation loss and accuracy curves.
# NOTE(review): older Keras logs accuracy under "acc"; newer versions use
# "accuracy" -- confirm against the installed Keras version.
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show()
|
normal
|
{
"blob_id": "28cdb59e97f3052dd80f8437574f9ffe09fc1e84",
"index": 6690,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nle.fit(labels)\n<mask token>\nprint('[info] compile model...')\n<mask token>\nmodel.compile(loss='categorical_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n<mask token>\nprint('[info] Training model')\n<mask token>\nprint(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1\n ), target_names=classNames))\nplt.style.use('ggplot')\nplt.figure()\nplt.plot(np.arange(0, epochs), H.history['loss'], label='train_loss')\nplt.plot(np.arange(0, epochs), H.history['val_loss'], label='val_loss')\nplt.plot(np.arange(0, epochs), H.history['acc'], label='train_acc')\nplt.plot(np.arange(0, epochs), H.history['val_acc'], label='val_acc')\nplt.title('Training Loss and Accuracy')\nplt.xlabel('Epoch #')\nplt.ylabel('Loss/Accuracy')\nplt.legend()\nplt.show()\n",
"step-3": "<mask token>\ndataset_path = (\n '/home/matthewlefort/Documents/gitRepos/bella_training/pick_confidence_net/Depth'\n )\nimagePaths = list(paths.list_images(dataset_path))\nclassNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]\nclassNames = [str(x) for x in np.unique(classNames)]\nroip = ROIPreprocessor(xmin=0, xmax=120, ymin=160, ymax=320)\niap = ImageToArrayPreprocessor()\nsdl = SimpleDatasetLoader(preprocessors=[roip, iap], img_load='mat')\ndata, labels = sdl.load(imagePaths=imagePaths, verbose=250)\nlabels = np.array(labels)\nle = LabelEncoder()\nle.fit(labels)\nlabels = np_utils.to_categorical(le.transform(labels), 2)\nclassTotals = labels.sum(axis=0)\nclassWeight = float(classTotals.max()) / classTotals\ntrainX, testX, trainY, testY = train_test_split(data, labels, test_size=0.2,\n stratify=labels, random_state=42)\nprint('[info] compile model...')\nopt = SGD(lr=0.05)\nmodel = MiniVGGNet.build(width=64, height=64, depth=3, classes=len(classNames))\nmodel.compile(loss='categorical_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\nepochs = 100\nprint('[info] Training model')\nH = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32,\n epochs=epochs, verbose=1)\npredictions = model.predict(testX, batch_size=32)\nprint(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1\n ), target_names=classNames))\nplt.style.use('ggplot')\nplt.figure()\nplt.plot(np.arange(0, epochs), H.history['loss'], label='train_loss')\nplt.plot(np.arange(0, epochs), H.history['val_loss'], label='val_loss')\nplt.plot(np.arange(0, epochs), H.history['acc'], label='train_acc')\nplt.plot(np.arange(0, epochs), H.history['val_acc'], label='val_acc')\nplt.title('Training Loss and Accuracy')\nplt.xlabel('Epoch #')\nplt.ylabel('Loss/Accuracy')\nplt.legend()\nplt.show()\n",
"step-4": "from sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.metrics import classification_report\nfrom BlogTutorials.pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor\nfrom BlogTutorials.pyimagesearch.preprocessing.ROIpreprocessor import ROIPreprocessor\nfrom BlogTutorials.pyimagesearch.datasets.simpledatasetloader import SimpleDatasetLoader\nfrom BlogTutorials.pyimagesearch.nn.conv.minivggnet import MiniVGGNet\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n<mask token>\ndataset_path = (\n '/home/matthewlefort/Documents/gitRepos/bella_training/pick_confidence_net/Depth'\n )\nimagePaths = list(paths.list_images(dataset_path))\nclassNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]\nclassNames = [str(x) for x in np.unique(classNames)]\nroip = ROIPreprocessor(xmin=0, xmax=120, ymin=160, ymax=320)\niap = ImageToArrayPreprocessor()\nsdl = SimpleDatasetLoader(preprocessors=[roip, iap], img_load='mat')\ndata, labels = sdl.load(imagePaths=imagePaths, verbose=250)\nlabels = np.array(labels)\nle = LabelEncoder()\nle.fit(labels)\nlabels = np_utils.to_categorical(le.transform(labels), 2)\nclassTotals = labels.sum(axis=0)\nclassWeight = float(classTotals.max()) / classTotals\ntrainX, testX, trainY, testY = train_test_split(data, labels, test_size=0.2,\n stratify=labels, random_state=42)\nprint('[info] compile model...')\nopt = SGD(lr=0.05)\nmodel = MiniVGGNet.build(width=64, height=64, depth=3, classes=len(classNames))\nmodel.compile(loss='categorical_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\nepochs = 100\nprint('[info] Training model')\nH = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32,\n epochs=epochs, verbose=1)\npredictions = model.predict(testX, 
batch_size=32)\nprint(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1\n ), target_names=classNames))\nplt.style.use('ggplot')\nplt.figure()\nplt.plot(np.arange(0, epochs), H.history['loss'], label='train_loss')\nplt.plot(np.arange(0, epochs), H.history['val_loss'], label='val_loss')\nplt.plot(np.arange(0, epochs), H.history['acc'], label='train_acc')\nplt.plot(np.arange(0, epochs), H.history['val_acc'], label='val_acc')\nplt.title('Training Loss and Accuracy')\nplt.xlabel('Epoch #')\nplt.ylabel('Loss/Accuracy')\nplt.legend()\nplt.show()\n",
"step-5": "from sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.metrics import classification_report\nfrom BlogTutorials.pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor\nfrom BlogTutorials.pyimagesearch.preprocessing.ROIpreprocessor import ROIPreprocessor\nfrom BlogTutorials.pyimagesearch.datasets.simpledatasetloader import SimpleDatasetLoader\nfrom BlogTutorials.pyimagesearch.nn.conv.minivggnet import MiniVGGNet\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\"\"\"\n VGG16 net trained on depth maps for mushrooms/end tool in frame. Attempts binary classification of a depth map,\n with output softmax to classify a prediction of the pick will be successful.\n\n Outline of training approach (dogs_vs_cats classifier as template)\n\n 320 x 240 input pixels\n (add mask layer of target mushroom)\n labels org: /pick_confidence_net/pick_fail_depth & /pick_success_depth\n\n /config/pick_confidence_config.py\n file paths\n num classes, training/val/test ratio of data\n HDF5 data locations\n outputs: model, normalization distance values, charts/training data\n\n /build_pick_conf_dataset.py\n # Get image paths\n # Sep training, test, and validation data\n # datasets list\n # Preprocessors images (crop to ROI (160:320,0:120), normalize distance points)\n \n\"\"\"\n\n\ndataset_path = r\"/home/matthewlefort/Documents/gitRepos/bella_training/pick_confidence_net/Depth\"\n\n# get class labels\nimagePaths = list(paths.list_images(dataset_path))\nclassNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]\nclassNames = [str(x) for x in np.unique(classNames)]\n\n\n# init preprocessors\nroip = ROIPreprocessor(xmin=0, xmax=120, ymin=160, ymax=320)\niap = ImageToArrayPreprocessor()\n\n# load the dataset from disk then 
scale the raw pixels\nsdl = SimpleDatasetLoader(preprocessors=[roip, iap], img_load=\"mat\")\n(data, labels) = sdl.load(imagePaths=imagePaths, verbose=250)\n\nlabels = np.array(labels)\nle = LabelEncoder()\nle.fit(labels)\nlabels = np_utils.to_categorical(le.transform(labels), 2)\n\n# accouunt for the skew in data labels. Used to amplifiy training weights of \"smiling\" case given the ratio of\n# [9475 to 3690] (non smiling to smiling)\nclassTotals = labels.sum(axis=0)\nclassWeight = float(classTotals.max()) / classTotals\n\n# split data (stratify sampling samples at same ratio of data set for test to train split. i.e the 9475:3690 ratio\n# is kept in both the test and training set\n(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=.2, stratify=labels, random_state=42)\n\n# partision data\n# split_data = StratifiedShuffleSplit(1, test_size=0.2, random_state=42)\n# for train_idx, test_idx in split_data.split(data, labels):\n# trainX = data[train_idx]\n# testX = data[test_idx]\n# trainY = labels[train_idx]\n# testY = labels[test_idx]\n\n\n# initialize optimizer\nprint(\"[info] compile model...\")\nopt = SGD(lr=0.05)\nmodel = MiniVGGNet.build(width=64, height=64, depth=3, classes=len(classNames))\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\nepochs = 100\nprint(\"[info] Training model\")\nH = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=epochs, verbose=1)\n\npredictions = model.predict(testX, batch_size=32)\nprint(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=classNames))\n\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(np.arange(0, epochs), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, epochs), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, epochs), H.history[\"acc\"], label=\"train_acc\")\nplt.plot(np.arange(0, epochs), H.history[\"val_acc\"], 
label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution(object):
    def twoSum(self, numbers, target):
        """
        Two-pointer search over an ascending sorted list.

        Return the 1-based indices of the pair whose values sum to
        ``target``, scanning inward from both ends of ``numbers``.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int]
        """
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            total = numbers[lo] + numbers[hi]
            if total == target:
                # problem statement uses 1-based indexing
                return [lo + 1, hi + 1]
            if total < target:
                lo += 1   # sum too small: advance the left pointer
            else:
                hi -= 1   # sum too large: retreat the right pointer
|
normal
|
{
"blob_id": "51b3beee8659bccee0fbb64b80fdce18b693674b",
"index": 9481,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def twoSum(self, numbers, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n idx1 = 0\n idx2 = len(numbers) - 1\n while idx1 < idx2:\n left = numbers[idx1]\n right = numbers[idx2]\n if left + right < target:\n idx1 += 1\n elif left + right > target:\n idx2 -= 1\n else:\n return [idx1 + 1, idx2 + 1]\n",
"step-4": "class Solution(object):\n def twoSum(self, numbers, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n idx1 = 0\n idx2 = len(numbers)-1\n while(idx1<idx2): # can also use a for-loop: for num in numbers: \n left = numbers[idx1]\n right = numbers[idx2]\n if (left + right) < target:\n idx1 += 1\n elif (left + right) > target:\n idx2 -= 1\n else:\n return [idx1+1,idx2+1]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import unittest
import subprocess
import tempfile
import os
import filecmp
import shutil
import cfg
import utils
class TestFunctionalHumannEndtoEndBiom(unittest.TestCase):
    """
    Test humann with end to end functional tests
    """

    def test_humann_fastq_biom_output(self):
        """
        Test the standard humann flow on a fastq input file
        Test biom output is written
        """
        out_dir = utils.create_temp_folder("fastq")
        # Run the full pipeline, requesting biom-formatted output files.
        utils.run_humann(["humann", "--input", cfg.demo_fastq,
                          "--output", out_dir, "--output-format", "biom"])
        # Every expected output file must exist and be well formed.
        for ok, msg in utils.check_output(cfg.expected_demo_output_files_biom, out_dir):
            self.assertTrue(ok, msg)
        utils.remove_temp_folder(out_dir)

    def test_humann_fastq_biom_output_pathways(self):
        """
        Test the standard humann flow on a fastq input file
        Test biom output is written
        Test the expected pathways are identified
        """
        out_dir = utils.create_temp_folder("fastq")
        utils.run_humann(["humann", "--input", cfg.demo_fastq,
                          "--output", out_dir, "--output-format", "biom",
                          "--gap-fill", "off"])
        # Convert the biom pathway-abundance table to tsv lines, then pull
        # out just the pathway identifiers (text before the first ':').
        tsv_lines = utils.read_biom_table(os.path.join(out_dir, "demo_pathabundance.biom"))
        found = {line.split("\t")[0].split(":")[0]
                 for line in tsv_lines if "PWY" in line}
        self.assertEqual(found, cfg.expected_demo_output_files_biom_pathways)
        utils.remove_temp_folder(out_dir)

    def test_humann_gene_families_biom_input(self):
        """
        Test the standard humann flow on a gene families output file as input
        Test with the biom format of the gene families file
        """
        out_dir = utils.create_temp_folder("gene_families")
        utils.run_humann(["humann", "--input", cfg.demo_gene_families_biom,
                          "--output", out_dir])
        # The gene families file itself served as input, so the expected
        # output set excludes it.
        for ok, msg in utils.check_output(
                cfg.expected_demo_output_files_genefamilies_input, out_dir):
            self.assertTrue(ok, msg)
        utils.remove_temp_folder(out_dir)
|
normal
|
{
"blob_id": "27702f72ae147c435617acaab7dd7e5a5a737b13",
"index": 8152,
"step-1": "<mask token>\n\n\nclass TestFunctionalHumannEndtoEndBiom(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_humann_gene_families_biom_input(self):\n \"\"\"\n Test the standard humann flow on a gene families output file as input\n Test with the biom format of the gene families file\n \"\"\"\n tempdir = utils.create_temp_folder('gene_families')\n command = ['humann', '--input', cfg.demo_gene_families_biom,\n '--output', tempdir]\n utils.run_humann(command)\n for expression, message in utils.check_output(cfg.\n expected_demo_output_files_genefamilies_input, tempdir):\n self.assertTrue(expression, message)\n utils.remove_temp_folder(tempdir)\n",
"step-2": "<mask token>\n\n\nclass TestFunctionalHumannEndtoEndBiom(unittest.TestCase):\n <mask token>\n\n def test_humann_fastq_biom_output(self):\n \"\"\"\n Test the standard humann flow on a fastq input file\n Test biom output is written\n \"\"\"\n tempdir = utils.create_temp_folder('fastq')\n command = ['humann', '--input', cfg.demo_fastq, '--output', tempdir,\n '--output-format', 'biom']\n utils.run_humann(command)\n for expression, message in utils.check_output(cfg.\n expected_demo_output_files_biom, tempdir):\n self.assertTrue(expression, message)\n utils.remove_temp_folder(tempdir)\n\n def test_humann_fastq_biom_output_pathways(self):\n \"\"\"\n Test the standard humann flow on a fastq input file\n Test biom output is written\n Test the expected pathways are identified\n \"\"\"\n tempdir = utils.create_temp_folder('fastq')\n command = ['humann', '--input', cfg.demo_fastq, '--output', tempdir,\n '--output-format', 'biom', '--gap-fill', 'off']\n utils.run_humann(command)\n pathways_file_tsv = utils.read_biom_table(os.path.join(tempdir,\n 'demo_pathabundance.biom'))\n pathways_found = set([x.split('\\t')[0].split(':')[0] for x in\n filter(lambda x: 'PWY' in x, pathways_file_tsv)])\n self.assertEqual(pathways_found, cfg.\n expected_demo_output_files_biom_pathways)\n utils.remove_temp_folder(tempdir)\n\n def test_humann_gene_families_biom_input(self):\n \"\"\"\n Test the standard humann flow on a gene families output file as input\n Test with the biom format of the gene families file\n \"\"\"\n tempdir = utils.create_temp_folder('gene_families')\n command = ['humann', '--input', cfg.demo_gene_families_biom,\n '--output', tempdir]\n utils.run_humann(command)\n for expression, message in utils.check_output(cfg.\n expected_demo_output_files_genefamilies_input, tempdir):\n self.assertTrue(expression, message)\n utils.remove_temp_folder(tempdir)\n",
"step-3": "<mask token>\n\n\nclass TestFunctionalHumannEndtoEndBiom(unittest.TestCase):\n \"\"\"\n Test humann with end to end functional tests\n \"\"\"\n\n def test_humann_fastq_biom_output(self):\n \"\"\"\n Test the standard humann flow on a fastq input file\n Test biom output is written\n \"\"\"\n tempdir = utils.create_temp_folder('fastq')\n command = ['humann', '--input', cfg.demo_fastq, '--output', tempdir,\n '--output-format', 'biom']\n utils.run_humann(command)\n for expression, message in utils.check_output(cfg.\n expected_demo_output_files_biom, tempdir):\n self.assertTrue(expression, message)\n utils.remove_temp_folder(tempdir)\n\n def test_humann_fastq_biom_output_pathways(self):\n \"\"\"\n Test the standard humann flow on a fastq input file\n Test biom output is written\n Test the expected pathways are identified\n \"\"\"\n tempdir = utils.create_temp_folder('fastq')\n command = ['humann', '--input', cfg.demo_fastq, '--output', tempdir,\n '--output-format', 'biom', '--gap-fill', 'off']\n utils.run_humann(command)\n pathways_file_tsv = utils.read_biom_table(os.path.join(tempdir,\n 'demo_pathabundance.biom'))\n pathways_found = set([x.split('\\t')[0].split(':')[0] for x in\n filter(lambda x: 'PWY' in x, pathways_file_tsv)])\n self.assertEqual(pathways_found, cfg.\n expected_demo_output_files_biom_pathways)\n utils.remove_temp_folder(tempdir)\n\n def test_humann_gene_families_biom_input(self):\n \"\"\"\n Test the standard humann flow on a gene families output file as input\n Test with the biom format of the gene families file\n \"\"\"\n tempdir = utils.create_temp_folder('gene_families')\n command = ['humann', '--input', cfg.demo_gene_families_biom,\n '--output', tempdir]\n utils.run_humann(command)\n for expression, message in utils.check_output(cfg.\n expected_demo_output_files_genefamilies_input, tempdir):\n self.assertTrue(expression, message)\n utils.remove_temp_folder(tempdir)\n",
"step-4": "import unittest\nimport subprocess\nimport tempfile\nimport os\nimport filecmp\nimport shutil\nimport cfg\nimport utils\n\n\nclass TestFunctionalHumannEndtoEndBiom(unittest.TestCase):\n \"\"\"\n Test humann with end to end functional tests\n \"\"\"\n\n def test_humann_fastq_biom_output(self):\n \"\"\"\n Test the standard humann flow on a fastq input file\n Test biom output is written\n \"\"\"\n tempdir = utils.create_temp_folder('fastq')\n command = ['humann', '--input', cfg.demo_fastq, '--output', tempdir,\n '--output-format', 'biom']\n utils.run_humann(command)\n for expression, message in utils.check_output(cfg.\n expected_demo_output_files_biom, tempdir):\n self.assertTrue(expression, message)\n utils.remove_temp_folder(tempdir)\n\n def test_humann_fastq_biom_output_pathways(self):\n \"\"\"\n Test the standard humann flow on a fastq input file\n Test biom output is written\n Test the expected pathways are identified\n \"\"\"\n tempdir = utils.create_temp_folder('fastq')\n command = ['humann', '--input', cfg.demo_fastq, '--output', tempdir,\n '--output-format', 'biom', '--gap-fill', 'off']\n utils.run_humann(command)\n pathways_file_tsv = utils.read_biom_table(os.path.join(tempdir,\n 'demo_pathabundance.biom'))\n pathways_found = set([x.split('\\t')[0].split(':')[0] for x in\n filter(lambda x: 'PWY' in x, pathways_file_tsv)])\n self.assertEqual(pathways_found, cfg.\n expected_demo_output_files_biom_pathways)\n utils.remove_temp_folder(tempdir)\n\n def test_humann_gene_families_biom_input(self):\n \"\"\"\n Test the standard humann flow on a gene families output file as input\n Test with the biom format of the gene families file\n \"\"\"\n tempdir = utils.create_temp_folder('gene_families')\n command = ['humann', '--input', cfg.demo_gene_families_biom,\n '--output', tempdir]\n utils.run_humann(command)\n for expression, message in utils.check_output(cfg.\n expected_demo_output_files_genefamilies_input, tempdir):\n self.assertTrue(expression, message)\n 
utils.remove_temp_folder(tempdir)\n",
"step-5": "import unittest\nimport subprocess\nimport tempfile\nimport os\nimport filecmp\nimport shutil\n\nimport cfg\nimport utils\n\nclass TestFunctionalHumannEndtoEndBiom(unittest.TestCase):\n \"\"\"\n Test humann with end to end functional tests\n \"\"\"\n\n def test_humann_fastq_biom_output(self):\n \"\"\"\n Test the standard humann flow on a fastq input file\n Test biom output is written\n \"\"\"\n \n # create a temp directory for output\n tempdir = utils.create_temp_folder(\"fastq\")\n \n # run humann test\n command = [\"humann\",\"--input\",cfg.demo_fastq,\"--output\",tempdir,\n \"--output-format\", \"biom\"]\n utils.run_humann(command)\n \n # check the output files are as expected\n for expression, message in utils.check_output(cfg.expected_demo_output_files_biom, tempdir):\n self.assertTrue(expression,message)\n\n # remove the temp directory\n utils.remove_temp_folder(tempdir)\n \n def test_humann_fastq_biom_output_pathways(self):\n \"\"\"\n Test the standard humann flow on a fastq input file\n Test biom output is written\n Test the expected pathways are identified\n \"\"\"\n \n # create a temp directory for output\n tempdir = utils.create_temp_folder(\"fastq\")\n \n # run humann test\n command = [\"humann\",\"--input\",cfg.demo_fastq,\"--output\",tempdir,\n \"--output-format\", \"biom\", \"--gap-fill\", \"off\"]\n utils.run_humann(command)\n \n # check the output file of pathway abundance has the expected pathways\n pathways_file_tsv=utils.read_biom_table(os.path.join(tempdir,\"demo_pathabundance.biom\"))\n pathways_found=set([x.split(\"\\t\")[0].split(\":\")[0] for x in filter(lambda x: \"PWY\" in x, pathways_file_tsv)])\n \n self.assertEqual(pathways_found,cfg.expected_demo_output_files_biom_pathways)\n\n # remove the temp directory\n utils.remove_temp_folder(tempdir)\n \n def test_humann_gene_families_biom_input(self):\n \"\"\"\n Test the standard humann flow on a gene families output file as input\n Test with the biom format of the gene families 
file\n \"\"\"\n \n # create a temp directory for output\n tempdir = utils.create_temp_folder(\"gene_families\")\n \n # run humann test\n command = [\"humann\",\"--input\",cfg.demo_gene_families_biom,\"--output\",tempdir]\n utils.run_humann(command)\n \n # check the output files are as expected\n # it will include all output files except the gene families output file\n # since this file was used as input\n for expression, message in utils.check_output(cfg.expected_demo_output_files_genefamilies_input, tempdir):\n self.assertTrue(expression,message)\n\n # remove the temp directory\n utils.remove_temp_folder(tempdir)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import os
import sys
import logging.config
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Float, String, Text, Integer
import pandas as pd
import numpy as np
sys.path.append('./config')
import config
# Emit timestamps plus the originating logger name with every record.
logging.basicConfig(level=logging.INFO, format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')
logger = logging.getLogger(__file__)
# Declarative base class shared by the ORM model(s) defined below.
Base = declarative_base()
class BeanAttributes(Base):
    """ Defines the data model for the table `bean_attributes`. """
    __tablename__ = 'bean_attributes'
    # Surrogate primary key; populated from the CSV's 'Unnamed: 0' index
    # column by persist_to_db().
    id = Column(Integer, primary_key=True)
    # Free-text provenance/descriptive fields. NaNs in the CSV are
    # replaced with '' before insertion, so these may be empty strings.
    species = Column(String(100), unique=False, nullable=True)
    owner = Column(String(100), unique=False, nullable=True)
    country = Column(String(100), unique=False, nullable=True)
    farm_name = Column(String(100), unique=False, nullable=True)
    company = Column(String(100), unique=False, nullable=True)
    region = Column(String(100), unique=False, nullable=True)
    producer = Column(String(100), unique=False, nullable=True)
    grading_date = Column(String(100), unique=False, nullable=True)
    processing_method = Column(Text, unique=False, nullable=True)
    # Numeric cupping-score attributes sourced from the CSV columns of
    # the same name (e.g. 'Aroma', 'Clean.Cup', 'Total.Cup.Points').
    aroma = Column(Float, unique=False, nullable=True)
    flavor = Column(Float, unique=False, nullable=True)
    aftertaste = Column(Float, unique=False, nullable=True)
    acidity = Column(Float, unique=False, nullable=True)
    body = Column(Float, unique=False, nullable=True)
    balance = Column(Float, unique=False, nullable=True)
    uniformity = Column(Float, unique=False, nullable=True)
    cleancup = Column(Float, unique=False, nullable=True)
    sweetness = Column(Float, unique=False, nullable=True)
    total_cup_point = Column(Float, unique=False, nullable=True)
    moisture = Column(Float, unique=False, nullable=True)
    color = Column(String(100), unique=False, nullable=True)
    # Label from the upstream clustering step ('cluster' column).
    cluster = Column(Integer, unique=False, nullable=True)

    def __repr__(self):
        # Identify instances by primary key in debug/log output.
        return '<BeanAttributes %r>' % self.id
def persist_to_db(engine_string):
    """Persist the coffee-bean data table to the database.

    Creates the schema if needed, best-effort clears any existing rows in
    the `bean_attributes` table, then inserts every row of the CSV at
    ``config.DATA_TABLE_PATH`` in a single transaction.

    Args:
        engine_string (`str`): Engine string for SQLAlchemy.

    Returns:
        None.
    """
    engine = sql.create_engine(engine_string)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()

    # Best-effort wipe of existing records. The table may not exist yet on
    # a fresh database, so a failure here is non-fatal -- but catch only
    # Exception (not a bare except:) so KeyboardInterrupt/SystemExit pass.
    delete_stmt = ('''DELETE FROM msia_db.bean_attributes'''
                   if config.LOCAL_DB_FLAG
                   else '''DELETE FROM bean_attributes''')
    try:
        session.execute(delete_stmt)
    except Exception:
        logger.debug('Could not clear existing rows (table may not exist yet)')

    # Read the data table; blank out NaNs so string columns round-trip as
    # empty strings rather than the literal 'nan'.
    raw_data = pd.read_csv(config.DATA_TABLE_PATH)
    raw_data = raw_data.replace(np.nan, '', regex=True)
    try:
        for i in range(raw_data.shape[0]):
            row = raw_data.iloc[i]  # hoisted: one positional lookup per row
            bean_row = BeanAttributes(id=int(row['Unnamed: 0']),
                                      species=str(row['Species']),
                                      owner=str(row['Owner.1']),
                                      country=str(row['Country.of.Origin']),
                                      farm_name=str(row['Farm.Name']),
                                      company=str(row['Company']),
                                      region=str(row['Region']),
                                      producer=str(row['Producer']),
                                      grading_date=str(row['Grading.Date']),
                                      processing_method=str(row['Processing.Method']),
                                      aroma=float(row['Aroma']),
                                      flavor=float(row['Flavor']),
                                      aftertaste=float(row['Aftertaste']),
                                      acidity=float(row['Acidity']),
                                      body=float(row['Body']),
                                      balance=float(row['Balance']),
                                      uniformity=float(row['Uniformity']),
                                      cleancup=float(row['Clean.Cup']),
                                      sweetness=float(row['Sweetness']),
                                      total_cup_point=float(row['Total.Cup.Points']),
                                      moisture=float(row['Moisture']),
                                      color=str(row['Color']),
                                      cluster=int(row['cluster'])
                                      )
            session.add(bean_row)
            logger.debug('Row %d added to table ', i)
        session.commit()
    except sql.exc.IntegrityError:  # primary key duplication
        logger.error("Duplicated coffee bean")
    except Exception as e:
        # BUG FIX: previously `logger.error(msg, e)` passed an argument
        # with no matching %-placeholder, which breaks record formatting.
        logger.error("Incorrect credentials, access denied: %s", e)
    finally:
        session.close()
if __name__ == "__main__":
# Obtain parameters from os
conn_type = "mysql+pymysql"
user = os.environ.get("MYSQL_USER")
password = os.environ.get("MYSQL_PASSWORD")
host = os.environ.get("MYSQL_HOST")
port = os.environ.get("MYSQL_PORT")
database = os.environ.get("DATABASE_NAME")
local_database_path = config.LOCAL_DATABASE_PATH
# If users wish to write to their own SQLALCHEMY_DATABASE_URI in the environment
if config.SQLALCHEMY_DATABASE_URI is None:
# Whether to create a local SQLite database or an AWS RDS database
if config.LOCAL_DB_FLAG:
engine_string = "sqlite:///{}".format(local_database_path)
else:
engine_string = "{}://{}:{}@{}:{}/{}".format(conn_type, user, password, host, port, database)
else:
engine_string = config.SQLALCHEMY_DATABASE_URI
try:
engine_string = 'sqlite:///data/bean.db'
persist_to_db(engine_string)
logger.info("Data successfully persisted into the database")
except Exception as e:
logger.error(e)
sys.exit(1)
|
normal
|
{
"blob_id": "76f2312a01bf8475220a9fcc16209faddfccd2ae",
"index": 9754,
"step-1": "<mask token>\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n 
session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('./config')\n<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\n<mask token>\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = 
sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = os.environ.get('MYSQL_HOST')\n port = 
os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-3": "<mask token>\nsys.path.append('./config')\n<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\nBase = declarative_base()\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n 
Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = 
os.environ.get('MYSQL_HOST')\n port = os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-4": "import os\nimport sys\nimport logging.config\nimport sqlalchemy as sql\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Float, String, Text, Integer\nimport pandas as pd\nimport numpy as np\nsys.path.append('./config')\nimport config\nlogging.basicConfig(level=logging.INFO, format=\n '%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\nBase = declarative_base()\n\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n __tablename__ = 'bean_attributes'\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n 
return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('DELETE FROM msia_db.bean_attributes')\n except:\n pass\n else:\n try:\n session.execute('DELETE FROM bean_attributes')\n except:\n pass\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']\n ), species=str(raw_data.iloc[i]['Species']), owner=str(\n raw_data.iloc[i]['Owner.1']), country=str(raw_data.iloc[i][\n 'Country.of.Origin']), farm_name=str(raw_data.iloc[i][\n 'Farm.Name']), company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']), producer=str(\n raw_data.iloc[i]['Producer']), grading_date=str(raw_data.\n iloc[i]['Grading.Date']), processing_method=str(raw_data.\n iloc[i]['Processing.Method']), aroma=float(raw_data.iloc[i]\n ['Aroma']), flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']), acidity=\n float(raw_data.iloc[i]['Acidity']), body=float(raw_data.\n iloc[i]['Body']), balance=float(raw_data.iloc[i]['Balance']\n ), uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']), sweetness=\n float(raw_data.iloc[i]['Sweetness']), total_cup_point=float\n (raw_data.iloc[i]['Total.Cup.Points']), moisture=float(\n raw_data.iloc[i]['Moisture']), color=str(raw_data.iloc[i][\n 'Color']), cluster=int(raw_data.iloc[i]['cluster']))\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n session.commit()\n except sql.exc.IntegrityError:\n logger.error('Duplicated coffee bean')\n except Exception as e:\n 
logger.error('Incorrect credentials, access denied', e)\n finally:\n session.close()\n\n\nif __name__ == '__main__':\n conn_type = 'mysql+pymysql'\n user = os.environ.get('MYSQL_USER')\n password = os.environ.get('MYSQL_PASSWORD')\n host = os.environ.get('MYSQL_HOST')\n port = os.environ.get('MYSQL_PORT')\n database = os.environ.get('DATABASE_NAME')\n local_database_path = config.LOCAL_DATABASE_PATH\n if config.SQLALCHEMY_DATABASE_URI is None:\n if config.LOCAL_DB_FLAG:\n engine_string = 'sqlite:///{}'.format(local_database_path)\n else:\n engine_string = '{}://{}:{}@{}:{}/{}'.format(conn_type, user,\n password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info('Data successfully persisted into the database')\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n",
"step-5": "import os\nimport sys\nimport logging.config\nimport sqlalchemy as sql\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Float, String, Text, Integer\nimport pandas as pd\nimport numpy as np\nsys.path.append('./config')\nimport config\n\nlogging.basicConfig(level=logging.INFO, format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')\nlogger = logging.getLogger(__file__)\n\nBase = declarative_base()\n\nclass BeanAttributes(Base):\n \"\"\" Defines the data model for the table `bean_attributes`. \"\"\"\n\n __tablename__ = 'bean_attributes'\n\n id = Column(Integer, primary_key=True)\n species = Column(String(100), unique=False, nullable=True)\n owner = Column(String(100), unique=False, nullable=True)\n country = Column(String(100), unique=False, nullable=True)\n farm_name = Column(String(100), unique=False, nullable=True)\n company = Column(String(100), unique=False, nullable=True)\n region = Column(String(100), unique=False, nullable=True)\n producer = Column(String(100), unique=False, nullable=True)\n grading_date = Column(String(100), unique=False, nullable=True)\n processing_method = Column(Text, unique=False, nullable=True)\n aroma = Column(Float, unique=False, nullable=True)\n flavor = Column(Float, unique=False, nullable=True)\n aftertaste = Column(Float, unique=False, nullable=True)\n acidity = Column(Float, unique=False, nullable=True)\n body = Column(Float, unique=False, nullable=True)\n balance = Column(Float, unique=False, nullable=True)\n uniformity = Column(Float, unique=False, nullable=True)\n cleancup = Column(Float, unique=False, nullable=True)\n sweetness = Column(Float, unique=False, nullable=True)\n total_cup_point = Column(Float, unique=False, nullable=True)\n moisture = Column(Float, unique=False, nullable=True)\n color = Column(String(100), unique=False, nullable=True)\n cluster = Column(Integer, unique=False, nullable=True)\n\n def __repr__(self):\n 
return '<BeanAttributes %r>' % self.id\n\n\ndef persist_to_db(engine_string):\n \"\"\"Persist the data to database.\n Args:\n engine_string (`str`): Engine string for SQLAlchemy.\n Returns:\n None.\n \"\"\"\n\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # Delete all existing records in the table\n if config.LOCAL_DB_FLAG:\n try:\n session.execute('''DELETE FROM msia_db.bean_attributes''')\n except:\n pass\n else:\n try:\n session.execute('''DELETE FROM bean_attributes''')\n except:\n pass\n\n # Read the data table and persist it into the database\n raw_data = pd.read_csv(config.DATA_TABLE_PATH)\n raw_data = raw_data.replace(np.nan, '', regex=True)\n\n try:\n for i in range(raw_data.shape[0]):\n bean_row = BeanAttributes(id=int(raw_data.iloc[i]['Unnamed: 0']),\n species=str(raw_data.iloc[i]['Species']),\n owner=str(raw_data.iloc[i]['Owner.1']),\n country=str(raw_data.iloc[i]['Country.of.Origin']),\n farm_name=str(raw_data.iloc[i]['Farm.Name']),\n company=str(raw_data.iloc[i]['Company']),\n region=str(raw_data.iloc[i]['Region']),\n producer=str(raw_data.iloc[i]['Producer']),\n grading_date=str(raw_data.iloc[i]['Grading.Date']),\n processing_method=str(raw_data.iloc[i]['Processing.Method']),\n aroma=float(raw_data.iloc[i]['Aroma']),\n flavor=float(raw_data.iloc[i]['Flavor']),\n aftertaste=float(raw_data.iloc[i]['Aftertaste']),\n acidity=float(raw_data.iloc[i]['Acidity']),\n body=float(raw_data.iloc[i]['Body']),\n balance=float(raw_data.iloc[i]['Balance']),\n uniformity=float(raw_data.iloc[i]['Uniformity']),\n cleancup=float(raw_data.iloc[i]['Clean.Cup']),\n sweetness=float(raw_data.iloc[i]['Sweetness']),\n total_cup_point=float(raw_data.iloc[i]['Total.Cup.Points']),\n moisture=float(raw_data.iloc[i]['Moisture']),\n color=str(raw_data.iloc[i]['Color']),\n cluster=int(raw_data.iloc[i]['cluster'])\n )\n session.add(bean_row)\n logger.debug('Row %d added to table ' % i)\n 
session.commit()\n except sql.exc.IntegrityError: # Check primary key duplication\n logger.error(\"Duplicated coffee bean\")\n except Exception as e:\n logger.error(\"Incorrect credentials, access denied\", e)\n finally:\n session.close()\n\n\nif __name__ == \"__main__\":\n\n # Obtain parameters from os\n conn_type = \"mysql+pymysql\"\n user = os.environ.get(\"MYSQL_USER\")\n password = os.environ.get(\"MYSQL_PASSWORD\")\n host = os.environ.get(\"MYSQL_HOST\")\n port = os.environ.get(\"MYSQL_PORT\")\n database = os.environ.get(\"DATABASE_NAME\")\n local_database_path = config.LOCAL_DATABASE_PATH\n\n # If users wish to write to their own SQLALCHEMY_DATABASE_URI in the environment\n if config.SQLALCHEMY_DATABASE_URI is None:\n # Whether to create a local SQLite database or an AWS RDS database\n if config.LOCAL_DB_FLAG:\n engine_string = \"sqlite:///{}\".format(local_database_path)\n else:\n engine_string = \"{}://{}:{}@{}:{}/{}\".format(conn_type, user, password, host, port, database)\n else:\n engine_string = config.SQLALCHEMY_DATABASE_URI\n\n try:\n engine_string = 'sqlite:///data/bean.db'\n persist_to_db(engine_string)\n logger.info(\"Data successfully persisted into the database\")\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#!/usr/bin/env pybricks-micropython
from pybricks import ev3brick as brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import (Port, Stop, Direction, Button, Color,
SoundFile, ImageFile, Align)
from pybricks.tools import print, wait, StopWatch
from pybricks.robotics import DriveBase
# Write your program here
motor_a = Motor(Port.A)
brick.sound.beep()
wait(1000)
motor_a.run_target(500, 720) #500 degrees per second, 90 target angle
wait(1000)
brick.sound.beep(1000, 500) #frequency, duration
|
normal
|
{
"blob_id": "f6ebc3c37a69e5ec49d91609db394eec4a94cedf",
"index": 9982,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbrick.sound.beep()\nwait(1000)\nmotor_a.run_target(500, 720)\nwait(1000)\nbrick.sound.beep(1000, 500)\n",
"step-3": "<mask token>\nmotor_a = Motor(Port.A)\nbrick.sound.beep()\nwait(1000)\nmotor_a.run_target(500, 720)\nwait(1000)\nbrick.sound.beep(1000, 500)\n",
"step-4": "from pybricks import ev3brick as brick\nfrom pybricks.ev3devices import Motor, TouchSensor, ColorSensor, InfraredSensor, UltrasonicSensor, GyroSensor\nfrom pybricks.parameters import Port, Stop, Direction, Button, Color, SoundFile, ImageFile, Align\nfrom pybricks.tools import print, wait, StopWatch\nfrom pybricks.robotics import DriveBase\nmotor_a = Motor(Port.A)\nbrick.sound.beep()\nwait(1000)\nmotor_a.run_target(500, 720)\nwait(1000)\nbrick.sound.beep(1000, 500)\n",
"step-5": "#!/usr/bin/env pybricks-micropython\n\nfrom pybricks import ev3brick as brick\nfrom pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,\n InfraredSensor, UltrasonicSensor, GyroSensor)\nfrom pybricks.parameters import (Port, Stop, Direction, Button, Color,\n SoundFile, ImageFile, Align)\nfrom pybricks.tools import print, wait, StopWatch\nfrom pybricks.robotics import DriveBase\n\n# Write your program here\nmotor_a = Motor(Port.A)\n\nbrick.sound.beep()\nwait(1000)\nmotor_a.run_target(500, 720) #500 degrees per second, 90 target angle\nwait(1000)\nbrick.sound.beep(1000, 500) #frequency, duration\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
# Create your models here.
class Pastebin(models.Model):
name= models.CharField(max_length=30)
textpaste = models.CharField(max_length=80)
pasteurl = models.AutoField(primary_key=True)
def __str__(self):
return self.name
|
normal
|
{
"blob_id": "3badf65a5301cc9cf26811e3989631aec5d31910",
"index": 2709,
"step-1": "<mask token>\n\n\nclass Pastebin(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Pastebin(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Pastebin(models.Model):\n name = models.CharField(max_length=30)\n textpaste = models.CharField(max_length=80)\n pasteurl = models.AutoField(primary_key=True)\n\n def __str__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass Pastebin(models.Model):\n name = models.CharField(max_length=30)\n textpaste = models.CharField(max_length=80)\n pasteurl = models.AutoField(primary_key=True)\n\n def __str__(self):\n return self.name\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Pastebin(models.Model):\n\tname= models.CharField(max_length=30)\n\ttextpaste = models.CharField(max_length=80)\n\tpasteurl = models.AutoField(primary_key=True)\n\t\n\n\tdef __str__(self):\n\t\treturn self.name\n\t",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MultinomialNB:
<|reserved_special_token_0|>
def fit(self, X, y):
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
prior_numerator = [len(x) for x in X_separated_by_class]
prior_denominator = X.shape[0]
self.prior_prob = np.array(prior_numerator) / prior_denominator
self.log_prior_prob = np.log(self.prior_prob)
conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +
self.K) for x in X_separated_by_class])
conditional_prob_denominator = np.expand_dims(
conditional_prob_numerator.sum(axis=1), axis=1)
self.conditional_prob = (conditional_prob_numerator /
conditional_prob_denominator)
return self
def predict(self, X):
log_conditional_prob = np.array([(x * np.log(self.conditional_prob)
).sum(axis=1) for x in X])
posterior_pronb = log_conditional_prob + self.log_prior_prob
return np.argmax(posterior_pronb, axis=1)
class GaussianNB:
def __init__(self, k=1.0):
self.K = k
def fit(self, X, y):
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
self.prior_prob = np.array([(len(x) / X.shape[0]) for x in
X_separated_by_class])
self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for
x in X_separated_by_class])
covariance_diagonal_matrices = []
for c, x in enumerate(X_separated_by_class):
mean_square_difference = 0
for x_i in x:
mean_difference = x_i - self.mean_vector[c]
mean_square_difference += mean_difference ** 2
covariance_diagonal_matrix = (mean_square_difference + self.K
) / len(x) * np.identity(X.shape[1])
covariance_diagonal_matrices.append(covariance_diagonal_matrix)
self.covariance_diagonal_matrices = np.asarray(
covariance_diagonal_matrices)
return self
def log_gaussian_distribution(self, x, mean, variance):
log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))
log_exponent = -(x - mean) ** 2 / (2 * variance)
return sum(log_multiplier + log_exponent)
def predict(self, X):
variances = []
for matrix in self.covariance_diagonal_matrices:
variance = matrix.diagonal()
variances.append(variance)
variances = np.array(variances)
posterior_prob_collection = []
for x in X:
conditional_prob = []
for mean, variance in zip(self.mean_vector, variances):
conditional_prob.append(self.log_gaussian_distribution(x,
mean, variance))
posterior_prob = np.array(conditional_prob) + np.log(self.
prior_prob)
posterior_prob_collection.append(posterior_prob)
posterior_prob_collection = np.array(posterior_prob_collection)
return np.argmax(posterior_prob_collection, axis=1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MultinomialNB:
def __init__(self, k=1.0):
self.K = k
def fit(self, X, y):
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
prior_numerator = [len(x) for x in X_separated_by_class]
prior_denominator = X.shape[0]
self.prior_prob = np.array(prior_numerator) / prior_denominator
self.log_prior_prob = np.log(self.prior_prob)
conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +
self.K) for x in X_separated_by_class])
conditional_prob_denominator = np.expand_dims(
conditional_prob_numerator.sum(axis=1), axis=1)
self.conditional_prob = (conditional_prob_numerator /
conditional_prob_denominator)
return self
def predict(self, X):
log_conditional_prob = np.array([(x * np.log(self.conditional_prob)
).sum(axis=1) for x in X])
posterior_pronb = log_conditional_prob + self.log_prior_prob
return np.argmax(posterior_pronb, axis=1)
class GaussianNB:
def __init__(self, k=1.0):
self.K = k
def fit(self, X, y):
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
self.prior_prob = np.array([(len(x) / X.shape[0]) for x in
X_separated_by_class])
self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for
x in X_separated_by_class])
covariance_diagonal_matrices = []
for c, x in enumerate(X_separated_by_class):
mean_square_difference = 0
for x_i in x:
mean_difference = x_i - self.mean_vector[c]
mean_square_difference += mean_difference ** 2
covariance_diagonal_matrix = (mean_square_difference + self.K
) / len(x) * np.identity(X.shape[1])
covariance_diagonal_matrices.append(covariance_diagonal_matrix)
self.covariance_diagonal_matrices = np.asarray(
covariance_diagonal_matrices)
return self
def log_gaussian_distribution(self, x, mean, variance):
log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))
log_exponent = -(x - mean) ** 2 / (2 * variance)
return sum(log_multiplier + log_exponent)
def predict(self, X):
variances = []
for matrix in self.covariance_diagonal_matrices:
variance = matrix.diagonal()
variances.append(variance)
variances = np.array(variances)
posterior_prob_collection = []
for x in X:
conditional_prob = []
for mean, variance in zip(self.mean_vector, variances):
conditional_prob.append(self.log_gaussian_distribution(x,
mean, variance))
posterior_prob = np.array(conditional_prob) + np.log(self.
prior_prob)
posterior_prob_collection.append(posterior_prob)
posterior_prob_collection = np.array(posterior_prob_collection)
return np.argmax(posterior_prob_collection, axis=1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BernoulliNB:
def __init__(self, k=1.0, binarize=0.0):
self.K = k
self.binarize = binarize
def fit(self, X, y):
X = self._binarize(X)
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
self.n_examples, self.n_features = X.shape
prior_numerator = np.array([len(x) for x in X_separated_by_class])
self.prior_prob = prior_numerator / self.n_examples
self.log_prior_prob = np.log(self.prior_prob)
conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +
self.K) for x in X_separated_by_class])
conditional_prob_denominator = np.expand_dims(np.array([(len(x) + 2 *
self.K) for x in X_separated_by_class]), axis=1)
self.conditional_prob = (conditional_prob_numerator /
conditional_prob_denominator)
return self
def predict(self, X):
X = self._binarize(X)
posterior_prob_numerator = np.array([((x * np.log(self.
conditional_prob) + np.abs(1 - x) * np.log(1 - self.
conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in X])
posterior_prob_denominator = np.expand_dims(np.array([((x * np.log(
self.conditional_prob) + np.abs(1 - x) * np.log(1 - self.
conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in
X]).sum(axis=1), axis=1)
posterior_prob = posterior_prob_numerator - posterior_prob_denominator
return np.argmax(posterior_prob, axis=1)
def _binarize(self, X):
return np.where(X > self.binarize, 1, 0)
class MultinomialNB:
def __init__(self, k=1.0):
self.K = k
def fit(self, X, y):
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
prior_numerator = [len(x) for x in X_separated_by_class]
prior_denominator = X.shape[0]
self.prior_prob = np.array(prior_numerator) / prior_denominator
self.log_prior_prob = np.log(self.prior_prob)
conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +
self.K) for x in X_separated_by_class])
conditional_prob_denominator = np.expand_dims(
conditional_prob_numerator.sum(axis=1), axis=1)
self.conditional_prob = (conditional_prob_numerator /
conditional_prob_denominator)
return self
def predict(self, X):
log_conditional_prob = np.array([(x * np.log(self.conditional_prob)
).sum(axis=1) for x in X])
posterior_pronb = log_conditional_prob + self.log_prior_prob
return np.argmax(posterior_pronb, axis=1)
class GaussianNB:
def __init__(self, k=1.0):
self.K = k
def fit(self, X, y):
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
self.prior_prob = np.array([(len(x) / X.shape[0]) for x in
X_separated_by_class])
self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for
x in X_separated_by_class])
covariance_diagonal_matrices = []
for c, x in enumerate(X_separated_by_class):
mean_square_difference = 0
for x_i in x:
mean_difference = x_i - self.mean_vector[c]
mean_square_difference += mean_difference ** 2
covariance_diagonal_matrix = (mean_square_difference + self.K
) / len(x) * np.identity(X.shape[1])
covariance_diagonal_matrices.append(covariance_diagonal_matrix)
self.covariance_diagonal_matrices = np.asarray(
covariance_diagonal_matrices)
return self
def log_gaussian_distribution(self, x, mean, variance):
log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))
log_exponent = -(x - mean) ** 2 / (2 * variance)
return sum(log_multiplier + log_exponent)
def predict(self, X):
variances = []
for matrix in self.covariance_diagonal_matrices:
variance = matrix.diagonal()
variances.append(variance)
variances = np.array(variances)
posterior_prob_collection = []
for x in X:
conditional_prob = []
for mean, variance in zip(self.mean_vector, variances):
conditional_prob.append(self.log_gaussian_distribution(x,
mean, variance))
posterior_prob = np.array(conditional_prob) + np.log(self.
prior_prob)
posterior_prob_collection.append(posterior_prob)
posterior_prob_collection = np.array(posterior_prob_collection)
return np.argmax(posterior_prob_collection, axis=1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
class BernoulliNB:
def __init__(self, k=1.0, binarize=0.0):
self.K = k
self.binarize = binarize
def fit(self, X, y):
X = self._binarize(X)
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
self.n_examples, self.n_features = X.shape
prior_numerator = np.array([len(x) for x in X_separated_by_class])
self.prior_prob = prior_numerator / self.n_examples
self.log_prior_prob = np.log(self.prior_prob)
conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +
self.K) for x in X_separated_by_class])
conditional_prob_denominator = np.expand_dims(np.array([(len(x) + 2 *
self.K) for x in X_separated_by_class]), axis=1)
self.conditional_prob = (conditional_prob_numerator /
conditional_prob_denominator)
return self
def predict(self, X):
X = self._binarize(X)
posterior_prob_numerator = np.array([((x * np.log(self.
conditional_prob) + np.abs(1 - x) * np.log(1 - self.
conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in X])
posterior_prob_denominator = np.expand_dims(np.array([((x * np.log(
self.conditional_prob) + np.abs(1 - x) * np.log(1 - self.
conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in
X]).sum(axis=1), axis=1)
posterior_prob = posterior_prob_numerator - posterior_prob_denominator
return np.argmax(posterior_prob, axis=1)
def _binarize(self, X):
return np.where(X > self.binarize, 1, 0)
class MultinomialNB:
def __init__(self, k=1.0):
self.K = k
def fit(self, X, y):
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
prior_numerator = [len(x) for x in X_separated_by_class]
prior_denominator = X.shape[0]
self.prior_prob = np.array(prior_numerator) / prior_denominator
self.log_prior_prob = np.log(self.prior_prob)
conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +
self.K) for x in X_separated_by_class])
conditional_prob_denominator = np.expand_dims(
conditional_prob_numerator.sum(axis=1), axis=1)
self.conditional_prob = (conditional_prob_numerator /
conditional_prob_denominator)
return self
def predict(self, X):
log_conditional_prob = np.array([(x * np.log(self.conditional_prob)
).sum(axis=1) for x in X])
posterior_pronb = log_conditional_prob + self.log_prior_prob
return np.argmax(posterior_pronb, axis=1)
class GaussianNB:
def __init__(self, k=1.0):
self.K = k
def fit(self, X, y):
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in
np.unique(y)]
self.n_classes = len(np.unique(y))
self.prior_prob = np.array([(len(x) / X.shape[0]) for x in
X_separated_by_class])
self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for
x in X_separated_by_class])
covariance_diagonal_matrices = []
for c, x in enumerate(X_separated_by_class):
mean_square_difference = 0
for x_i in x:
mean_difference = x_i - self.mean_vector[c]
mean_square_difference += mean_difference ** 2
covariance_diagonal_matrix = (mean_square_difference + self.K
) / len(x) * np.identity(X.shape[1])
covariance_diagonal_matrices.append(covariance_diagonal_matrix)
self.covariance_diagonal_matrices = np.asarray(
covariance_diagonal_matrices)
return self
def log_gaussian_distribution(self, x, mean, variance):
log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))
log_exponent = -(x - mean) ** 2 / (2 * variance)
return sum(log_multiplier + log_exponent)
def predict(self, X):
variances = []
for matrix in self.covariance_diagonal_matrices:
variance = matrix.diagonal()
variances.append(variance)
variances = np.array(variances)
posterior_prob_collection = []
for x in X:
conditional_prob = []
for mean, variance in zip(self.mean_vector, variances):
conditional_prob.append(self.log_gaussian_distribution(x,
mean, variance))
posterior_prob = np.array(conditional_prob) + np.log(self.
prior_prob)
posterior_prob_collection.append(posterior_prob)
posterior_prob_collection = np.array(posterior_prob_collection)
return np.argmax(posterior_prob_collection, axis=1)
<|reserved_special_token_1|>
"""
Naive Bayes Class
- Bernoulli Naive Bayes
- Multinomial Naive Bayes
- Gaussian Naive Bayes
Arthor: Zhenhuan(Steven) Sun
"""
import numpy as np
class BernoulliNB:
def __init__(self, k=1.0, binarize=0.0):
# Laplace Smoothing Factor
self.K = k
# the degree of binarization
self.binarize = binarize
def fit(self, X, y):
# binarize X
# since we assume data is bernoulli distributed we need to make sure
# that data consist of binary values
X = self._binarize(X)
# separate training data by classes(different target)
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
# number of different class
self.n_classes = len(np.unique(y))
# count the number of examples and number of features in X
self.n_examples, self.n_features = X.shape
# count the number of examples that belong to class k (0 or 1 in spam classification)
prior_numerator = np.array([len(x) for x in X_separated_by_class])
# compute the prior probability (P(y))
self.prior_prob = prior_numerator / self.n_examples
# compute the log prior probability (log(P(y))) for prediction
self.log_prior_prob = np.log(self.prior_prob)
# compute the conditional probability
# with laplace smoothing we assume we have seen each feature at least self.K times
conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])
conditional_prob_denominator = np.expand_dims(np.array([len(x) + 2 * self.K for x in X_separated_by_class]), axis=1)
self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator
return self
def predict(self, X):
# binarize X
X = self._binarize(X)
# compute log posterior probability log(P(y|X))
posterior_prob_numerator = np.array([(x * np.log(self.conditional_prob) +
np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +
self.log_prior_prob for x in X])
posterior_prob_denominator = np.expand_dims(np.array([(x * np.log(self.conditional_prob) +
np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +
self.log_prior_prob for x in X]).sum(axis=1), axis=1)
posterior_prob = posterior_prob_numerator - posterior_prob_denominator
# alternative solution
# since posterior_prob_denominator is a constant thus we don't bother compute the denominator
# compute the numerator is sufficient enough to make prediction and also it makes algorithm runs faster
#return np.argmax(posterior_prob_numerator, axis=1)
return np.argmax(posterior_prob, axis=1)
def _binarize(self, X):
# convert the values in X to binary values (0 or 1)
return np.where(X > self.binarize, 1, 0)
class MultinomialNB:
def __init__(self, k=1.0):
# Laplace Smoothing Factor
self.K = k
def fit(self, X, y):
# separate the training data by class
X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
# number of different class
self.n_classes = len(np.unique(y))
# count the number of examples that belong to different classes
prior_numerator = [len(x) for x in X_separated_by_class]
# count the total number of examples in the training set
prior_denominator = X.shape[0]
# compute prior probability
self.prior_prob = np.array(prior_numerator) / prior_denominator
# compute log prior probability for prediction
self.log_prior_prob = np.log(self.prior_prob)
# compute the conditional probability's numerator for different class (with laplace smoothing)
# assume we have seen each feature at least once to avoid divide by zero error
conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])
# compute the conditional probability's denominator for different class
conditional_prob_denominator = np.expand_dims(conditional_prob_numerator.sum(axis=1), axis=1)
# compute the conditional probability for each feature and for each different classes
self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator
return self
def predict(self, X):
# compute the log conditional probability for each examples and for each different classes
log_conditional_prob = np.array([(x * np.log(self.conditional_prob)).sum(axis=1) for x in X])
# compute the posterior probability
posterior_pronb = log_conditional_prob + self.log_prior_prob
# make prediction
return np.argmax(posterior_pronb, axis=1)
class GaussianNB:
    """Gaussian Naive Bayes classifier.

    Each feature is modelled per class as an independent univariate
    Gaussian (equivalently, a diagonal covariance matrix per class).  A
    smoothing constant ``k`` is added to every class's sum of squared
    deviations so that no feature ends up with zero variance, which would
    otherwise cause a division by zero when evaluating the log density.
    """

    def __init__(self, k=1.0):
        # Variance smoothing factor: added to each per-class sum of squared
        # deviations before normalising, guaranteeing strictly positive
        # variances.
        self.K = k

    def fit(self, X, y):
        """Estimate per-class priors, feature means and diagonal covariances.

        Parameters
        ----------
        X : ndarray of shape (n_examples, n_features)
            Training examples.
        y : array-like of shape (n_examples,)
            Class labels.

        Returns
        -------
        self
        """
        # Group the training examples by class label (classes ordered as
        # returned by np.unique, which also fixes the prediction indices).
        X_separated_by_class = [[x for x, t in zip(X, y) if t == c]
                                for c in np.unique(y)]
        self.n_classes = len(np.unique(y))
        # Prior P(y = c): fraction of training examples belonging to class c.
        self.prior_prob = np.array([len(x) / X.shape[0]
                                    for x in X_separated_by_class])
        # Per-class mean vector (one mean per feature).
        self.mean_vector = np.array([np.array(x).sum(axis=0) / len(x)
                                     for x in X_separated_by_class])
        # Per-class diagonal covariance matrices.  The feature-wise sum of
        # squared deviations is computed in one vectorized pass instead of
        # accumulating example by example in a Python loop.
        covariance_diagonal_matrices = []
        for c, x in enumerate(X_separated_by_class):
            deviations = np.asarray(x) - self.mean_vector[c]
            mean_square_difference = (deviations ** 2).sum(axis=0)
            # Smooth with self.K so every variance is strictly positive and
            # the log-density below never divides by zero.
            covariance_diagonal_matrix = (
                (mean_square_difference + self.K) / len(x)
            ) * np.identity(X.shape[1])
            covariance_diagonal_matrices.append(covariance_diagonal_matrix)
        self.covariance_diagonal_matrices = np.asarray(
            covariance_diagonal_matrices)
        return self

    def log_gaussian_distribution(self, x, mean, variance):
        """Return the summed log density of ``x`` under independent
        Gaussians with the given per-feature ``mean`` and ``variance``."""
        log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))
        log_exponent = -(x - mean) ** 2 / (2 * variance)
        return np.sum(log_multiplier + log_exponent)

    def predict(self, X):
        """Return the most probable class index for every example in ``X``."""
        # Extract the per-class variance vectors from the stored diagonal
        # covariance matrices.
        variances = np.array([m.diagonal()
                              for m in self.covariance_diagonal_matrices])
        posterior_prob_collection = []
        for x in X:
            # Log-likelihood of x under each class-conditional Gaussian.
            conditional_prob = [
                self.log_gaussian_distribution(x, mean, variance)
                for mean, variance in zip(self.mean_vector, variances)
            ]
            # Unnormalised log posterior: log P(x | y) + log P(y).
            posterior_prob = np.array(conditional_prob) + np.log(self.prior_prob)
            posterior_prob_collection.append(posterior_prob)
        return np.argmax(np.array(posterior_prob_collection), axis=1)
|
flexible
|
{
"blob_id": "5dfe86d654e4184bab4401f8b634326996e42e9c",
"index": 2646,
"step-1": "<mask token>\n\n\nclass MultinomialNB:\n <mask token>\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) 
** 2 / (2 * variance)\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-2": "<mask token>\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * 
variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-3": "<mask token>\n\n\nclass BernoulliNB:\n\n def __init__(self, k=1.0, binarize=0.0):\n self.K = k\n self.binarize = binarize\n\n def fit(self, X, y):\n X = self._binarize(X)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.n_examples, self.n_features = X.shape\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n self.prior_prob = prior_numerator / self.n_examples\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([(len(x) + 2 *\n self.K) for x in X_separated_by_class]), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n X = self._binarize(X)\n posterior_prob_numerator = np.array([((x * np.log(self.\n conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in X])\n posterior_prob_denominator = np.expand_dims(np.array([((x * np.log(\n self.conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in\n X]).sum(axis=1), axis=1)\n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n return np.where(X > self.binarize, 1, 0)\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n 
self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = 
np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-4": "<mask token>\nimport numpy as np\n\n\nclass BernoulliNB:\n\n def __init__(self, k=1.0, binarize=0.0):\n self.K = k\n self.binarize = binarize\n\n def fit(self, X, y):\n X = self._binarize(X)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.n_examples, self.n_features = X.shape\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n self.prior_prob = prior_numerator / self.n_examples\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([(len(x) + 2 *\n self.K) for x in X_separated_by_class]), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n X = self._binarize(X)\n posterior_prob_numerator = np.array([((x * np.log(self.\n conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in X])\n posterior_prob_denominator = np.expand_dims(np.array([((x * np.log(\n self.conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in\n X]).sum(axis=1), axis=1)\n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n return np.where(X > self.binarize, 1, 0)\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = 
np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, 
variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-5": "\"\"\"\n Naive Bayes Class\n - Bernoulli Naive Bayes\n - Multinomial Naive Bayes\n - Gaussian Naive Bayes\n Arthor: Zhenhuan(Steven) Sun\n\"\"\"\n\nimport numpy as np\n\nclass BernoulliNB:\n def __init__(self, k=1.0, binarize=0.0):\n # Laplace Smoothing Factor\n self.K = k\n\n # the degree of binarization\n self.binarize = binarize\n\n def fit(self, X, y):\n # binarize X\n # since we assume data is bernoulli distributed we need to make sure\n # that data consist of binary values\n X = self._binarize(X)\n\n # separate training data by classes(different target)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n\n # number of different class\n self.n_classes = len(np.unique(y))\n\n # count the number of examples and number of features in X\n self.n_examples, self.n_features = X.shape\n\n # count the number of examples that belong to class k (0 or 1 in spam classification)\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n\n # compute the prior probability (P(y))\n self.prior_prob = prior_numerator / self.n_examples\n\n # compute the log prior probability (log(P(y))) for prediction\n self.log_prior_prob = np.log(self.prior_prob)\n\n # compute the conditional probability\n # with laplace smoothing we assume we have seen each feature at least self.K times\n conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([len(x) + 2 * self.K for x in X_separated_by_class]), axis=1)\n self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator\n\n return self\n\n def predict(self, X):\n # binarize X\n X = self._binarize(X)\n\n # compute log posterior probability log(P(y|X))\n posterior_prob_numerator = np.array([(x * np.log(self.conditional_prob) + \n np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) + \n self.log_prior_prob for x in X])\n posterior_prob_denominator = 
np.expand_dims(np.array([(x * np.log(self.conditional_prob) + \n np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +\n self.log_prior_prob for x in X]).sum(axis=1), axis=1)\n \n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n\n # alternative solution\n # since posterior_prob_denominator is a constant thus we don't bother compute the denominator\n # compute the numerator is sufficient enough to make prediction and also it makes algorithm runs faster\n #return np.argmax(posterior_prob_numerator, axis=1)\n\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n # convert the values in X to binary values (0 or 1)\n return np.where(X > self.binarize, 1, 0)\n\nclass MultinomialNB:\n def __init__(self, k=1.0):\n # Laplace Smoothing Factor\n self.K = k\n\n def fit(self, X, y):\n # separate the training data by class\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n \n # number of different class\n self.n_classes = len(np.unique(y))\n\n # count the number of examples that belong to different classes\n prior_numerator = [len(x) for x in X_separated_by_class]\n\n # count the total number of examples in the training set\n prior_denominator = X.shape[0]\n\n # compute prior probability\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n\n # compute log prior probability for prediction\n self.log_prior_prob = np.log(self.prior_prob)\n\n # compute the conditional probability's numerator for different class (with laplace smoothing)\n # assume we have seen each feature at least once to avoid divide by zero error\n conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])\n\n # compute the conditional probability's denominator for different class\n conditional_prob_denominator = np.expand_dims(conditional_prob_numerator.sum(axis=1), axis=1)\n\n # compute the conditional probability for each feature and for each different classes\n 
self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator\n\n return self\n\n def predict(self, X):\n # compute the log conditional probability for each examples and for each different classes\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)).sum(axis=1) for x in X])\n\n # compute the posterior probability\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n\n # make prediction\n return np.argmax(posterior_pronb, axis=1)\n\nclass GaussianNB:\n def __init__(self, k=1.0):\n # Laplace Smoothing Factor\n self.K = k\n\n def fit(self, X, y):\n # separate the training set by classes\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n\n # count the number of different classes\n self.n_classes = len(np.unique(y))\n\n # compute prior probability\n self.prior_prob = np.array([len(x) / X.shape[0] for x in X_separated_by_class])\n\n # compute mean vector for each class\n self.mean_vector = np.array([np.array(x).sum(axis=0) / len(x) for x in X_separated_by_class])\n\n # compute covariance matrix for each class\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n # compute the covariance matrix for each examples (slow as hell -> abandoned)\n # mean_difference = np.expand_dims((x_i - self.mean_vector[c]), axis=1)\n # mean_square_difference += mean_difference.dot(mean_difference.T) \n # compute the diagnal entries of covariance matrix for each examples (much faster than above method)\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n # convert the list of diagonal entries back to covariance diagonal matrix\n # here we assumed that the mean square difference between each feature and its mean is at least 1 to make sure that \n # there is no zero variance in the covariance matrix and thus we won't encounter divide by zero error in the future\n covariance_diagonal_matrix = 
((mean_square_difference + self.K) / len(x)) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(covariance_diagonal_matrices)\n\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n\n log_multiplier = -np.log(np.sqrt((2 * np.pi) * variance))\n log_exponent = -(x - mean)**2 / (2 * variance)\n\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n \n # list that stores all test data's posterior probability\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n # compute conditional probability for each class\n conditional_prob.append(self.log_gaussian_distribution(x, mean, variance))\n # compute posterior probability\n posterior_prob = np.array(conditional_prob) + np.log(self.prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n \n return np.argmax(posterior_prob_collection, axis=1)",
"step-ids": [
8,
9,
14,
15,
16
]
}
|
[
8,
9,
14,
15,
16
] |
"""Config for a linear regression model evaluated on a diabetes dataset."""
from dbispipeline.evaluators import GridEvaluator
import dbispipeline.result_handlers as result_handlers
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from nlp4musa2020.dataloaders.alf200k import ALF200KLoader
from nlp4musa2020.dataloaders.alf200k import genre_target_labels
from nlp4musa2020.dataloaders.vectorizer import lda
from nlp4musa2020.dataloaders.vectorizer import tfidf
import nlp4musa2020.evaluators as evaluators
from nlp4musa2020.models.simplenn_genre import SimpleGenreNN
# Data source: ALF200K loader over the pickled LFM-genres dataset, using
# the listed feature groups plus LDA and tf-idf text vectorizers; targets
# are the genre labels.
dataloader = ALF200KLoader(
    path='data/processed/dataset-lfm-genres.pickle',
    load_feature_groups=[
        'rhymes',
        'statistical',
        'statistical_time',
        'explicitness',
        'audio',
    ],
    text_vectorizers=lda() + tfidf(),
    target=genre_target_labels(),
)
# Standard-scale all features before feeding them to the neural network.
pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('model', SimpleGenreNN(epochs=50)),
])
# Grid search over the network's hidden-layer sizes and dropout rate;
# scoring/CV settings come from the shared genre grid parameters.
evaluator = GridEvaluator(
    parameters={
        'model__dense_sizes': [
            (32, 32),
            (64, 64),
        ],
        'model__dropout_rate': [0.1],
    },
    grid_parameters=evaluators.grid_parameters_genres(),
)
# NOTE(review): this rebinding intentionally shadows the imported
# ``dbispipeline.result_handlers`` module name; the attribute lookup on the
# right-hand side still resolves against the module, so keep this statement
# last.  Presumably dbispipeline reads this list from the config module --
# verify against the framework's config contract.
result_handlers = [
    result_handlers.print_gridsearch_results,
]
|
normal
|
{
"blob_id": "473c653da54ebdb7fe8a9eefc166cab167f43357",
"index": 3994,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndataloader = ALF200KLoader(path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=['rhymes', 'statistical', 'statistical_time',\n 'explicitness', 'audio'], text_vectorizers=lda() + tfidf(), target=\n genre_target_labels())\npipeline = Pipeline([('scaler', StandardScaler()), ('model', SimpleGenreNN(\n epochs=50))])\nevaluator = GridEvaluator(parameters={'model__dense_sizes': [(32, 32), (64,\n 64)], 'model__dropout_rate': [0.1]}, grid_parameters=evaluators.\n grid_parameters_genres())\nresult_handlers = [result_handlers.print_gridsearch_results]\n",
"step-3": "<mask token>\nfrom dbispipeline.evaluators import GridEvaluator\nimport dbispipeline.result_handlers as result_handlers\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom nlp4musa2020.dataloaders.alf200k import ALF200KLoader\nfrom nlp4musa2020.dataloaders.alf200k import genre_target_labels\nfrom nlp4musa2020.dataloaders.vectorizer import lda\nfrom nlp4musa2020.dataloaders.vectorizer import tfidf\nimport nlp4musa2020.evaluators as evaluators\nfrom nlp4musa2020.models.simplenn_genre import SimpleGenreNN\ndataloader = ALF200KLoader(path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=['rhymes', 'statistical', 'statistical_time',\n 'explicitness', 'audio'], text_vectorizers=lda() + tfidf(), target=\n genre_target_labels())\npipeline = Pipeline([('scaler', StandardScaler()), ('model', SimpleGenreNN(\n epochs=50))])\nevaluator = GridEvaluator(parameters={'model__dense_sizes': [(32, 32), (64,\n 64)], 'model__dropout_rate': [0.1]}, grid_parameters=evaluators.\n grid_parameters_genres())\nresult_handlers = [result_handlers.print_gridsearch_results]\n",
"step-4": "\"\"\"Config for a linear regression model evaluated on a diabetes dataset.\"\"\"\nfrom dbispipeline.evaluators import GridEvaluator\nimport dbispipeline.result_handlers as result_handlers\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nfrom nlp4musa2020.dataloaders.alf200k import ALF200KLoader\nfrom nlp4musa2020.dataloaders.alf200k import genre_target_labels\nfrom nlp4musa2020.dataloaders.vectorizer import lda\nfrom nlp4musa2020.dataloaders.vectorizer import tfidf\nimport nlp4musa2020.evaluators as evaluators\nfrom nlp4musa2020.models.simplenn_genre import SimpleGenreNN\n\ndataloader = ALF200KLoader(\n path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=[\n 'rhymes',\n 'statistical',\n 'statistical_time',\n 'explicitness',\n 'audio',\n ],\n text_vectorizers=lda() + tfidf(),\n target=genre_target_labels(),\n)\n\npipeline = Pipeline([\n ('scaler', StandardScaler()),\n ('model', SimpleGenreNN(epochs=50)),\n])\n\nevaluator = GridEvaluator(\n parameters={\n 'model__dense_sizes': [\n (32, 32),\n (64, 64),\n ],\n 'model__dropout_rate': [0.1],\n },\n grid_parameters=evaluators.grid_parameters_genres(),\n)\n\nresult_handlers = [\n result_handlers.print_gridsearch_results,\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def spConfig():
return saml2.config.Config()
def saml_client():
saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {
'endpoints': {'assertion_consumer_service': [(absolute_url(
'/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}
spConfig().load(deepmerge(saml2_config_default, current_app.config[
'SAML2_CONFIG']))
return saml2.client.Saml2Client(config=spConfig())
@auth.route('/auth/saml', methods=['GET'])
def saml_redirect_to_idp():
relay_state = None if request.args.get('usePostMessage'
) is None else 'usePostMessage'
session_id, result = saml_client().prepare_for_authenticate(relay_state
=relay_state)
return make_response('', 302, result['headers'])
<|reserved_special_token_0|>
@auth.route('/auth/saml/metadata.xml', methods=['GET'])
def saml_metadata():
edesc = saml2.metadata.entity_descriptor(spConfig())
response = make_response(str(edesc))
response.headers['Content-Type'] = 'text/xml; charset=utf-8'
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def spConfig():
return saml2.config.Config()
def saml_client():
saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {
'endpoints': {'assertion_consumer_service': [(absolute_url(
'/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}
spConfig().load(deepmerge(saml2_config_default, current_app.config[
'SAML2_CONFIG']))
return saml2.client.Saml2Client(config=spConfig())
@auth.route('/auth/saml', methods=['GET'])
def saml_redirect_to_idp():
relay_state = None if request.args.get('usePostMessage'
) is None else 'usePostMessage'
session_id, result = saml_client().prepare_for_authenticate(relay_state
=relay_state)
return make_response('', 302, result['headers'])
@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def saml_response_from_idp():
def _make_response(resp_obj, resp_code):
if 'usePostMessage' in request.form.get('RelayState', ''
) and 'text/html' in request.headers.get('Accept', ''):
origins = current_app.config.get('CORS_ORIGINS', [])
response = make_response(
"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Authenticating...</title>
<script type="application/javascript">
var origins = {origins};
// in case when API and WebUI are on the same origin
if (origins.indexOf(window.location.origin) < 0)
origins.push(window.location.origin);
// only one will succeed
origins.forEach(origin => window.opener.postMessage({msg_data}, origin));
window.close();
</script>
</head>
<body></body>
</html>"""
.format(msg_data=json.dumps(resp_obj), origins=json.dumps(
origins)), resp_code)
response.headers['Content-Type'] = 'text/html'
return response
else:
return jsonify(**resp_obj), resp_code
authn_response = saml_client().parse_authn_request_response(request.
form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)
identity = authn_response.get_identity()
email = identity['emailAddress'][0]
domain = email.split('@')[1]
name = current_app.config.get('SAML2_USER_NAME_FORMAT',
'{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]
), identity.items())))
groups = identity.get('groups', [])
if is_authorized('ALLOWED_SAML2_GROUPS', groups):
return _make_response({'status': 'error', 'message':
'User {} is not authorized'.format(email)}, 403)
customer = get_customer(email, groups=[domain])
token = create_token(email, name, email, provider='saml2', customer=
customer, groups=groups)
return _make_response({'status': 'ok', 'token': token.tokenize}, 200)
@auth.route('/auth/saml/metadata.xml', methods=['GET'])
def saml_metadata():
edesc = saml2.metadata.entity_descriptor(spConfig())
response = make_response(str(edesc))
response.headers['Content-Type'] = 'text/xml; charset=utf-8'
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import saml2
import saml2.entity
import saml2.metadata
import saml2.config
import saml2.client
import saml2.saml
except ImportError:
pass
def spConfig():
return saml2.config.Config()
def saml_client():
    """Build a pysaml2 ``Saml2Client`` for this service provider.

    Default SP settings (this app's URL as entity id and ``/auth/saml``
    as the HTTP-POST assertion consumer endpoint) are deep-merged with
    the application's ``SAML2_CONFIG`` before being loaded.
    """
    # Baseline SP configuration overridden/extended by app config below.
    saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {
        'endpoints': {'assertion_consumer_service': [(absolute_url(
        '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}
    # NOTE(review): this assumes spConfig() returns the same Config object
    # on both calls below; if it constructs a fresh instance per call, the
    # settings loaded here never reach the client -- verify.
    spConfig().load(deepmerge(saml2_config_default, current_app.config[
        'SAML2_CONFIG']))
    return saml2.client.Saml2Client(config=spConfig())
@auth.route('/auth/saml', methods=['GET'])
def saml_redirect_to_idp():
    """Start SP-initiated SAML login by redirecting the user to the IdP."""
    # Propagate the usePostMessage flag through RelayState so the response
    # handler knows to reply via window.postMessage instead of JSON.
    use_post_message = request.args.get('usePostMessage') is not None
    relay_state = 'usePostMessage' if use_post_message else None
    _session_id, result = saml_client().prepare_for_authenticate(
        relay_state=relay_state)
    return make_response('', 302, result['headers'])
@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def saml_response_from_idp():
    """Consume the SAML assertion POSTed back by the IdP and issue a token.

    Replies either as JSON or, when the login was started with
    ``usePostMessage``, as a small HTML page that delivers the result to the
    opener window via ``window.opener.postMessage`` and then closes itself.
    """
    # Helper: choose between the postMessage HTML shim and a plain JSON reply
    # based on RelayState (set by saml_redirect_to_idp) and the Accept header.
    def _make_response(resp_obj, resp_code):
        if 'usePostMessage' in request.form.get('RelayState', ''
            ) and 'text/html' in request.headers.get('Accept', ''):
            origins = current_app.config.get('CORS_ORIGINS', [])
            response = make_response(
                """<!DOCTYPE html>
                <html lang="en">
                <head>
                <meta charset="UTF-8">
                <title>Authenticating...</title>
                <script type="application/javascript">
                var origins = {origins};
                // in case when API and WebUI are on the same origin
                if (origins.indexOf(window.location.origin) < 0)
                    origins.push(window.location.origin);
                // only one will succeed
                origins.forEach(origin => window.opener.postMessage({msg_data}, origin));
                window.close();
                </script>
                </head>
                <body></body>
                </html>"""
                .format(msg_data=json.dumps(resp_obj), origins=json.dumps(
                origins)), resp_code)
            response.headers['Content-Type'] = 'text/html'
            return response
        else:
            return jsonify(**resp_obj), resp_code
    # Validate the signed SAML response and pull the asserted attributes.
    authn_response = saml_client().parse_authn_request_response(request.
        form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)
    identity = authn_response.get_identity()
    # NOTE(review): assumes the IdP always asserts an emailAddress attribute
    # containing an '@' -- confirm against the IdP attribute mapping.
    email = identity['emailAddress'][0]
    domain = email.split('@')[1]
    # Each identity value is a list; take the first element of each to fill
    # the configurable display-name template.
    name = current_app.config.get('SAML2_USER_NAME_FORMAT',
        '{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]
        ), identity.items())))
    groups = identity.get('groups', [])
    # NOTE(review): is_authorized() is used here as a rejection guard, i.e. a
    # truthy result means the user must be denied -- confirm in auth.utils.
    if is_authorized('ALLOWED_SAML2_GROUPS', groups):
        return _make_response({'status': 'error', 'message':
            'User {} is not authorized'.format(email)}, 403)
    customer = get_customer(email, groups=[domain])
    token = create_token(email, name, email, provider='saml2', customer=
        customer, groups=groups)
    return _make_response({'status': 'ok', 'token': token.tokenize}, 200)
@auth.route('/auth/saml/metadata.xml', methods=['GET'])
def saml_metadata():
    """Serve this service provider's SAML entity descriptor as XML."""
    descriptor = saml2.metadata.entity_descriptor(spConfig())
    resp = make_response(str(descriptor))
    resp.headers['Content-Type'] = 'text/xml; charset=utf-8'
    return resp
<|reserved_special_token_1|>
import json
from flask import current_app, request, jsonify, make_response
from flask_cors import cross_origin
from alerta.auth.utils import is_authorized, create_token, get_customer
from alerta.utils.api import absolute_url, deepmerge
from . import auth
try:
import saml2
import saml2.entity
import saml2.metadata
import saml2.config
import saml2.client
import saml2.saml
except ImportError:
pass
def spConfig():
    """Return a fresh, unpopulated pysaml2 ``Config`` instance."""
    return saml2.config.Config()
def saml_client():
    """Build the SAML2 service-provider client.

    Merges a minimal SP default (entity id plus the assertion consumer
    endpoint at /auth/saml) with the deployment's ``SAML2_CONFIG`` setting.
    """
    saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {
        'endpoints': {'assertion_consumer_service': [(absolute_url(
        '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}
    # Bug fix: the merged settings were previously loaded into one throwaway
    # Config while the client was handed a second, *unconfigured* Config
    # (spConfig() builds a fresh instance on every call). Load and pass the
    # same instance instead.
    config = spConfig()
    config.load(deepmerge(saml2_config_default, current_app.config[
        'SAML2_CONFIG']))
    return saml2.client.Saml2Client(config=config)
@auth.route('/auth/saml', methods=['GET'])
def saml_redirect_to_idp():
    """Kick off SP-initiated login by redirecting the browser to the IdP."""
    # Carry the optional ?usePostMessage flag through RelayState so the POST
    # handler knows how to hand the token back to the opener window.
    if request.args.get('usePostMessage') is None:
        relay_state = None
    else:
        relay_state = 'usePostMessage'
    _session_id, prepared = saml_client().prepare_for_authenticate(
        relay_state=relay_state)
    # The prepared request supplies the Location header for the redirect.
    return make_response('', 302, prepared['headers'])
@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def saml_response_from_idp():
    """Consume the SAML assertion POSTed back by the IdP and issue a token.

    Replies either as JSON or, when the login was started with
    ``usePostMessage``, as a small HTML page that delivers the result to the
    opener window via ``window.opener.postMessage`` and then closes itself.
    """
    # Helper: choose between the postMessage HTML shim and a plain JSON reply
    # based on RelayState (set by saml_redirect_to_idp) and the Accept header.
    def _make_response(resp_obj, resp_code):
        if 'usePostMessage' in request.form.get('RelayState', ''
            ) and 'text/html' in request.headers.get('Accept', ''):
            origins = current_app.config.get('CORS_ORIGINS', [])
            response = make_response(
                """<!DOCTYPE html>
                <html lang="en">
                <head>
                <meta charset="UTF-8">
                <title>Authenticating...</title>
                <script type="application/javascript">
                var origins = {origins};
                // in case when API and WebUI are on the same origin
                if (origins.indexOf(window.location.origin) < 0)
                    origins.push(window.location.origin);
                // only one will succeed
                origins.forEach(origin => window.opener.postMessage({msg_data}, origin));
                window.close();
                </script>
                </head>
                <body></body>
                </html>"""
                .format(msg_data=json.dumps(resp_obj), origins=json.dumps(
                origins)), resp_code)
            response.headers['Content-Type'] = 'text/html'
            return response
        else:
            return jsonify(**resp_obj), resp_code
    # Validate the signed SAML response and pull the asserted attributes.
    authn_response = saml_client().parse_authn_request_response(request.
        form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)
    identity = authn_response.get_identity()
    # NOTE(review): assumes the IdP always asserts an emailAddress attribute
    # containing an '@' -- confirm against the IdP attribute mapping.
    email = identity['emailAddress'][0]
    domain = email.split('@')[1]
    # Each identity value is a list; take the first element of each to fill
    # the configurable display-name template.
    name = current_app.config.get('SAML2_USER_NAME_FORMAT',
        '{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]
        ), identity.items())))
    groups = identity.get('groups', [])
    # NOTE(review): is_authorized() is used here as a rejection guard, i.e. a
    # truthy result means the user must be denied -- confirm in auth.utils.
    if is_authorized('ALLOWED_SAML2_GROUPS', groups):
        return _make_response({'status': 'error', 'message':
            'User {} is not authorized'.format(email)}, 403)
    customer = get_customer(email, groups=[domain])
    token = create_token(email, name, email, provider='saml2', customer=
        customer, groups=groups)
    return _make_response({'status': 'ok', 'token': token.tokenize}, 200)
@auth.route('/auth/saml/metadata.xml', methods=['GET'])
def saml_metadata():
    """Serve this service provider's SAML entity descriptor as XML."""
    descriptor = saml2.metadata.entity_descriptor(spConfig())
    resp = make_response(str(descriptor))
    resp.headers['Content-Type'] = 'text/xml; charset=utf-8'
    return resp
<|reserved_special_token_1|>
import json
from flask import current_app, request, jsonify, make_response
from flask_cors import cross_origin
from alerta.auth.utils import is_authorized, create_token, get_customer
from alerta.utils.api import absolute_url, deepmerge
from . import auth
try:
import saml2
import saml2.entity
import saml2.metadata
import saml2.config
import saml2.client
import saml2.saml
except ImportError:
pass # saml2 authentication will not work
def spConfig():
    """Return a fresh, unpopulated pysaml2 ``Config`` instance."""
    return saml2.config.Config()
def saml_client():
    """Build the SAML2 service-provider client.

    Merges a minimal SP default (entity id plus the assertion consumer
    endpoint at /auth/saml) with the deployment's ``SAML2_CONFIG`` setting.
    """
    saml2_config_default = {
        'entityid': absolute_url(),
        'service': {
            'sp': {
                'endpoints': {
                    'assertion_consumer_service': [
                        (absolute_url('/auth/saml'), saml2.BINDING_HTTP_POST)
                    ]
                }
            }
        }
    }
    # Bug fix: the merged settings were previously loaded into one throwaway
    # Config while the client was handed a second, *unconfigured* Config
    # (spConfig() builds a fresh instance on every call). Load and pass the
    # same instance instead.
    config = spConfig()
    config.load(deepmerge(saml2_config_default, current_app.config['SAML2_CONFIG']))
    return saml2.client.Saml2Client(config=config)
@auth.route('/auth/saml', methods=['GET'])
def saml_redirect_to_idp():
    """Kick off SP-initiated login by redirecting the browser to the IdP."""
    # Carry the optional ?usePostMessage flag through RelayState so the POST
    # handler knows how to hand the token back to the opener window.
    if request.args.get('usePostMessage') is None:
        relay_state = None
    else:
        relay_state = 'usePostMessage'
    _session_id, prepared = saml_client().prepare_for_authenticate(relay_state=relay_state)
    # The prepared request supplies the Location header for the redirect.
    return make_response('', 302, prepared['headers'])
@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def saml_response_from_idp():
    """Consume the SAML assertion POSTed back by the IdP and issue a token.

    Replies either as JSON or, when the login was started with
    ``usePostMessage``, as a small HTML page that delivers the result to the
    opener window via ``window.opener.postMessage`` and then closes itself.
    """
    # Helper: choose between the postMessage HTML shim and a plain JSON reply
    # based on RelayState (set by saml_redirect_to_idp) and the Accept header.
    def _make_response(resp_obj, resp_code):
        if 'usePostMessage' in request.form.get('RelayState', '') and 'text/html' in request.headers.get('Accept', ''):
            origins = current_app.config.get('CORS_ORIGINS', [])
            response = make_response(
                '''<!DOCTYPE html>
            <html lang="en">
            <head>
                <meta charset="UTF-8">
                <title>Authenticating...</title>
                <script type="application/javascript">
                    var origins = {origins};
                    // in case when API and WebUI are on the same origin
                    if (origins.indexOf(window.location.origin) < 0)
                        origins.push(window.location.origin);
                    // only one will succeed
                    origins.forEach(origin => window.opener.postMessage({msg_data}, origin));
                    window.close();
                </script>
            </head>
            <body></body>
            </html>'''.format(msg_data=json.dumps(resp_obj), origins=json.dumps(origins)),
                resp_code
            )
            response.headers['Content-Type'] = 'text/html'
            return response
        else:
            return jsonify(**resp_obj), resp_code
    # Validate the signed SAML response and pull the asserted attributes.
    authn_response = saml_client().parse_authn_request_response(
        request.form['SAMLResponse'],
        saml2.entity.BINDING_HTTP_POST
    )
    identity = authn_response.get_identity()
    # NOTE(review): assumes the IdP always asserts an emailAddress attribute
    # containing an '@' -- confirm against the IdP attribute mapping.
    email = identity['emailAddress'][0]
    domain = email.split('@')[1]
    # Each identity value is a list; take the first element of each to fill
    # the configurable display-name template.
    name = (current_app.config.get('SAML2_USER_NAME_FORMAT', '{givenName} {surname}')).format(**dict(map(lambda x: (x[0], x[1][0]), identity.items())))
    groups = identity.get('groups', [])
    # NOTE(review): is_authorized() is used here as a rejection guard, i.e. a
    # truthy result means the user must be denied -- confirm in auth.utils.
    if is_authorized('ALLOWED_SAML2_GROUPS', groups):
        return _make_response({'status': 'error', 'message': 'User {} is not authorized'.format(email)}, 403)
    customer = get_customer(email, groups=[domain])
    token = create_token(email, name, email, provider='saml2', customer=customer, groups=groups)
    return _make_response({'status': 'ok', 'token': token.tokenize}, 200)
@auth.route('/auth/saml/metadata.xml', methods=['GET'])
def saml_metadata():
    """Serve this service provider's SAML entity descriptor as XML."""
    descriptor = saml2.metadata.entity_descriptor(spConfig())
    resp = make_response(str(descriptor))
    resp.headers['Content-Type'] = 'text/xml; charset=utf-8'
    return resp
|
flexible
|
{
"blob_id": "b233d212f3a6c453786dc54b2d43578e1faae417",
"index": 7292,
"step-1": "<mask token>\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {\n 'endpoints': {'assertion_consumer_service': [(absolute_url(\n '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}\n spConfig().load(deepmerge(saml2_config_default, current_app.config[\n 'SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage'\n ) is None else 'usePostMessage'\n session_id, result = saml_client().prepare_for_authenticate(relay_state\n =relay_state)\n return make_response('', 302, result['headers'])\n\n\n<mask token>\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-2": "<mask token>\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {\n 'endpoints': {'assertion_consumer_service': [(absolute_url(\n '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}\n spConfig().load(deepmerge(saml2_config_default, current_app.config[\n 'SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage'\n ) is None else 'usePostMessage'\n session_id, result = saml_client().prepare_for_authenticate(relay_state\n =relay_state)\n return make_response('', 302, result['headers'])\n\n\n@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])\n@cross_origin(supports_credentials=True)\ndef saml_response_from_idp():\n\n def _make_response(resp_obj, resp_code):\n if 'usePostMessage' in request.form.get('RelayState', ''\n ) and 'text/html' in request.headers.get('Accept', ''):\n origins = current_app.config.get('CORS_ORIGINS', [])\n response = make_response(\n \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Authenticating...</title>\n <script type=\"application/javascript\">\n var origins = {origins};\n // in case when API and WebUI are on the same origin\n if (origins.indexOf(window.location.origin) < 0)\n origins.push(window.location.origin);\n // only one will succeed\n origins.forEach(origin => window.opener.postMessage({msg_data}, origin));\n window.close();\n </script>\n </head>\n <body></body>\n </html>\"\"\"\n .format(msg_data=json.dumps(resp_obj), origins=json.dumps(\n origins)), resp_code)\n response.headers['Content-Type'] = 'text/html'\n return response\n else:\n return jsonify(**resp_obj), resp_code\n authn_response = saml_client().parse_authn_request_response(request.\n form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)\n identity = authn_response.get_identity()\n email 
= identity['emailAddress'][0]\n domain = email.split('@')[1]\n name = current_app.config.get('SAML2_USER_NAME_FORMAT',\n '{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]\n ), identity.items())))\n groups = identity.get('groups', [])\n if is_authorized('ALLOWED_SAML2_GROUPS', groups):\n return _make_response({'status': 'error', 'message':\n 'User {} is not authorized'.format(email)}, 403)\n customer = get_customer(email, groups=[domain])\n token = create_token(email, name, email, provider='saml2', customer=\n customer, groups=groups)\n return _make_response({'status': 'ok', 'token': token.tokenize}, 200)\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-3": "<mask token>\ntry:\n import saml2\n import saml2.entity\n import saml2.metadata\n import saml2.config\n import saml2.client\n import saml2.saml\nexcept ImportError:\n pass\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {\n 'endpoints': {'assertion_consumer_service': [(absolute_url(\n '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}\n spConfig().load(deepmerge(saml2_config_default, current_app.config[\n 'SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage'\n ) is None else 'usePostMessage'\n session_id, result = saml_client().prepare_for_authenticate(relay_state\n =relay_state)\n return make_response('', 302, result['headers'])\n\n\n@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])\n@cross_origin(supports_credentials=True)\ndef saml_response_from_idp():\n\n def _make_response(resp_obj, resp_code):\n if 'usePostMessage' in request.form.get('RelayState', ''\n ) and 'text/html' in request.headers.get('Accept', ''):\n origins = current_app.config.get('CORS_ORIGINS', [])\n response = make_response(\n \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Authenticating...</title>\n <script type=\"application/javascript\">\n var origins = {origins};\n // in case when API and WebUI are on the same origin\n if (origins.indexOf(window.location.origin) < 0)\n origins.push(window.location.origin);\n // only one will succeed\n origins.forEach(origin => window.opener.postMessage({msg_data}, origin));\n window.close();\n </script>\n </head>\n <body></body>\n </html>\"\"\"\n .format(msg_data=json.dumps(resp_obj), origins=json.dumps(\n origins)), resp_code)\n response.headers['Content-Type'] = 'text/html'\n return response\n else:\n return jsonify(**resp_obj), resp_code\n authn_response = 
saml_client().parse_authn_request_response(request.\n form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)\n identity = authn_response.get_identity()\n email = identity['emailAddress'][0]\n domain = email.split('@')[1]\n name = current_app.config.get('SAML2_USER_NAME_FORMAT',\n '{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]\n ), identity.items())))\n groups = identity.get('groups', [])\n if is_authorized('ALLOWED_SAML2_GROUPS', groups):\n return _make_response({'status': 'error', 'message':\n 'User {} is not authorized'.format(email)}, 403)\n customer = get_customer(email, groups=[domain])\n token = create_token(email, name, email, provider='saml2', customer=\n customer, groups=groups)\n return _make_response({'status': 'ok', 'token': token.tokenize}, 200)\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-4": "import json\nfrom flask import current_app, request, jsonify, make_response\nfrom flask_cors import cross_origin\nfrom alerta.auth.utils import is_authorized, create_token, get_customer\nfrom alerta.utils.api import absolute_url, deepmerge\nfrom . import auth\ntry:\n import saml2\n import saml2.entity\n import saml2.metadata\n import saml2.config\n import saml2.client\n import saml2.saml\nexcept ImportError:\n pass\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {\n 'endpoints': {'assertion_consumer_service': [(absolute_url(\n '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}\n spConfig().load(deepmerge(saml2_config_default, current_app.config[\n 'SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage'\n ) is None else 'usePostMessage'\n session_id, result = saml_client().prepare_for_authenticate(relay_state\n =relay_state)\n return make_response('', 302, result['headers'])\n\n\n@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])\n@cross_origin(supports_credentials=True)\ndef saml_response_from_idp():\n\n def _make_response(resp_obj, resp_code):\n if 'usePostMessage' in request.form.get('RelayState', ''\n ) and 'text/html' in request.headers.get('Accept', ''):\n origins = current_app.config.get('CORS_ORIGINS', [])\n response = make_response(\n \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Authenticating...</title>\n <script type=\"application/javascript\">\n var origins = {origins};\n // in case when API and WebUI are on the same origin\n if (origins.indexOf(window.location.origin) < 0)\n origins.push(window.location.origin);\n // only one will succeed\n origins.forEach(origin => window.opener.postMessage({msg_data}, origin));\n window.close();\n </script>\n </head>\n 
<body></body>\n </html>\"\"\"\n .format(msg_data=json.dumps(resp_obj), origins=json.dumps(\n origins)), resp_code)\n response.headers['Content-Type'] = 'text/html'\n return response\n else:\n return jsonify(**resp_obj), resp_code\n authn_response = saml_client().parse_authn_request_response(request.\n form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)\n identity = authn_response.get_identity()\n email = identity['emailAddress'][0]\n domain = email.split('@')[1]\n name = current_app.config.get('SAML2_USER_NAME_FORMAT',\n '{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]\n ), identity.items())))\n groups = identity.get('groups', [])\n if is_authorized('ALLOWED_SAML2_GROUPS', groups):\n return _make_response({'status': 'error', 'message':\n 'User {} is not authorized'.format(email)}, 403)\n customer = get_customer(email, groups=[domain])\n token = create_token(email, name, email, provider='saml2', customer=\n customer, groups=groups)\n return _make_response({'status': 'ok', 'token': token.tokenize}, 200)\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-5": "\nimport json\n\nfrom flask import current_app, request, jsonify, make_response\nfrom flask_cors import cross_origin\n\nfrom alerta.auth.utils import is_authorized, create_token, get_customer\nfrom alerta.utils.api import absolute_url, deepmerge\nfrom . import auth\n\ntry:\n import saml2\n import saml2.entity\n import saml2.metadata\n import saml2.config\n import saml2.client\n import saml2.saml\nexcept ImportError:\n pass # saml2 authentication will not work\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n\n saml2_config_default = {\n 'entityid': absolute_url(),\n 'service': {\n 'sp': {\n 'endpoints': {\n 'assertion_consumer_service': [\n (absolute_url('/auth/saml'), saml2.BINDING_HTTP_POST)\n ]\n }\n }\n }\n }\n spConfig().load(deepmerge(saml2_config_default, current_app.config['SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage') is None else 'usePostMessage'\n (session_id, result) = saml_client().prepare_for_authenticate(relay_state=relay_state)\n return make_response('', 302, result['headers'])\n\n\n@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])\n@cross_origin(supports_credentials=True)\ndef saml_response_from_idp():\n def _make_response(resp_obj, resp_code):\n if 'usePostMessage' in request.form.get('RelayState', '') and 'text/html' in request.headers.get('Accept', ''):\n origins = current_app.config.get('CORS_ORIGINS', [])\n response = make_response(\n '''<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Authenticating...</title>\n <script type=\"application/javascript\">\n var origins = {origins};\n // in case when API and WebUI are on the same origin\n if (origins.indexOf(window.location.origin) < 0)\n origins.push(window.location.origin);\n // only one will succeed\n origins.forEach(origin => 
window.opener.postMessage({msg_data}, origin));\n window.close();\n </script>\n </head>\n <body></body>\n </html>'''.format(msg_data=json.dumps(resp_obj), origins=json.dumps(origins)),\n resp_code\n )\n response.headers['Content-Type'] = 'text/html'\n return response\n else:\n return jsonify(**resp_obj), resp_code\n\n authn_response = saml_client().parse_authn_request_response(\n request.form['SAMLResponse'],\n saml2.entity.BINDING_HTTP_POST\n )\n identity = authn_response.get_identity()\n email = identity['emailAddress'][0]\n domain = email.split('@')[1]\n name = (current_app.config.get('SAML2_USER_NAME_FORMAT', '{givenName} {surname}')).format(**dict(map(lambda x: (x[0], x[1][0]), identity.items())))\n\n groups = identity.get('groups', [])\n if is_authorized('ALLOWED_SAML2_GROUPS', groups):\n return _make_response({'status': 'error', 'message': 'User {} is not authorized'.format(email)}, 403)\n\n customer = get_customer(email, groups=[domain])\n\n token = create_token(email, name, email, provider='saml2', customer=customer, groups=groups)\n return _make_response({'status': 'ok', 'token': token.tokenize}, 200)\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Prompt for a file name and some text, save the text to that file, then
# echo the saved text back to the user.
fileName = input("Please write the name of the file you would like to open: ")
# Bug fix: the handle was opened and closed manually, leaking it if write()
# raised; ``with`` guarantees flush + close. The redundant str() wrappers
# around input() (which already returns str) are dropped.
with open(fileName, "w") as file_handle:
    contents = input("Please write the content you would like to save.")
    file_handle.write(contents)
print(contents)
|
normal
|
{
"blob_id": "aed09a3c04f284fa0b8844a47c5bc9d1621a9b5f",
"index": 2034,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)\n",
"step-3": "fileName = str(input(\n 'Please write the name of the file you would like to open: '))\nfile_handle = open(fileName, 'w')\ncontents = str(input('Please write the content you would like to save.'))\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)\n",
"step-4": "fileName = str(input(\"Please write the name of the file you would like to open: \"))\n\nfile_handle = open(fileName, \"w\")\ncontents = str(input(\"Please write the content you would like to save.\"))\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__author__ = 'Orka'
from movie_list import MovieList
from movie_random import MovieRandom
from remove_chosen_movie_from_list import RemoveChosenMovieFromList
from save_list_to_CSV import SaveListToCSV
from length_limit import LengthLimit
# Source catalogue file and the mode it is opened with (read/update).
file_name = 'cinema.csv'
function = 'r+'
# Destination for the updated catalogue; opened for overwrite on save.
filename_save = 'cinema.csv'
function_save = 'w'
class LaunchMovieLottery(object):
    """Draw a random movie from the CSV-backed catalogue.

    Optional length limits restrict the draw to movies within a runtime
    range; drawn movies can be removed and the catalogue saved back to CSV.
    """

    def __init__(self, limit_low=None, limit_high=None):
        self.limit_low = limit_low
        self.limit_high = limit_high
        self.full_list = None

    def movie_list(self):
        """Load the catalogue; return [movies-without-sequels, full list]."""
        source = MovieList(file_name, function)
        self.return_movie_list = source.return_movie_list()
        self.full_list = source.return_full_list()
        return [self.return_movie_list, self.full_list]

    def limit_list(self):
        """Narrow the loaded list to titles within the configured length range."""
        self.movie_list()
        limiter = LengthLimit(self.return_movie_list, self.limit_low, self.limit_high)
        self.shorten_list = limiter.return_asked_length()

    def return_movie(self):
        """Pick and return one random movie from the length-limited list."""
        self.limit_list()
        picker = MovieRandom(self.shorten_list)
        self.temp_movie_random = picker.return_random_movie()
        return self.temp_movie_random

    def remove_and_save(self, the_movie):
        """Drop the chosen movie from the catalogue and persist it to CSV."""
        catalogue = self.movie_list()[1]
        try:
            new_movie_list = RemoveChosenMovieFromList(the_movie, catalogue).remove_movie()
            SaveListToCSV(new_movie_list, filename_save, function_save).save_file()
        except ValueError:
            # Movie was not present in the catalogue; nothing to remove.
            pass
|
normal
|
{
"blob_id": "e35a106a3852a7a004fdae6819d4075e1fe929d6",
"index": 4373,
"step-1": "<mask token>\n\n\nclass LaunchMovieLottery(object):\n <mask token>\n\n def movie_list(self):\n movie_list = MovieList(file_name, function)\n self.return_movie_list = movie_list.return_movie_list()\n self.full_list = movie_list.return_full_list()\n return [self.return_movie_list, self.full_list]\n\n def limit_list(self):\n self.movie_list()\n limit_length = LengthLimit(self.return_movie_list, self.limit_low,\n self.limit_high)\n self.shorten_list = limit_length.return_asked_length()\n\n def return_movie(self):\n self.limit_list()\n movie_random = MovieRandom(self.shorten_list)\n self.temp_movie_random = movie_random.return_random_movie()\n return self.temp_movie_random\n\n def remove_and_save(self, the_movie):\n full_list = self.movie_list()[1]\n try:\n remove = RemoveChosenMovieFromList(the_movie, full_list)\n new_movie_list = remove.remove_movie()\n save_doc = SaveListToCSV(new_movie_list, filename_save,\n function_save)\n save_doc.save_file()\n except ValueError:\n pass\n",
"step-2": "<mask token>\n\n\nclass LaunchMovieLottery(object):\n\n def __init__(self, limit_low=None, limit_high=None):\n self.limit_low = limit_low\n self.limit_high = limit_high\n self.full_list = None\n\n def movie_list(self):\n movie_list = MovieList(file_name, function)\n self.return_movie_list = movie_list.return_movie_list()\n self.full_list = movie_list.return_full_list()\n return [self.return_movie_list, self.full_list]\n\n def limit_list(self):\n self.movie_list()\n limit_length = LengthLimit(self.return_movie_list, self.limit_low,\n self.limit_high)\n self.shorten_list = limit_length.return_asked_length()\n\n def return_movie(self):\n self.limit_list()\n movie_random = MovieRandom(self.shorten_list)\n self.temp_movie_random = movie_random.return_random_movie()\n return self.temp_movie_random\n\n def remove_and_save(self, the_movie):\n full_list = self.movie_list()[1]\n try:\n remove = RemoveChosenMovieFromList(the_movie, full_list)\n new_movie_list = remove.remove_movie()\n save_doc = SaveListToCSV(new_movie_list, filename_save,\n function_save)\n save_doc.save_file()\n except ValueError:\n pass\n",
"step-3": "__author__ = 'Orka'\n<mask token>\nfile_name = 'cinema.csv'\nfunction = 'r+'\nfilename_save = 'cinema.csv'\nfunction_save = 'w'\n\n\nclass LaunchMovieLottery(object):\n\n def __init__(self, limit_low=None, limit_high=None):\n self.limit_low = limit_low\n self.limit_high = limit_high\n self.full_list = None\n\n def movie_list(self):\n movie_list = MovieList(file_name, function)\n self.return_movie_list = movie_list.return_movie_list()\n self.full_list = movie_list.return_full_list()\n return [self.return_movie_list, self.full_list]\n\n def limit_list(self):\n self.movie_list()\n limit_length = LengthLimit(self.return_movie_list, self.limit_low,\n self.limit_high)\n self.shorten_list = limit_length.return_asked_length()\n\n def return_movie(self):\n self.limit_list()\n movie_random = MovieRandom(self.shorten_list)\n self.temp_movie_random = movie_random.return_random_movie()\n return self.temp_movie_random\n\n def remove_and_save(self, the_movie):\n full_list = self.movie_list()[1]\n try:\n remove = RemoveChosenMovieFromList(the_movie, full_list)\n new_movie_list = remove.remove_movie()\n save_doc = SaveListToCSV(new_movie_list, filename_save,\n function_save)\n save_doc.save_file()\n except ValueError:\n pass\n",
"step-4": "__author__ = 'Orka'\nfrom movie_list import MovieList\nfrom movie_random import MovieRandom\nfrom remove_chosen_movie_from_list import RemoveChosenMovieFromList\nfrom save_list_to_CSV import SaveListToCSV\nfrom length_limit import LengthLimit\nfile_name = 'cinema.csv'\nfunction = 'r+'\nfilename_save = 'cinema.csv'\nfunction_save = 'w'\n\n\nclass LaunchMovieLottery(object):\n\n def __init__(self, limit_low=None, limit_high=None):\n self.limit_low = limit_low\n self.limit_high = limit_high\n self.full_list = None\n\n def movie_list(self):\n movie_list = MovieList(file_name, function)\n self.return_movie_list = movie_list.return_movie_list()\n self.full_list = movie_list.return_full_list()\n return [self.return_movie_list, self.full_list]\n\n def limit_list(self):\n self.movie_list()\n limit_length = LengthLimit(self.return_movie_list, self.limit_low,\n self.limit_high)\n self.shorten_list = limit_length.return_asked_length()\n\n def return_movie(self):\n self.limit_list()\n movie_random = MovieRandom(self.shorten_list)\n self.temp_movie_random = movie_random.return_random_movie()\n return self.temp_movie_random\n\n def remove_and_save(self, the_movie):\n full_list = self.movie_list()[1]\n try:\n remove = RemoveChosenMovieFromList(the_movie, full_list)\n new_movie_list = remove.remove_movie()\n save_doc = SaveListToCSV(new_movie_list, filename_save,\n function_save)\n save_doc.save_file()\n except ValueError:\n pass\n",
"step-5": "__author__ = 'Orka'\r\nfrom movie_list import MovieList\r\nfrom movie_random import MovieRandom\r\nfrom remove_chosen_movie_from_list import RemoveChosenMovieFromList\r\nfrom save_list_to_CSV import SaveListToCSV\r\nfrom length_limit import LengthLimit\r\n\r\nfile_name = 'cinema.csv'\r\nfunction = 'r+'\r\nfilename_save = 'cinema.csv'\r\nfunction_save = 'w'\r\n\r\n\r\nclass LaunchMovieLottery(object):\r\n def __init__(self, limit_low=None, limit_high=None):\r\n self.limit_low = limit_low\r\n self.limit_high = limit_high\r\n self.full_list = None\r\n\r\n def movie_list(self):\r\n # creates movies list without sequels\r\n movie_list = MovieList(file_name, function)\r\n self.return_movie_list = movie_list.return_movie_list()\r\n self.full_list = movie_list.return_full_list()\r\n return [self.return_movie_list, self.full_list]\r\n\r\n def limit_list(self):\r\n self.movie_list()\r\n # limit the movie_list - returns list of movies limited to the specified length\r\n limit_length = LengthLimit(self.return_movie_list, self.limit_low, self.limit_high)\r\n self.shorten_list = limit_length.return_asked_length()\r\n # returns: 'No movie of this length.'\r\n\r\n def return_movie(self):\r\n self.limit_list()\r\n # draw a movie from movie list and print it\r\n movie_random = MovieRandom(self.shorten_list)\r\n self.temp_movie_random = movie_random.return_random_movie()\r\n return self.temp_movie_random\r\n\r\n def remove_and_save(self, the_movie):\r\n full_list = self.movie_list()[1]\r\n\r\n try:\r\n # remove chosen movie from movie list and allow the next movie in the series in next lottery\r\n remove = RemoveChosenMovieFromList(the_movie, full_list)\r\n new_movie_list = remove.remove_movie()\r\n\r\n # save to CSV\r\n save_doc = SaveListToCSV(new_movie_list, filename_save, function_save)\r\n save_doc.save_file()\r\n\r\n except ValueError:\r\n # Movie not exists\r\n pass\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import pandas as pd
import numpy as np
import geopandas as gp
from sys import argv
import os
import subprocess
# CLI arguments: north, east, south, west bounding degrees (integers),
# followed by the directory the downloaded tiles are written to.
n, e, s, w = map(int, argv[1:5])
output_dir = argv[5]
print(f'{(n, e, s, w)=}')
# Visit every 1-degree cell of the bounding box, inclusive at both ends.
for lat in range(s, n + 1):
    for lon in range(w, e + 1):
        # Tile name pattern: hemisphere letters plus zero-padded degrees,
        # e.g. n38w078.
        latdir = 'n' if lat >= 0 else 's'
        londir = 'e' if lon >= 0 else 'w'
        fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'
        print(fname)
        # USGS 1/3 arc-second elevation GeoTIFF on the staged-products bucket.
        url = f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'
        print(url)
        outf = os.path.join(output_dir, f'{fname}.tif')
        # argv-list form (shell=False); curl saves straight to outf.
        subprocess.run(['curl', '--output', outf, url])
|
normal
|
{
"blob_id": "9f36b846619ca242426041f577ab7d9e4dad6a43",
"index": 3797,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'(n, e, s, w)={n, e, s, w!r}')\nfor lat in range(s, n + 1):\n for lon in range(w, e + 1):\n latdir = 'n' if lat >= 0 else 's'\n londir = 'e' if lon >= 0 else 'w'\n fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'\n print(fname)\n url = (\n f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'\n )\n print(url)\n outf = os.path.join(output_dir, f'{fname}.tif')\n subprocess.run(['curl', '--output', outf, url])\n",
"step-3": "<mask token>\nn, e, s, w = map(int, argv[1:5])\noutput_dir = argv[5]\nprint(f'(n, e, s, w)={n, e, s, w!r}')\nfor lat in range(s, n + 1):\n for lon in range(w, e + 1):\n latdir = 'n' if lat >= 0 else 's'\n londir = 'e' if lon >= 0 else 'w'\n fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'\n print(fname)\n url = (\n f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'\n )\n print(url)\n outf = os.path.join(output_dir, f'{fname}.tif')\n subprocess.run(['curl', '--output', outf, url])\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport geopandas as gp\nfrom sys import argv\nimport os\nimport subprocess\nn, e, s, w = map(int, argv[1:5])\noutput_dir = argv[5]\nprint(f'(n, e, s, w)={n, e, s, w!r}')\nfor lat in range(s, n + 1):\n for lon in range(w, e + 1):\n latdir = 'n' if lat >= 0 else 's'\n londir = 'e' if lon >= 0 else 'w'\n fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'\n print(fname)\n url = (\n f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'\n )\n print(url)\n outf = os.path.join(output_dir, f'{fname}.tif')\n subprocess.run(['curl', '--output', outf, url])\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport geopandas as gp\nfrom sys import argv\nimport os\nimport subprocess\n\nn, e, s, w = map(int, argv[1:5])\noutput_dir = argv[5]\n\nprint(f'{(n, e, s, w)=}')\n\nfor lat in range(s, n + 1):\n for lon in range(w, e + 1):\n latdir = 'n' if lat >= 0 else 's'\n londir = 'e' if lon >= 0 else 'w'\n fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'\n print(fname)\n\n url = f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'\n\n print(url)\n\n outf = os.path.join(output_dir, f'{fname}.tif')\n\n subprocess.run(['curl', '--output', outf, url])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('core', '0028_auto_20210506_1020')]
operations = [migrations.AlterField(model_name='user', name='city',
field=models.ForeignKey(null=True, on_delete=django.db.models.
deletion.CASCADE, to='core.cities'))]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('core', '0028_auto_20210506_1020')]
operations = [migrations.AlterField(model_name='user', name='city',
field=models.ForeignKey(null=True, on_delete=django.db.models.
deletion.CASCADE, to='core.cities'))]
<|reserved_special_token_1|>
# Generated by Django 3.1.6 on 2021-05-06 10:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0028_auto_20210506_1020'),
]
operations = [
migrations.AlterField(
model_name='user',
name='city',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.cities'),
),
]
|
flexible
|
{
"blob_id": "39ac4e0d543048ea02123baa39b6c8ce7618d16b",
"index": 6802,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0028_auto_20210506_1020')]\n operations = [migrations.AlterField(model_name='user', name='city',\n field=models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.cities'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0028_auto_20210506_1020')]\n operations = [migrations.AlterField(model_name='user', name='city',\n field=models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.cities'))]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-05-06 10:29\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0028_auto_20210506_1020'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='city',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.cities'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LanguageDefinition:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def create_project_files(project_path: str, added_file_paths: List[str]
=None) ->str:
"""
Create supporting project files for a translated file.
:param project_path: Project path.
:param added_file_paths: List of paths for files to add to the project.
:returns: Project path.
"""
pass
@staticmethod
def format_file(file_path: str, request_data=None) ->List[str]:
"""
Format file.
:param file_path: File path.
:param request_data: Request body data from "/translate" API endpoint.
:returns: Formatted file lines.
"""
return open(file_path).readlines()
@staticmethod
def format_project_files(project_path: str) ->List[str]:
"""
Format project files.
:param project_path: Project path.
"""
pass
@staticmethod
def to_single_line_comment(text: str) ->str:
"""
Convert a line of text to a single-line comment.
:param text: Line of text.
:returns: Single-line comment.
"""
pass
@staticmethod
def to_multi_line_comment(text: str) ->str:
"""
Convert a line of text to a multi-line comment.
:param text: Line of text.
:returns: Multi-line comment.
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LanguageDefinition:
<|reserved_special_token_0|>
@staticmethod
def get_translated_file_name(filename: str):
"""
:returns: Translated file name.
"""
return filename
@staticmethod
def create_project_files(project_path: str, added_file_paths: List[str]
=None) ->str:
"""
Create supporting project files for a translated file.
:param project_path: Project path.
:param added_file_paths: List of paths for files to add to the project.
:returns: Project path.
"""
pass
@staticmethod
def format_file(file_path: str, request_data=None) ->List[str]:
"""
Format file.
:param file_path: File path.
:param request_data: Request body data from "/translate" API endpoint.
:returns: Formatted file lines.
"""
return open(file_path).readlines()
@staticmethod
def format_project_files(project_path: str) ->List[str]:
"""
Format project files.
:param project_path: Project path.
"""
pass
@staticmethod
def to_single_line_comment(text: str) ->str:
"""
Convert a line of text to a single-line comment.
:param text: Line of text.
:returns: Single-line comment.
"""
pass
@staticmethod
def to_multi_line_comment(text: str) ->str:
"""
Convert a line of text to a multi-line comment.
:param text: Line of text.
:returns: Multi-line comment.
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LanguageDefinition:
"""Language definition containing general constants and methods."""
@staticmethod
def get_translated_file_name(filename: str):
"""
:returns: Translated file name.
"""
return filename
@staticmethod
def create_project_files(project_path: str, added_file_paths: List[str]
=None) ->str:
"""
Create supporting project files for a translated file.
:param project_path: Project path.
:param added_file_paths: List of paths for files to add to the project.
:returns: Project path.
"""
pass
@staticmethod
def format_file(file_path: str, request_data=None) ->List[str]:
"""
Format file.
:param file_path: File path.
:param request_data: Request body data from "/translate" API endpoint.
:returns: Formatted file lines.
"""
return open(file_path).readlines()
@staticmethod
def format_project_files(project_path: str) ->List[str]:
"""
Format project files.
:param project_path: Project path.
"""
pass
@staticmethod
def to_single_line_comment(text: str) ->str:
"""
Convert a line of text to a single-line comment.
:param text: Line of text.
:returns: Single-line comment.
"""
pass
@staticmethod
def to_multi_line_comment(text: str) ->str:
"""
Convert a line of text to a multi-line comment.
:param text: Line of text.
:returns: Multi-line comment.
"""
pass
<|reserved_special_token_1|>
from typing import List
class LanguageDefinition:
"""Language definition containing general constants and methods."""
@staticmethod
def get_translated_file_name(filename: str):
"""
:returns: Translated file name.
"""
return filename
@staticmethod
def create_project_files(project_path: str, added_file_paths: List[str]
=None) ->str:
"""
Create supporting project files for a translated file.
:param project_path: Project path.
:param added_file_paths: List of paths for files to add to the project.
:returns: Project path.
"""
pass
@staticmethod
def format_file(file_path: str, request_data=None) ->List[str]:
"""
Format file.
:param file_path: File path.
:param request_data: Request body data from "/translate" API endpoint.
:returns: Formatted file lines.
"""
return open(file_path).readlines()
@staticmethod
def format_project_files(project_path: str) ->List[str]:
"""
Format project files.
:param project_path: Project path.
"""
pass
@staticmethod
def to_single_line_comment(text: str) ->str:
"""
Convert a line of text to a single-line comment.
:param text: Line of text.
:returns: Single-line comment.
"""
pass
@staticmethod
def to_multi_line_comment(text: str) ->str:
"""
Convert a line of text to a multi-line comment.
:param text: Line of text.
:returns: Multi-line comment.
"""
pass
|
flexible
|
{
"blob_id": "672add6aa05e21d3605c05a23ff86281ffc3b17c",
"index": 9827,
"step-1": "<mask token>\n\n\nclass LanguageDefinition:\n <mask token>\n <mask token>\n\n @staticmethod\n def create_project_files(project_path: str, added_file_paths: List[str]\n =None) ->str:\n \"\"\"\n Create supporting project files for a translated file.\n\n :param project_path: Project path.\n :param added_file_paths: List of paths for files to add to the project.\n :returns: Project path.\n \"\"\"\n pass\n\n @staticmethod\n def format_file(file_path: str, request_data=None) ->List[str]:\n \"\"\"\n Format file.\n\n :param file_path: File path.\n :param request_data: Request body data from \"/translate\" API endpoint.\n :returns: Formatted file lines.\n \"\"\"\n return open(file_path).readlines()\n\n @staticmethod\n def format_project_files(project_path: str) ->List[str]:\n \"\"\"\n Format project files.\n\n :param project_path: Project path.\n \"\"\"\n pass\n\n @staticmethod\n def to_single_line_comment(text: str) ->str:\n \"\"\"\n Convert a line of text to a single-line comment.\n\n :param text: Line of text.\n :returns: Single-line comment.\n \"\"\"\n pass\n\n @staticmethod\n def to_multi_line_comment(text: str) ->str:\n \"\"\"\n Convert a line of text to a multi-line comment.\n\n :param text: Line of text.\n :returns: Multi-line comment.\n \"\"\"\n pass\n",
"step-2": "<mask token>\n\n\nclass LanguageDefinition:\n <mask token>\n\n @staticmethod\n def get_translated_file_name(filename: str):\n \"\"\"\n :returns: Translated file name.\n \"\"\"\n return filename\n\n @staticmethod\n def create_project_files(project_path: str, added_file_paths: List[str]\n =None) ->str:\n \"\"\"\n Create supporting project files for a translated file.\n\n :param project_path: Project path.\n :param added_file_paths: List of paths for files to add to the project.\n :returns: Project path.\n \"\"\"\n pass\n\n @staticmethod\n def format_file(file_path: str, request_data=None) ->List[str]:\n \"\"\"\n Format file.\n\n :param file_path: File path.\n :param request_data: Request body data from \"/translate\" API endpoint.\n :returns: Formatted file lines.\n \"\"\"\n return open(file_path).readlines()\n\n @staticmethod\n def format_project_files(project_path: str) ->List[str]:\n \"\"\"\n Format project files.\n\n :param project_path: Project path.\n \"\"\"\n pass\n\n @staticmethod\n def to_single_line_comment(text: str) ->str:\n \"\"\"\n Convert a line of text to a single-line comment.\n\n :param text: Line of text.\n :returns: Single-line comment.\n \"\"\"\n pass\n\n @staticmethod\n def to_multi_line_comment(text: str) ->str:\n \"\"\"\n Convert a line of text to a multi-line comment.\n\n :param text: Line of text.\n :returns: Multi-line comment.\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass LanguageDefinition:\n \"\"\"Language definition containing general constants and methods.\"\"\"\n\n @staticmethod\n def get_translated_file_name(filename: str):\n \"\"\"\n :returns: Translated file name.\n \"\"\"\n return filename\n\n @staticmethod\n def create_project_files(project_path: str, added_file_paths: List[str]\n =None) ->str:\n \"\"\"\n Create supporting project files for a translated file.\n\n :param project_path: Project path.\n :param added_file_paths: List of paths for files to add to the project.\n :returns: Project path.\n \"\"\"\n pass\n\n @staticmethod\n def format_file(file_path: str, request_data=None) ->List[str]:\n \"\"\"\n Format file.\n\n :param file_path: File path.\n :param request_data: Request body data from \"/translate\" API endpoint.\n :returns: Formatted file lines.\n \"\"\"\n return open(file_path).readlines()\n\n @staticmethod\n def format_project_files(project_path: str) ->List[str]:\n \"\"\"\n Format project files.\n\n :param project_path: Project path.\n \"\"\"\n pass\n\n @staticmethod\n def to_single_line_comment(text: str) ->str:\n \"\"\"\n Convert a line of text to a single-line comment.\n\n :param text: Line of text.\n :returns: Single-line comment.\n \"\"\"\n pass\n\n @staticmethod\n def to_multi_line_comment(text: str) ->str:\n \"\"\"\n Convert a line of text to a multi-line comment.\n\n :param text: Line of text.\n :returns: Multi-line comment.\n \"\"\"\n pass\n",
"step-4": "from typing import List\n\n\nclass LanguageDefinition:\n \"\"\"Language definition containing general constants and methods.\"\"\"\n\n @staticmethod\n def get_translated_file_name(filename: str):\n \"\"\"\n :returns: Translated file name.\n \"\"\"\n return filename\n\n @staticmethod\n def create_project_files(project_path: str, added_file_paths: List[str]\n =None) ->str:\n \"\"\"\n Create supporting project files for a translated file.\n\n :param project_path: Project path.\n :param added_file_paths: List of paths for files to add to the project.\n :returns: Project path.\n \"\"\"\n pass\n\n @staticmethod\n def format_file(file_path: str, request_data=None) ->List[str]:\n \"\"\"\n Format file.\n\n :param file_path: File path.\n :param request_data: Request body data from \"/translate\" API endpoint.\n :returns: Formatted file lines.\n \"\"\"\n return open(file_path).readlines()\n\n @staticmethod\n def format_project_files(project_path: str) ->List[str]:\n \"\"\"\n Format project files.\n\n :param project_path: Project path.\n \"\"\"\n pass\n\n @staticmethod\n def to_single_line_comment(text: str) ->str:\n \"\"\"\n Convert a line of text to a single-line comment.\n\n :param text: Line of text.\n :returns: Single-line comment.\n \"\"\"\n pass\n\n @staticmethod\n def to_multi_line_comment(text: str) ->str:\n \"\"\"\n Convert a line of text to a multi-line comment.\n\n :param text: Line of text.\n :returns: Multi-line comment.\n \"\"\"\n pass\n",
"step-5": null,
"step-ids": [
6,
7,
8,
9
]
}
|
[
6,
7,
8,
9
] |
#!/usr/bin/python
import sys
import numpy as np
import random
import matplotlib.pyplot as plt
#Your code here
def loadData(fileDj):
    """Load a whitespace-delimited numeric dataset.

    :param fileDj: Path to a text file with one sample per line; fields are
        numeric (the last field may be a class label).
    :returns: List of samples, each a list of floats.
    """
    data = []
    # Context manager so the descriptor is closed even on error
    # (the original left the file handle open).
    with open(fileDj) as fid:
        for line in fid:
            line = line.strip()
            # Skip blank lines; split() also tolerates runs of whitespace,
            # where split(' ') would feed '' to float() and crash.
            if not line:
                continue
            data.append([float(x) for x in line.split()])
    return data
## K-means functions
def getInitialCentroids(X, k):
    """Pick k distinct random data points as initial centroids.

    :param X: Dataset as a list of samples.
    :param k: Number of clusters (k <= len(X)).
    :returns: List of k samples drawn from X without replacement.
    """
    # random.sample fixes two defects of the naive draw:
    #  * randint(0, len(X)) includes len(X), which raised IndexError;
    #  * independent draws could pick the same point twice, starting
    #    k-means with duplicate centroids.
    return [X[i] for i in random.sample(range(len(X)), k)]
def visualizeClusters(clusters):
    """Scatter-plot the first two clusters in feature space.

    Cluster 0 is drawn as red squares, cluster 1 as blue squares.
    NOTE(review): only clusters[0] and clusters[1] are plotted, so this
    assumes k == 2 -- confirm before reusing with a larger k.

    :param clusters: List of clusters; each cluster is a list of samples
        whose first two columns are the x/y coordinates. Mutated in
        place: each cluster is converted to a numpy array.
    """
    # Convert each cluster to an ndarray so column slicing works below.
    for i in range(len(clusters)):
        clusters[i] = np.array(clusters[i])
    plt.plot(clusters[0][:,0], clusters[0][:,1], 'rs', clusters[1][:,0], clusters[1][:,1], 'bs')
    plt.show()
    return
def has_converged(centroids, old_centroids, iterations):
    """Decide whether the k-means loop should stop.

    Stops when the centroids did not move since the previous iteration,
    or when a hard cap of 100 iterations has been exceeded.

    :param centroids: Current centroid list.
    :param old_centroids: Centroid list from the previous iteration.
    :param iterations: Number of iterations performed so far.
    :returns: True when iteration should terminate.
    """
    iteration_cap = 100
    # Either the cap was exceeded or nothing moved since last time.
    return iterations > iteration_cap or centroids == old_centroids
def euclidean_dist(data, centroids, clusters):
    """Assign every sample to the cluster of its nearest centroid.

    :param data: List of samples.
    :param centroids: List of k centroid coordinate lists.
    :param clusters: List of k (initially empty) lists, filled in place.
    :returns: clusters, where each sample is stored as a numpy array.
    """
    centroids = np.array(centroids)
    for instance in data:
        instance = np.array(instance)
        # Index of the centroid with the smallest Euclidean distance
        # (ties resolve to the lowest index, as with the original min()).
        mu_index = min(range(len(centroids)),
                       key=lambda i: np.linalg.norm(instance - centroids[i]))
        clusters[mu_index].append(instance)
    # Re-seed any cluster that ended up empty with one random sample so the
    # centroid update (np.mean) never sees an empty list.
    for cluster in clusters:
        if not cluster:
            # Bug fix: the original indexed the Python list `data` with a
            # numpy array (TypeError) and called .flatten() on a list
            # element; use a plain int index instead.
            cluster.append(np.array(data[np.random.randint(0, len(data))]))
    return clusters
def kmeans(X, k, maxIter=1000):
    """Run k-means and display the resulting clusters.

    Identical to kmeans_ except that the final clustering is plotted.

    :param X: Dataset as a list of samples.
    :param k: Number of clusters.
    :param maxIter: Unused; the iteration cap lives in has_converged.
    :returns: List of k clusters (lists of numpy arrays).
    """
    # Delegate to the non-plotting variant instead of duplicating the whole
    # loop -- the two bodies were byte-for-byte copies apart from the plot.
    clusters = kmeans_(X, k, maxIter)
    visualizeClusters(clusters)
    return clusters
def kmeans_(X, k, maxIter=1000):
    """Run Lloyd's k-means without plotting.

    :param X: Dataset as a list of samples.
    :param k: Number of clusters.
    :param maxIter: Unused; the iteration cap lives in has_converged.
    :returns: List of k clusters (lists of numpy arrays).
    """
    centroids = getInitialCentroids(X, k)
    old_centroids = [[] for _ in range(k)]
    iterations = 0
    while not has_converged(centroids, old_centroids, iterations):
        iterations += 1
        # Assignment step: put every sample in its nearest cluster.
        clusters = euclidean_dist(X, centroids, [[] for _ in range(k)])
        # Update step: move each centroid to its cluster's mean.
        for index, cluster in enumerate(clusters):
            old_centroids[index] = centroids[index]
            centroids[index] = np.mean(cluster, axis=0).tolist()
    return clusters
def Func(clusters):
    """Objective value used for the knee plot.

    For every cluster, sums the Euclidean distance from the cluster's
    first member to each of its remaining members.

    :param clusters: List of non-empty clusters of numpy arrays.
    :returns: Total within-cluster distance.
    """
    total = 0
    for cluster in clusters:
        anchor = cluster[0]
        for member in cluster[1:]:
            total += np.linalg.norm(anchor - member)
    return total
def kneeFinding(X, kList):
    """Plot the clustering objective for each candidate k (knee method).

    :param X: Dataset as a list of samples.
    :param kList: Iterable of k values to evaluate.
    """
    kList = list(kList)
    obj = [Func(kmeans_(X, k)) for k in kList]
    # Bug fix: the x-axis was hard-coded to range(1, 7); plot against the
    # k values that were actually evaluated.
    plt.plot(kList, obj)
    plt.show()
    return
def purity(X, clusters):
    """Compute per-cluster purity against the binary labels in column 2.

    Each sample's index-2 entry is treated as its class label; for every
    cluster the fraction of label-1 members is computed and the majority
    share (the fraction or its complement, whichever exceeds 0.5) is
    reported.

    :param X: Full dataset (unused; kept for interface compatibility).
    :param clusters: List of non-empty clusters of samples whose label
        sits at index 2.
    :returns: List of purity values, one per cluster.
    """
    purities = []
    # Generalized from a hard-coded 2 clusters to any number of clusters.
    for cluster in clusters:
        count = sum(1 for sample in cluster if int(sample[2]) == 1)
        frac = count * 1.0 / len(cluster)
        # Report the majority share regardless of which label dominates.
        purities.append(frac if frac > 0.5 else 1 - frac)
    return purities
'''
## GMM functions
#calculate the initial covariance matrix
#covType: diag, full
def getInitialsGMM(X,k,covType):
if covType == 'full':
dataArray = np.transpose(np.array([pt[0:-1] for pt in X]))
covMat = np.cov(dataArray)
else:
covMatList = []
for i in range(len(X[0])-1):
data = [pt[i] for pt in X]
cov = np.asscalar(np.cov(data))
covMatList.append(cov)
covMat = np.diag(covMatList)
initialClusters = {}
#Your code here
return initialClusters
def calcLogLikelihood(X,clusters,k):
loglikelihood = 0
#Your code here
return loglikelihood
#E-step
def updateEStep(X,clusters,k):
EMatrix = []
#Your code here
return EMatrix
#M-step
def updateMStep(X,clusters,EMatrix):
#Your code here
return clusters
def visualizeClustersGMM(X,labels,clusters,covType):
#Your code here
def gmmCluster(X, k, covType, maxIter=1000):
#initial clusters
clustersGMM = getInitialsGMM(X,k,covType)
labels = []
#Your code here
visualizeClustersGMM(X,labels,clustersGMM,covType)
return labels,clustersGMM
def purityGMM(X, clusters, labels):
purities = []
#Your code here
return purities
'''
def main():
#######dataset path
#datadir = sys.argv[1]
datadir = ''
pathDataset1 = datadir+'humanData.txt'
#pathDataset2 = datadir+'/audioData.txt'
dataset1 = loadData(pathDataset1)
#dataset2 = loadData(pathDataset2)
#Q4
kneeFinding(dataset1,range(1,7))
#Q5
clusters = kmeans(dataset1, 2, maxIter=1000)
purity(dataset1,clusters)
'''
#Q7
labels11,clustersGMM11 = gmmCluster(dataset1, 2, 'diag')
labels12,clustersGMM12 = gmmCluster(dataset1, 2, 'full')
#Q8
labels21,clustersGMM21 = gmmCluster(dataset2, 2, 'diag')
labels22,clustersGMM22 = gmmCluster(dataset2, 2, 'full')
#Q9
purities11 = purityGMM(dataset1, clustersGMM11, labels11)
purities12 = purityGMM(dataset1, clustersGMM12, labels12)
purities21 = purityGMM(dataset2, clustersGMM21, labels21)
purities22 = purityGMM(dataset2, clustersGMM22, labels22)
'''
# Script entry point: run the experiments only when executed directly.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "000dd63089fd0c6184fd032fe75ccc920beee7a8",
"index": 127,
"step-1": "<mask token>\n\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n return data\n\n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],\n clusters[1][:, 1], 'bs')\n plt.show()\n return\n\n\n<mask token>\n\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for\n i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].\n flatten().tolist())\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n visualizeClusters(clusters)\n return clusters\n\n\ndef kmeans_(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n 
old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n distSum = 0\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n return distSum\n\n\ndef kneeFinding(X, kList):\n obj = []\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n plt.plot(range(1, 7), obj)\n plt.show()\n return\n\n\ndef purity(X, clusters):\n purities = []\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if int(clusters[i][idx][2]) == 1:\n count += 1\n purity = count * 1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1 - purity)\n return purities\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n return data\n\n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],\n clusters[1][:, 1], 'bs')\n plt.show()\n return\n\n\n<mask token>\n\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for\n i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].\n flatten().tolist())\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n visualizeClusters(clusters)\n return clusters\n\n\ndef kmeans_(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n 
old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n distSum = 0\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n return distSum\n\n\ndef kneeFinding(X, kList):\n obj = []\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n plt.plot(range(1, 7), obj)\n plt.show()\n return\n\n\ndef purity(X, clusters):\n purities = []\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if int(clusters[i][idx][2]) == 1:\n count += 1\n purity = count * 1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1 - purity)\n return purities\n\n\n<mask token>\n\n\ndef main():\n datadir = ''\n pathDataset1 = datadir + 'humanData.txt'\n dataset1 = loadData(pathDataset1)\n kneeFinding(dataset1, range(1, 7))\n clusters = kmeans(dataset1, 2, maxIter=1000)\n purity(dataset1, clusters)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n return data\n\n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],\n clusters[1][:, 1], 'bs')\n plt.show()\n return\n\n\ndef has_converged(centroids, old_centroids, iterations):\n MAX_ITERATIONS = 100\n if iterations > MAX_ITERATIONS:\n return True\n return old_centroids == centroids\n\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for\n i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].\n flatten().tolist())\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n visualizeClusters(clusters)\n return clusters\n\n\ndef kmeans_(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n 
iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n distSum = 0\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n return distSum\n\n\ndef kneeFinding(X, kList):\n obj = []\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n plt.plot(range(1, 7), obj)\n plt.show()\n return\n\n\ndef purity(X, clusters):\n purities = []\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if int(clusters[i][idx][2]) == 1:\n count += 1\n purity = count * 1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1 - purity)\n return purities\n\n\n<mask token>\n\n\ndef main():\n datadir = ''\n pathDataset1 = datadir + 'humanData.txt'\n dataset1 = loadData(pathDataset1)\n kneeFinding(dataset1, range(1, 7))\n clusters = kmeans(dataset1, 2, maxIter=1000)\n purity(dataset1, clusters)\n\n\n<mask token>\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n return data\n\n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],\n clusters[1][:, 1], 'bs')\n plt.show()\n return\n\n\ndef has_converged(centroids, old_centroids, iterations):\n MAX_ITERATIONS = 100\n if iterations > MAX_ITERATIONS:\n return True\n return old_centroids == centroids\n\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for\n i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].\n flatten().tolist())\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n visualizeClusters(clusters)\n return clusters\n\n\ndef kmeans_(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n 
while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n distSum = 0\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n return distSum\n\n\ndef kneeFinding(X, kList):\n obj = []\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n plt.plot(range(1, 7), obj)\n plt.show()\n return\n\n\ndef purity(X, clusters):\n purities = []\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if int(clusters[i][idx][2]) == 1:\n count += 1\n purity = count * 1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1 - purity)\n return purities\n\n\n<mask token>\n\n\ndef main():\n datadir = ''\n pathDataset1 = datadir + 'humanData.txt'\n dataset1 = loadData(pathDataset1)\n kneeFinding(dataset1, range(1, 7))\n clusters = kmeans(dataset1, 2, maxIter=1000)\n purity(dataset1, clusters)\n\n\n<mask token>\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python\n\nimport sys\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n#Your code here\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n\n\n return data\n\n## K-means functions \n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n\n #Your code here\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n\n plt.plot(clusters[0][:,0], clusters[0][:,1], 'rs', clusters[1][:,0], clusters[1][:,1], 'bs')\n plt.show()\n return\n\ndef has_converged(centroids, old_centroids, iterations):\n MAX_ITERATIONS = 100\n if iterations > MAX_ITERATIONS:\n return True\n return old_centroids == centroids\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) \\\n for i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].flatten().tolist())\n\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n\n centroids = getInitialCentroids(X,k)\n\n old_centroids = [[] for i in range(k)]\n\n iterations = 0\n while not (has_converged(centroids, old_centroids, iterations)):\n iterations += 1\n\n clusters = [[] for i in range(k)]\n\n # assign data points to clusters\n clusters = euclidean_dist(X, centroids, clusters)\n\n # recalculate centroids\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n\n 
visualizeClusters(clusters)\n\n return clusters\n\ndef kmeans_(X, k, maxIter=1000):\n\n centroids = getInitialCentroids(X,k)\n\n old_centroids = [[] for i in range(k)]\n\n iterations = 0\n while not (has_converged(centroids, old_centroids, iterations)):\n iterations += 1\n\n clusters = [[] for i in range(k)]\n\n # assign data points to clusters\n clusters = euclidean_dist(X, centroids, clusters)\n\n # recalculate centroids\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n\n #visualizeClusters(clusters)\n\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n\n distSum = 0\n\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n\n return distSum\n\ndef kneeFinding(X,kList):\n obj = []\n\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n\n plt.plot(range(1,7), obj)\n plt.show()\n\n return\n\ndef purity(X, clusters):\n purities = []\n #Your code\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if(int(clusters[i][idx][2]) == 1):\n count += 1\n\n purity = count*1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1-purity)\n\n #<type 'list'>: [0.9724249797242498, 0.999000999000999]\n return purities\n\n'''\n\n## GMM functions \n\n#calculate the initial covariance matrix\n#covType: diag, full\ndef getInitialsGMM(X,k,covType):\n if covType == 'full':\n dataArray = np.transpose(np.array([pt[0:-1] for pt in X]))\n covMat = np.cov(dataArray)\n else:\n covMatList = []\n for i in range(len(X[0])-1):\n data = [pt[i] for pt in X]\n cov = np.asscalar(np.cov(data))\n covMatList.append(cov)\n covMat = np.diag(covMatList)\n\n initialClusters = {}\n #Your code here\n return initialClusters\n\n\ndef calcLogLikelihood(X,clusters,k):\n loglikelihood = 0\n #Your code here\n return 
loglikelihood\n\n#E-step\ndef updateEStep(X,clusters,k):\n EMatrix = []\n #Your code here\n return EMatrix\n\n#M-step\ndef updateMStep(X,clusters,EMatrix):\n #Your code here\n return clusters\n\ndef visualizeClustersGMM(X,labels,clusters,covType):\n #Your code here\n\n\ndef gmmCluster(X, k, covType, maxIter=1000):\n #initial clusters\n clustersGMM = getInitialsGMM(X,k,covType)\n labels = []\n #Your code here\n visualizeClustersGMM(X,labels,clustersGMM,covType)\n return labels,clustersGMM\n\n\ndef purityGMM(X, clusters, labels):\n purities = []\n #Your code here\n return purities\n\n\n'''\n\ndef main():\n #######dataset path\n #datadir = sys.argv[1]\n datadir = ''\n pathDataset1 = datadir+'humanData.txt'\n #pathDataset2 = datadir+'/audioData.txt'\n dataset1 = loadData(pathDataset1)\n #dataset2 = loadData(pathDataset2)\n\n\n #Q4\n kneeFinding(dataset1,range(1,7))\n\n #Q5\n clusters = kmeans(dataset1, 2, maxIter=1000)\n purity(dataset1,clusters)\n'''\n #Q7\n labels11,clustersGMM11 = gmmCluster(dataset1, 2, 'diag')\n labels12,clustersGMM12 = gmmCluster(dataset1, 2, 'full')\n\n #Q8\n labels21,clustersGMM21 = gmmCluster(dataset2, 2, 'diag')\n labels22,clustersGMM22 = gmmCluster(dataset2, 2, 'full')\n\n #Q9\n purities11 = purityGMM(dataset1, clustersGMM11, labels11)\n purities12 = purityGMM(dataset1, clustersGMM12, labels12)\n purities21 = purityGMM(dataset2, clustersGMM21, labels21)\n purities22 = purityGMM(dataset2, clustersGMM22, labels22)\n'''\nif __name__ == \"__main__\":\n main()",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
<|reserved_special_token_0|>
class Node:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def save_sample(self, val):
if self.file:
self.file.write('{}\n'.format(self.val))
def sample(self, isBurn=False):
if self.observed:
return self.val
cand = numpy.random.normal(self.val, self.candidate_standard_deviation)
cand = self.cleanse_val(cand)
if not self.in_support(cand):
if not isBurn:
self.posteriors.append(self.val)
self.rejected = self.rejected + 1
self.stayed = self.stayed + 1
return self.val
old_val = self.val
reject_likelihood = self.likelihood(old_val)
accept_likelihood = self.likelihood(cand)
for child in self.children:
reject_likelihood += child.likelihood()
self.val = cand
for child in self.children:
accept_likelihood += child.likelihood()
u = log(random.random())
if u >= accept_likelihood - reject_likelihood:
self.val = old_val
if not isBurn:
self.stayed = self.stayed + 1
elif not isBurn:
self.accepted = self.accepted + 1
if not isBurn:
self.posteriors.append(self.val)
return self.val
def cleanse_val(self, val):
return val
<|reserved_special_token_0|>
def add_child(self, child):
self.children.append(child)
def mixplot(self, write=False):
if len(self.posteriors) == 0:
return
xs, ys = zip(*enumerate(self.posteriors))
plt.plot(xs, ys)
if write:
plt.savefig(self.name + '-mixplot.png')
plt.close()
else:
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __pow__(self, other):
return Power(self, other)
class Add(Node):
def __init__(self, *args):
def map_args(n):
if isinstance(n, Node):
return n
else:
return Fixed('Fixed ({})'.format(n), val=n)
self.parents = [map_args(n) for n in list(args)]
Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'
)
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return reduce(lambda total, p: total + p.value(), self.parents, 0)
class Fixed(Node):
def __init__(self, name, val=None):
Node.__init__(self, name + ' (Fixed)', val=val)
def likelihood(self):
return 0
class Power(Node):
def __init__(self, base, exponent):
if isinstance(base, Node):
self.base = base
else:
self.base = Fixed('base {}'.format(base), val=base)
if isinstance(exponent, Node):
self.exponent = exponent
else:
self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)
name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)
Node.__init__(self, name)
self.parents = [self.base, self.exponent]
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return self.base.value() ** self.exponent.value()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def complete_conditional(self, target):
return reduce(lambda l, child: l + child.likelihood(), self.
children, self.likelihood())
def save_sample(self, val):
if self.file:
self.file.write('{}\n'.format(self.val))
def sample(self, isBurn=False):
if self.observed:
return self.val
cand = numpy.random.normal(self.val, self.candidate_standard_deviation)
cand = self.cleanse_val(cand)
if not self.in_support(cand):
if not isBurn:
self.posteriors.append(self.val)
self.rejected = self.rejected + 1
self.stayed = self.stayed + 1
return self.val
old_val = self.val
reject_likelihood = self.likelihood(old_val)
accept_likelihood = self.likelihood(cand)
for child in self.children:
reject_likelihood += child.likelihood()
self.val = cand
for child in self.children:
accept_likelihood += child.likelihood()
u = log(random.random())
if u >= accept_likelihood - reject_likelihood:
self.val = old_val
if not isBurn:
self.stayed = self.stayed + 1
elif not isBurn:
self.accepted = self.accepted + 1
if not isBurn:
self.posteriors.append(self.val)
return self.val
def cleanse_val(self, val):
return val
<|reserved_special_token_0|>
def add_child(self, child):
self.children.append(child)
def mixplot(self, write=False):
if len(self.posteriors) == 0:
return
xs, ys = zip(*enumerate(self.posteriors))
plt.plot(xs, ys)
if write:
plt.savefig(self.name + '-mixplot.png')
plt.close()
else:
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __pow__(self, other):
return Power(self, other)
class Add(Node):
def __init__(self, *args):
def map_args(n):
if isinstance(n, Node):
return n
else:
return Fixed('Fixed ({})'.format(n), val=n)
self.parents = [map_args(n) for n in list(args)]
Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'
)
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return reduce(lambda total, p: total + p.value(), self.parents, 0)
class Fixed(Node):
def __init__(self, name, val=None):
Node.__init__(self, name + ' (Fixed)', val=val)
def likelihood(self):
return 0
class Power(Node):
def __init__(self, base, exponent):
if isinstance(base, Node):
self.base = base
else:
self.base = Fixed('base {}'.format(base), val=base)
if isinstance(exponent, Node):
self.exponent = exponent
else:
self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)
name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)
Node.__init__(self, name)
self.parents = [self.base, self.exponent]
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return self.base.value() ** self.exponent.value()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node:
def __init__(self, name, val=None, observed=False,
candidate_standard_deviation=1, save_samples=False):
self.name = name
self.val = val
self.observed = observed
self.candidate_standard_deviation = candidate_standard_deviation
self.children = []
self.posteriors = []
self.rejected = 0
self.stayed = 0
self.accepted = 0
<|reserved_special_token_0|>
def complete_conditional(self, target):
return reduce(lambda l, child: l + child.likelihood(), self.
children, self.likelihood())
def save_sample(self, val):
if self.file:
self.file.write('{}\n'.format(self.val))
def sample(self, isBurn=False):
if self.observed:
return self.val
cand = numpy.random.normal(self.val, self.candidate_standard_deviation)
cand = self.cleanse_val(cand)
if not self.in_support(cand):
if not isBurn:
self.posteriors.append(self.val)
self.rejected = self.rejected + 1
self.stayed = self.stayed + 1
return self.val
old_val = self.val
reject_likelihood = self.likelihood(old_val)
accept_likelihood = self.likelihood(cand)
for child in self.children:
reject_likelihood += child.likelihood()
self.val = cand
for child in self.children:
accept_likelihood += child.likelihood()
u = log(random.random())
if u >= accept_likelihood - reject_likelihood:
self.val = old_val
if not isBurn:
self.stayed = self.stayed + 1
elif not isBurn:
self.accepted = self.accepted + 1
if not isBurn:
self.posteriors.append(self.val)
return self.val
def cleanse_val(self, val):
return val
def value(self):
return self.val
def add_child(self, child):
self.children.append(child)
def mixplot(self, write=False):
if len(self.posteriors) == 0:
return
xs, ys = zip(*enumerate(self.posteriors))
plt.plot(xs, ys)
if write:
plt.savefig(self.name + '-mixplot.png')
plt.close()
else:
plt.show()
def plot_posterior(self, write=False):
if len(self.posteriors) == 0:
return
plt.title('Posterior {}'.format(self.name))
plt.hist(self.posteriors, bins=30, normed=True, label=
'Posterior Dist ' + self.name)
if write:
plt.savefig(self.name + '-posterior.png')
plt.close()
else:
plt.show()
def __add__(self, other):
return Add(self, other)
def __pow__(self, other):
return Power(self, other)
class Add(Node):
def __init__(self, *args):
def map_args(n):
if isinstance(n, Node):
return n
else:
return Fixed('Fixed ({})'.format(n), val=n)
self.parents = [map_args(n) for n in list(args)]
Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'
)
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return reduce(lambda total, p: total + p.value(), self.parents, 0)
class Fixed(Node):
def __init__(self, name, val=None):
Node.__init__(self, name + ' (Fixed)', val=val)
def likelihood(self):
return 0
class Power(Node):
def __init__(self, base, exponent):
if isinstance(base, Node):
self.base = base
else:
self.base = Fixed('base {}'.format(base), val=base)
if isinstance(exponent, Node):
self.exponent = exponent
else:
self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)
name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)
Node.__init__(self, name)
self.parents = [self.base, self.exponent]
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return self.base.value() ** self.exponent.value()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node:
def __init__(self, name, val=None, observed=False,
candidate_standard_deviation=1, save_samples=False):
self.name = name
self.val = val
self.observed = observed
self.candidate_standard_deviation = candidate_standard_deviation
self.children = []
self.posteriors = []
self.rejected = 0
self.stayed = 0
self.accepted = 0
def likelihood(self):
raise NotImplementedError
def complete_conditional(self, target):
return reduce(lambda l, child: l + child.likelihood(), self.
children, self.likelihood())
def save_sample(self, val):
if self.file:
self.file.write('{}\n'.format(self.val))
def sample(self, isBurn=False):
if self.observed:
return self.val
cand = numpy.random.normal(self.val, self.candidate_standard_deviation)
cand = self.cleanse_val(cand)
if not self.in_support(cand):
if not isBurn:
self.posteriors.append(self.val)
self.rejected = self.rejected + 1
self.stayed = self.stayed + 1
return self.val
old_val = self.val
reject_likelihood = self.likelihood(old_val)
accept_likelihood = self.likelihood(cand)
for child in self.children:
reject_likelihood += child.likelihood()
self.val = cand
for child in self.children:
accept_likelihood += child.likelihood()
u = log(random.random())
if u >= accept_likelihood - reject_likelihood:
self.val = old_val
if not isBurn:
self.stayed = self.stayed + 1
elif not isBurn:
self.accepted = self.accepted + 1
if not isBurn:
self.posteriors.append(self.val)
return self.val
def cleanse_val(self, val):
return val
def value(self):
return self.val
def add_child(self, child):
self.children.append(child)
def mixplot(self, write=False):
if len(self.posteriors) == 0:
return
xs, ys = zip(*enumerate(self.posteriors))
plt.plot(xs, ys)
if write:
plt.savefig(self.name + '-mixplot.png')
plt.close()
else:
plt.show()
def plot_posterior(self, write=False):
if len(self.posteriors) == 0:
return
plt.title('Posterior {}'.format(self.name))
plt.hist(self.posteriors, bins=30, normed=True, label=
'Posterior Dist ' + self.name)
if write:
plt.savefig(self.name + '-posterior.png')
plt.close()
else:
plt.show()
def __add__(self, other):
return Add(self, other)
def __pow__(self, other):
return Power(self, other)
class Add(Node):
def __init__(self, *args):
def map_args(n):
if isinstance(n, Node):
return n
else:
return Fixed('Fixed ({})'.format(n), val=n)
self.parents = [map_args(n) for n in list(args)]
Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'
)
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return reduce(lambda total, p: total + p.value(), self.parents, 0)
class Fixed(Node):
def __init__(self, name, val=None):
Node.__init__(self, name + ' (Fixed)', val=val)
def likelihood(self):
return 0
class Power(Node):
def __init__(self, base, exponent):
if isinstance(base, Node):
self.base = base
else:
self.base = Fixed('base {}'.format(base), val=base)
if isinstance(exponent, Node):
self.exponent = exponent
else:
self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)
name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)
Node.__init__(self, name)
self.parents = [self.base, self.exponent]
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return self.base.value() ** self.exponent.value()
<|reserved_special_token_1|>
from functools import reduce
from math import (log, sqrt)
import matplotlib.pyplot as plt
import matplotlib.pylab as mlab
import numpy
import random
import scipy.stats
class Node:
    """Base node of a probabilistic graphical model sampled with
    Metropolis-Hastings MCMC.

    Subclasses represent specific distributions and are expected to supply:
      * likelihood(val=None) -- log-likelihood of the node's (or the given)
        value. NOTE(review): sample() calls self.likelihood(old_val) with a
        positional argument while this base class defines likelihood(self)
        with none, so subclasses presumably accept an optional value --
        confirm against the subclasses.
      * in_support(val) -- whether a candidate lies in the distribution's
        support; called by sample() but not defined on this base class.
    """
    def __init__(
        self,
        name,
        val=None,
        observed=False,
        candidate_standard_deviation=1,
        save_samples=False
    ):
        # name: label used in plots and in derived node names.
        # val: current value of the node in the chain.
        # observed: if True, sample() always returns the fixed value.
        # candidate_standard_deviation: std-dev of the Gaussian proposal.
        # save_samples: currently unused -- the sample-file code below is
        # commented out, so self.file is never created.
        self.name = name
        self.val = val
        self.observed = observed
        self.candidate_standard_deviation = candidate_standard_deviation
        self.children = []
        # Retained (non-burn-in) samples of this node.
        self.posteriors = []
        # Proposal bookkeeping counters.
        self.rejected = 0
        self.stayed = 0
        self.accepted = 0
        #if save_samples:
        #    self.file = open(self.name, 'w')
        #else:
        #    self.file = None
    def likelihood(self):
        """Log-likelihood of the node's current value; subclasses override."""
        raise NotImplementedError
    def complete_conditional(self, target):
        """Log of the complete conditional: own likelihood plus all
        children's likelihoods.

        NOTE(review): `target` is unused as written -- verify intent with
        callers.
        """
        return reduce(
            lambda l, child: l + child.likelihood(),
            self.children,
            self.likelihood()
        )
    def save_sample(self, val):
        """Write the current value to this node's sample file, if open.

        NOTE(review): self.file is only assigned by commented-out code in
        __init__, so calling this would raise AttributeError as written;
        all call sites are themselves commented out.
        """
        if self.file:
            self.file.write('{}\n'.format(self.val))
    def sample(self, isBurn=False):
        """Draw one Metropolis-Hastings step and update self.val.

        Proposes a Gaussian perturbation of the current value, accepts it
        with log-probability (accept_likelihood - reject_likelihood), where
        each term sums the node's own log-likelihood and its children's.
        The kept value is appended to self.posteriors unless isBurn is True.
        Returns the (possibly unchanged) current value.
        """
        if self.observed:
            #self.save_sample()
            return self.val
        # get a candidate value from a Gaussian proposal around the current one
        cand = numpy.random.normal(self.val, self.candidate_standard_deviation)
        cand = self.cleanse_val(cand)
        #print(self.name, 'cand', cand)
        # Reject immediately if the candidate lies outside the support.
        if not self.in_support(cand):
            #print('*****', self.name, 'reject', cand)
            if not isBurn:
                self.posteriors.append(self.val)
                self.rejected = self.rejected + 1
                self.stayed = self.stayed + 1
            #self.save_sample()
            return self.val
        old_val = self.val
        reject_likelihood = self.likelihood(old_val)
        accept_likelihood = self.likelihood(cand)
        # factor in the children with the current value
        for child in self.children:
            reject_likelihood += child.likelihood()
        # get the likelihood of the candidate value (children read self.val)
        self.val = cand
        for child in self.children:
            accept_likelihood += child.likelihood()
        u = log(random.random())
        #print(self.name, 'r', reject_likelihood)
        #print(self.name, 'a', accept_likelihood)
        #print(self.name, 'u', u)
        # set it back if staying is more likely
        if u >= accept_likelihood - reject_likelihood:
            #print(self.name, 'set it back')
            self.val = old_val
            if not isBurn:
                self.stayed = self.stayed + 1
        else:
            #print(self.name, 'keep the cand', cand)
            if not isBurn:
                self.accepted = self.accepted + 1
        if not isBurn:
            self.posteriors.append(self.val)
        #self.save_sample()
        return self.val
    def cleanse_val(self, val):
        """Hook for subclasses to coerce a raw proposal (e.g. rounding);
        identity by default."""
        return val
    # Need a function to handle the Add node's value retrieval
    def value(self):
        """Current value of the node (overridden by deterministic nodes)."""
        return self.val
    def add_child(self, child):
        """Register a dependent node whose likelihood enters this node's
        acceptance ratio."""
        self.children.append(child)
    def mixplot(self, write=False):
        """Trace plot of the retained samples; saved to
        '<name>-mixplot.png' when write is True, else shown interactively."""
        if (len(self.posteriors) == 0):
            return
        xs, ys = zip(*enumerate(self.posteriors))
        plt.plot(xs, ys)
        if write:
            plt.savefig(self.name + '-mixplot.png')
            plt.close()
        else:
            plt.show()
    def plot_posterior(self, write=False):
        """Histogram of the retained samples; saved to
        '<name>-posterior.png' when write is True, else shown."""
        if (len(self.posteriors) == 0):
            return
        #sample_min = min(self.posteriors)
        #sample_max = max(self.posteriors)
        #xs = mlab.frange(sample_min, sample_max, (sample_max - sample_min) / 100)
        #ys = [self.pdf(x) for x in xs]
        #plt.plot(xs, ys, label='Priot Dist ' + self.name)
        #plt.title('Prior Dist {}:{}'.format(self.name, self.candidate_standard_deviation))
        plt.title('Posterior {}'.format(self.name))
        # NOTE(review): hist's `normed` kwarg was removed in modern
        # Matplotlib; `density=True` is the replacement -- confirm the
        # pinned Matplotlib version before changing.
        plt.hist(self.posteriors, bins=30, normed=True, label="Posterior Dist " + self.name)
        if write:
            plt.savefig(self.name + '-posterior.png')
            plt.close()
        else:
            plt.show()
    def __add__(self, other):
        """node + other builds a deterministic Add node."""
        return Add(self, other)
    def __pow__(self, other):
        """node ** other builds a deterministic Power node."""
        return Power(self, other)
class Add(Node):
    """Deterministic node whose value is the sum of its parents' values."""

    def __init__(self, *terms):
        # Wrap raw numbers in Fixed nodes so every parent exposes value().
        self.parents = [
            term if isinstance(term, Node)
            else Fixed('Fixed ({})'.format(term), val=term)
            for term in terms
        ]
        joined = ':'.join(p.name for p in self.parents)
        Node.__init__(self, joined + ' (Add)')

    def add_child(self, child):
        # Children hang off every parent; Add itself is never sampled.
        for parent in self.parents:
            parent.add_child(child)

    def value(self):
        return sum(parent.value() for parent in self.parents)
class Fixed(Node):
    """A degenerate node that always holds one value with probability 1.

    Useful for priors and constants: its log-likelihood is always 0
    (probability 1), so it never influences acceptance ratios.
    """

    def __init__(self, name, val=None):
        Node.__init__(self, '{} (Fixed)'.format(name), val=val)

    def likelihood(self):
        # log(1) == 0: the fixed value is certain.
        return 0
class Power(Node):
    """Deterministic node computing ``base ** exponent``."""

    def __init__(self, base, exponent):
        # Wrap raw numbers in Fixed nodes so both parents expose value().
        self.base = (base if isinstance(base, Node)
                     else Fixed('base {}'.format(base), val=base))
        self.exponent = (exponent if isinstance(exponent, Node)
                         else Fixed('exponent {}'.format(exponent), val=exponent))
        Node.__init__(
            self,
            '{}:{} (Pow)'.format(self.base.name, self.exponent.name),
        )
        self.parents = [self.base, self.exponent]

    def add_child(self, child):
        # Children hang off both parents; Power itself is never sampled.
        for parent in self.parents:
            parent.add_child(child)

    def value(self):
        return self.base.value() ** self.exponent.value()
|
flexible
|
{
"blob_id": "4c5db1af9fd1c9b09f6e64a44d72351807c0f7a5",
"index": 8136,
"step-1": "<mask token>\n\n\nclass Node:\n <mask token>\n <mask token>\n <mask token>\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n return self.val\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n if not self.in_support(cand):\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n return self.val\n old_val = self.val\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n for child in self.children:\n reject_likelihood += child.likelihood()\n self.val = cand\n for child in self.children:\n accept_likelihood += child.likelihood()\n u = log(random.random())\n if u >= accept_likelihood - reject_likelihood:\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n elif not isBurn:\n self.accepted = self.accepted + 1\n if not isBurn:\n self.posteriors.append(self.val)\n return self.val\n\n def cleanse_val(self, val):\n return val\n <mask token>\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if len(self.posteriors) == 0:\n return\n xs, ys = zip(*enumerate(self.posteriors))\n plt.plot(xs, ys)\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n <mask token>\n <mask token>\n\n def __pow__(self, other):\n return Power(self, other)\n\n\nclass Add(Node):\n\n def __init__(self, *args):\n\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n self.parents = [map_args(n) for n in list(args)]\n Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n\nclass 
Fixed(Node):\n\n def __init__(self, name, val=None):\n Node.__init__(self, name + ' (Fixed)', val=val)\n\n def likelihood(self):\n return 0\n\n\nclass Power(Node):\n\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n if isinstance(exponent, Node):\n self.exponent = exponent\n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n Node.__init__(self, name)\n self.parents = [self.base, self.exponent]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-2": "<mask token>\n\n\nclass Node:\n <mask token>\n <mask token>\n\n def complete_conditional(self, target):\n return reduce(lambda l, child: l + child.likelihood(), self.\n children, self.likelihood())\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n return self.val\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n if not self.in_support(cand):\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n return self.val\n old_val = self.val\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n for child in self.children:\n reject_likelihood += child.likelihood()\n self.val = cand\n for child in self.children:\n accept_likelihood += child.likelihood()\n u = log(random.random())\n if u >= accept_likelihood - reject_likelihood:\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n elif not isBurn:\n self.accepted = self.accepted + 1\n if not isBurn:\n self.posteriors.append(self.val)\n return self.val\n\n def cleanse_val(self, val):\n return val\n <mask token>\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if len(self.posteriors) == 0:\n return\n xs, ys = zip(*enumerate(self.posteriors))\n plt.plot(xs, ys)\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n <mask token>\n <mask token>\n\n def __pow__(self, other):\n return Power(self, other)\n\n\nclass Add(Node):\n\n def __init__(self, *args):\n\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n self.parents = [map_args(n) for n in list(args)]\n Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'\n )\n\n def add_child(self, child):\n for p in self.parents:\n 
p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n\nclass Fixed(Node):\n\n def __init__(self, name, val=None):\n Node.__init__(self, name + ' (Fixed)', val=val)\n\n def likelihood(self):\n return 0\n\n\nclass Power(Node):\n\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n if isinstance(exponent, Node):\n self.exponent = exponent\n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n Node.__init__(self, name)\n self.parents = [self.base, self.exponent]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, name, val=None, observed=False,\n candidate_standard_deviation=1, save_samples=False):\n self.name = name\n self.val = val\n self.observed = observed\n self.candidate_standard_deviation = candidate_standard_deviation\n self.children = []\n self.posteriors = []\n self.rejected = 0\n self.stayed = 0\n self.accepted = 0\n <mask token>\n\n def complete_conditional(self, target):\n return reduce(lambda l, child: l + child.likelihood(), self.\n children, self.likelihood())\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n return self.val\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n if not self.in_support(cand):\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n return self.val\n old_val = self.val\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n for child in self.children:\n reject_likelihood += child.likelihood()\n self.val = cand\n for child in self.children:\n accept_likelihood += child.likelihood()\n u = log(random.random())\n if u >= accept_likelihood - reject_likelihood:\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n elif not isBurn:\n self.accepted = self.accepted + 1\n if not isBurn:\n self.posteriors.append(self.val)\n return self.val\n\n def cleanse_val(self, val):\n return val\n\n def value(self):\n return self.val\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if len(self.posteriors) == 0:\n return\n xs, ys = zip(*enumerate(self.posteriors))\n plt.plot(xs, ys)\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n\n def plot_posterior(self, write=False):\n if len(self.posteriors) == 0:\n return\n 
plt.title('Posterior {}'.format(self.name))\n plt.hist(self.posteriors, bins=30, normed=True, label=\n 'Posterior Dist ' + self.name)\n if write:\n plt.savefig(self.name + '-posterior.png')\n plt.close()\n else:\n plt.show()\n\n def __add__(self, other):\n return Add(self, other)\n\n def __pow__(self, other):\n return Power(self, other)\n\n\nclass Add(Node):\n\n def __init__(self, *args):\n\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n self.parents = [map_args(n) for n in list(args)]\n Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n\nclass Fixed(Node):\n\n def __init__(self, name, val=None):\n Node.__init__(self, name + ' (Fixed)', val=val)\n\n def likelihood(self):\n return 0\n\n\nclass Power(Node):\n\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n if isinstance(exponent, Node):\n self.exponent = exponent\n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n Node.__init__(self, name)\n self.parents = [self.base, self.exponent]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-4": "<mask token>\n\n\nclass Node:\n\n def __init__(self, name, val=None, observed=False,\n candidate_standard_deviation=1, save_samples=False):\n self.name = name\n self.val = val\n self.observed = observed\n self.candidate_standard_deviation = candidate_standard_deviation\n self.children = []\n self.posteriors = []\n self.rejected = 0\n self.stayed = 0\n self.accepted = 0\n\n def likelihood(self):\n raise NotImplementedError\n\n def complete_conditional(self, target):\n return reduce(lambda l, child: l + child.likelihood(), self.\n children, self.likelihood())\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n return self.val\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n if not self.in_support(cand):\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n return self.val\n old_val = self.val\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n for child in self.children:\n reject_likelihood += child.likelihood()\n self.val = cand\n for child in self.children:\n accept_likelihood += child.likelihood()\n u = log(random.random())\n if u >= accept_likelihood - reject_likelihood:\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n elif not isBurn:\n self.accepted = self.accepted + 1\n if not isBurn:\n self.posteriors.append(self.val)\n return self.val\n\n def cleanse_val(self, val):\n return val\n\n def value(self):\n return self.val\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if len(self.posteriors) == 0:\n return\n xs, ys = zip(*enumerate(self.posteriors))\n plt.plot(xs, ys)\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n\n def plot_posterior(self, write=False):\n if 
len(self.posteriors) == 0:\n return\n plt.title('Posterior {}'.format(self.name))\n plt.hist(self.posteriors, bins=30, normed=True, label=\n 'Posterior Dist ' + self.name)\n if write:\n plt.savefig(self.name + '-posterior.png')\n plt.close()\n else:\n plt.show()\n\n def __add__(self, other):\n return Add(self, other)\n\n def __pow__(self, other):\n return Power(self, other)\n\n\nclass Add(Node):\n\n def __init__(self, *args):\n\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n self.parents = [map_args(n) for n in list(args)]\n Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n\nclass Fixed(Node):\n\n def __init__(self, name, val=None):\n Node.__init__(self, name + ' (Fixed)', val=val)\n\n def likelihood(self):\n return 0\n\n\nclass Power(Node):\n\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n if isinstance(exponent, Node):\n self.exponent = exponent\n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n Node.__init__(self, name)\n self.parents = [self.base, self.exponent]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-5": "from functools import reduce\nfrom math import (log, sqrt)\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as mlab\nimport numpy\nimport random\nimport scipy.stats\n\nclass Node:\n def __init__(\n self,\n name,\n val=None,\n observed=False,\n candidate_standard_deviation=1,\n save_samples=False\n ):\n self.name = name\n self.val = val\n self.observed = observed\n self.candidate_standard_deviation = candidate_standard_deviation\n self.children = []\n self.posteriors = []\n self.rejected = 0\n self.stayed = 0\n self.accepted = 0\n\n #if save_samples:\n # self.file = open(self.name, 'w')\n #else:\n # self.file = None\n\n def likelihood(self):\n raise NotImplementedError\n\n def complete_conditional(self, target):\n return reduce(\n lambda l, child: l + child.likelihood(),\n self.children,\n self.likelihood()\n )\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n #self.save_sample()\n return self.val\n\n # get a candidate value\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n\n #print(self.name, 'cand', cand)\n\n if not self.in_support(cand):\n #print('*****', self.name, 'reject', cand)\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n #self.save_sample()\n return self.val\n\n old_val = self.val\n\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n\n # factor in the children with the curernt value\n for child in self.children:\n reject_likelihood += child.likelihood()\n\n # get the likelihood of the candidate value\n self.val = cand\n\n for child in self.children:\n accept_likelihood += child.likelihood()\n\n u = log(random.random())\n\n #print(self.name, 'r', reject_likelihood)\n #print(self.name, 'a', accept_likelihood)\n #print(self.name, 'u', u)\n\n # set it back if staying is 
more likely\n if u >= accept_likelihood - reject_likelihood:\n #print(self.name, 'set it back')\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n else:\n #print(self.name, 'keep the cand', cand)\n if not isBurn:\n self.accepted = self.accepted + 1\n\n if not isBurn:\n self.posteriors.append(self.val)\n #self.save_sample()\n\n return self.val\n\n def cleanse_val(self, val):\n return val\n\n # Need a function to handle the Add node's value retrieval\n def value(self):\n return self.val\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if (len(self.posteriors) == 0):\n return\n\n xs, ys = zip(*enumerate(self.posteriors))\n\n plt.plot(xs, ys)\n\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n\n def plot_posterior(self, write=False):\n if (len(self.posteriors) == 0):\n return\n #sample_min = min(self.posteriors)\n #sample_max = max(self.posteriors)\n\n #xs = mlab.frange(sample_min, sample_max, (sample_max - sample_min) / 100)\n #ys = [self.pdf(x) for x in xs]\n #plt.plot(xs, ys, label='Priot Dist ' + self.name)\n\n #plt.title('Prior Dist {}:{}'.format(self.name, self.candidate_standard_deviation))\n plt.title('Posterior {}'.format(self.name))\n plt.hist(self.posteriors, bins=30, normed=True, label=\"Posterior Dist \" + self.name)\n\n if write:\n plt.savefig(self.name + '-posterior.png')\n plt.close()\n else:\n plt.show()\n\n def __add__(self, other):\n return Add(self, other)\n\n def __pow__(self, other):\n return Power(self, other)\n\nclass Add(Node):\n def __init__(self, *args):\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n\n self.parents = [ map_args(n) for n in list(args)]\n\n Node.__init__(\n self,\n ':'.join([ p.name for p in self.parents ]) + ' (Add)',\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: 
total + p.value(), self.parents, 0)\n\n# The purpose of this node is to just have something that gives a fixed value\n# With a probability of 1. This is useful for priors.\nclass Fixed(Node):\n def __init__(self, name, val=None):\n Node.__init__(\n self,\n name + ' (Fixed)',\n val=val\n )\n\n def likelihood(self):\n # It's in log space, remember\n return 0\n\nclass Power(Node):\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n\n if isinstance(exponent, Node):\n self.exponent = exponent \n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n\n Node.__init__(\n self,\n name,\n )\n\n self.parents = [ self.base, self.exponent ]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-ids": [
18,
19,
23,
24,
26
]
}
|
[
18,
19,
23,
24,
26
] |
import pytest
from debbiedowner import make_it_negative, complain_about
def test_negativity():
    """make_it_negative flips the sign; complain_about formats the stock gripe."""
    assert make_it_negative(8) == -8
    expected = "I hate enthusiasm. Totally boring."
    assert complain_about('enthusiasm') == expected
def test_easy():
    """Sanity check that the test harness itself runs."""
    one = 1
    assert one == 1
def test_cleverness():
    """Negating a negative number yields its positive magnitude."""
    result = make_it_negative(-3)
    assert result == 3
|
normal
|
{
"blob_id": "e73e40a63b67ee1a6cca53a328af05e3eb3d8519",
"index": 703,
"step-1": "<mask token>\n\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'\n\n\n<mask token>\n\n\ndef test_cleverness():\n assert make_it_negative(-3) == 3\n",
"step-3": "<mask token>\n\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'\n\n\ndef test_easy():\n assert 1 == 1\n\n\ndef test_cleverness():\n assert make_it_negative(-3) == 3\n",
"step-4": "import pytest\nfrom debbiedowner import make_it_negative, complain_about\n\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == 'I hate enthusiasm. Totally boring.'\n\n\ndef test_easy():\n assert 1 == 1\n\n\ndef test_cleverness():\n assert make_it_negative(-3) == 3\n",
"step-5": "import pytest\n\nfrom debbiedowner import make_it_negative, complain_about\n\ndef test_negativity():\n assert make_it_negative(8) == -8\n assert complain_about('enthusiasm') == \"I hate enthusiasm. Totally boring.\"\n\ndef test_easy():\n assert 1 == 1\n\ndef test_cleverness():\n assert make_it_negative(-3) == 3",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Ui_Form(object):
    def setupUi(self, Form):
        """Build all widgets for the 'Revisit business events' dialog.

        Creates the green window palette, the read-only event table, the
        earlier/later paging buttons, the heading labels and the event-type
        combo box; wires the signals; then loads initial data via
        self.initialize().
        """
        Form.setObjectName(_fromUtf8('Form'))
        Form.resize(666, 538)
        palette = QtGui.QPalette()
        # Paging offset into the DESC-ordered event list (0 = most recent).
        self.eventSkip = 0
        # Opens the shared QSQLITE connection used by the QtSql queries.
        self.db = Database()
        # Green background for the Active/Inactive/Disabled window states.
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
        # True while a record is being displayed; cleared by handleChanged
        # when no record exists at the current offset.
        self.inWork = True
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
        Form.setPalette(palette)
        # Table that displays the selected event's field/value pairs.
        self.tb_EventViewer = QtGui.QTableView(Form)
        self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))
        self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))
        self.tb_EventViewer.horizontalHeader().setVisible(False)
        self.tb_EventViewer.verticalHeader().setVisible(False)
        # '<<' pages to older events (increments eventSkip).
        self.bt_Earlier = QtGui.QPushButton(Form)
        self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))
        self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))
        self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)
        # '>>' pages back toward the most recent event.
        self.bt_Later = QtGui.QPushButton(Form)
        self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))
        self.bt_Later.setObjectName(_fromUtf8('bt_Later'))
        self.bt_Later.clicked.connect(self.clicked_bt_Later)
        # Heading label: white bold 'Segoe UI Light' on the green window.
        self.label = QtGui.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,
            brush)
        self.label.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8('Segoe UI Light'))
        font.setPointSize(18)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8('label'))
        # Event-type selector; changing it refreshes the table view.
        self.cb_EventType = QtGui.QComboBox(Form)
        self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))
        self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))
        self.cb_EventType.currentIndexChanged['QString'].connect(self.
            handleChanged)
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))
        # label_3 shows status messages such as 'No Records found'.
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))
        # White text palette shared by label_2 and label_3.
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,
            brush)
        self.label_2.setPalette(palette)
        self.label_3.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8('Segoe UI'))
        font.setPointSize(12)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8('label_2'))
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8('label_3'))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        # Populates the combo box / initial view; defined elsewhere in the
        # class (masked here) — presumably loads event types. TODO confirm.
        self.initialize()
def retranslateUi(self, Form):
Form.setWindowTitle(_translate('Form', 'Revisit business events', None)
)
self.bt_Earlier.setText(_translate('Form', '<<', None))
self.bt_Later.setText(_translate('Form', '>>', None))
self.label.setText(_translate('Form', 'Revisit business events', None))
self.label_2.setText(_translate('Form', 'Select Event Type', None))
<|reserved_special_token_0|>
def getBusinessEventsType(self):
conn = sqlite3.connect('../Database/Business.db')
conn.text_factory = str
c = conn.cursor()
c.execute('SELECT Event FROM EventTypes')
locs = [r[0] for r in c.fetchall()]
conn.close()
return locs
def handleChanged(self, text):
modelView = QtGui.QStandardItemModel()
query = QtSql.QSqlQuery()
query.exec_(
"Select * from BusinessEvents a, EventTypes b where b.Event = '" +
text +
"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT " +
str(self.eventSkip) + ',1')
recCount = 0
while query.next():
recCount = recCount + 1
if query.value(2).toString() != '':
query_Origin = QtSql.QSqlQuery()
query_Origin.exec_("Select Name from Cities where ID = '" +
query.value(2).toString() + "' LIMIT 1")
query_Origin.next()
modelInputItem = QtGui.QStandardItem('Origin')
modelInputValue = QtGui.QStandardItem(query_Origin.value(0)
.toString())
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(3).toString() != '':
query_Destination = QtSql.QSqlQuery()
query_Destination.exec_(
"Select Name from Cities where ID = '" + query.value(3)
.toString() + "' LIMIT 1")
query_Destination.next()
modelInputItem = QtGui.QStandardItem('Destination')
modelInputValue = QtGui.QStandardItem(query_Destination.
value(0).toString())
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(4).toString() != '':
modelInputItem = QtGui.QStandardItem('Weight')
modelInputValue = QtGui.QStandardItem(query.value(4).toString()
)
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(5).toString() != '':
modelInputItem = QtGui.QStandardItem('Volume')
modelInputValue = QtGui.QStandardItem(query.value(5).toString()
)
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(6).toString() != '':
modelInputItem = QtGui.QStandardItem('Time of Entry')
modelInputValue = QtGui.QStandardItem(query.value(6).toString()
)
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(7).toString() != '':
modelInputItem = QtGui.QStandardItem('Priority')
modelInputValue = QtGui.QStandardItem(query.value(7).toString()
)
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(8).toString() != '':
modelInputItem = QtGui.QStandardItem('Price Per Gram')
modelInputValue = QtGui.QStandardItem(query.value(8).toString()
)
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(9).toString() != '':
modelInputItem = QtGui.QStandardItem('Price Per CC')
modelInputValue = QtGui.QStandardItem(query.value(9).toString()
)
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(10).toString() != '':
modelInputItem = QtGui.QStandardItem('Company')
modelInputValue = QtGui.QStandardItem(query.value(10).
toString())
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(11).toString() != '':
modelInputItem = QtGui.QStandardItem('Transport Type')
modelInputValue = QtGui.QStandardItem(query.value(11).
toString())
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(12).toString() != '':
modelInputItem = QtGui.QStandardItem('Day of the Week')
modelInputValue = QtGui.QStandardItem(query.value(12).
toString())
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(13).toString() != '':
modelInputItem = QtGui.QStandardItem('Frequency')
modelInputValue = QtGui.QStandardItem(query.value(13).
toString())
modelView.appendRow([modelInputItem, modelInputValue])
if query.value(14).toString() != '':
modelInputItem = QtGui.QStandardItem('Duration')
modelInputValue = QtGui.QStandardItem(query.value(14).
toString())
modelView.appendRow([modelInputItem, modelInputValue])
if recCount == 0:
self.label_3.setText(_translate('Form', 'No Records found', None))
self.inWork = False
else:
self.label_3.setText(_translate('Form', '', None))
self.inWork = True
self.tb_EventViewer.setModel(modelView)
def clicked_bt_Earlier(self):
self.eventSkip = self.eventSkip + 1
self.handleChanged(self.cb_EventType.currentText())
<|reserved_special_token_0|>
class Database:
    """Opens the shared QSQLITE connection used by the QtSql queries."""

    def __init__(self, parent=None):
        connection = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        connection.setDatabaseName('../Database/Business.db')
        connection.open()
        self.data = connection
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_Form(object):
    """Form for revisiting recorded business events one record at a time.

    Originally generated by pyuic4 and hand-extended.  The event type
    chosen in the combo box drives a query against the BusinessEvents
    table; the << / >> buttons move an offset (``self.eventSkip``)
    backwards/forwards through the matching rows, newest first.
    """

    # (BusinessEvents column index, label shown in the detail view).
    # Order matters: rows appear in the table in this sequence, matching
    # the original hand-written if-chain.
    _EVENT_FIELDS = (
        (4, 'Weight'),
        (5, 'Volume'),
        (6, 'Time of Entry'),
        (7, 'Priority'),
        (8, 'Price Per Gram'),
        (9, 'Price Per CC'),
        (10, 'Company'),
        (11, 'Transport Type'),
        (12, 'Day of the Week'),
        (13, 'Frequency'),
        (14, 'Duration'),
    )

    def setupUi(self, Form):
        """Create all widgets, palettes and signal connections on *Form*."""
        Form.setObjectName(_fromUtf8('Form'))
        Form.resize(666, 538)
        palette = QtGui.QPalette()
        # Paging offset into the result set; 0 == most recent event.
        self.eventSkip = 0
        # Registers and opens the Qt SQL connection used by handleChanged().
        self.db = Database()
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
        # Cleared once paging runs past the last available record.
        self.inWork = True
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
        Form.setPalette(palette)
        # Two-column (label, value) read-only view showing one event.
        self.tb_EventViewer = QtGui.QTableView(Form)
        self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))
        self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))
        self.tb_EventViewer.horizontalHeader().setVisible(False)
        self.tb_EventViewer.verticalHeader().setVisible(False)
        self.bt_Earlier = QtGui.QPushButton(Form)
        self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))
        self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))
        self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)
        self.bt_Later = QtGui.QPushButton(Form)
        self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))
        self.bt_Later.setObjectName(_fromUtf8('bt_Later'))
        self.bt_Later.clicked.connect(self.clicked_bt_Later)
        self.label = QtGui.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
        self.label.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8('Segoe UI Light'))
        font.setPointSize(18)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8('label'))
        self.cb_EventType = QtGui.QComboBox(Form)
        self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))
        self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))
        # Re-query and redisplay whenever the user picks a new event type.
        self.cb_EventType.currentIndexChanged['QString'].connect(self.handleChanged)
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))
        # label_3 doubles as the "No Records found" status line.
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        self.label_2.setPalette(palette)
        self.label_3.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8('Segoe UI'))
        font.setPointSize(12)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8('label_2'))
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8('label_3'))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        self.initialize()

    def retranslateUi(self, Form):
        """Apply all user-visible (translated) strings to the widgets."""
        Form.setWindowTitle(_translate('Form', 'Revisit business events', None))
        self.bt_Earlier.setText(_translate('Form', '<<', None))
        self.bt_Later.setText(_translate('Form', '>>', None))
        self.label.setText(_translate('Form', 'Revisit business events', None))
        self.label_2.setText(_translate('Form', 'Select Event Type', None))

    def initialize(self):
        """Populate the event-type combo box from the database."""
        self.cb_EventType.addItems(self.getBusinessEventsType())

    def getBusinessEventsType(self):
        """Return every event name from the EventTypes table.

        Uses a plain sqlite3 connection (not the Qt driver) because the
        result is consumed as a Python list by the combo box.
        """
        conn = sqlite3.connect('../Database/Business.db')
        try:
            conn.text_factory = str
            cursor = conn.cursor()
            cursor.execute('SELECT Event FROM EventTypes')
            return [row[0] for row in cursor.fetchall()]
        finally:
            # Close even if the query raises; the original leaked the
            # connection on error.
            conn.close()

    def _cityName(self, city_id):
        """Look up the display name for a Cities.ID value."""
        lookup = QtSql.QSqlQuery()
        lookup.prepare('Select Name from Cities where ID = ? LIMIT 1')
        lookup.addBindValue(city_id)
        lookup.exec_()
        lookup.next()
        return lookup.value(0).toString()

    @staticmethod
    def _appendDetail(model, label, value):
        """Append one (label, value) row to the detail model."""
        model.appendRow([QtGui.QStandardItem(label),
                         QtGui.QStandardItem(value)])

    def handleChanged(self, text):
        """Show the event at offset ``self.eventSkip`` for event type *text*.

        Updates ``label_3`` and ``self.inWork`` to reflect whether a record
        was found, then installs the new model on the table view.
        """
        modelView = QtGui.QStandardItemModel()
        query = QtSql.QSqlQuery()
        # Bind parameters instead of concatenating the combo text into the
        # SQL: avoids injection/quoting problems with event names.
        query.prepare(
            'Select * from BusinessEvents a, EventTypes b '
            'where b.Event = ? and b.EventTypeID = a.EventTypeID '
            'order by ID DESC LIMIT ?,1')
        query.addBindValue(text)
        query.addBindValue(self.eventSkip)
        query.exec_()
        recCount = 0
        while query.next():
            recCount += 1
            # Columns 2/3 hold city IDs and need a name lookup; all other
            # columns are shown verbatim when non-empty.
            origin_id = query.value(2).toString()
            if origin_id != '':
                self._appendDetail(modelView, 'Origin',
                                   self._cityName(origin_id))
            dest_id = query.value(3).toString()
            if dest_id != '':
                self._appendDetail(modelView, 'Destination',
                                   self._cityName(dest_id))
            for column, label in self._EVENT_FIELDS:
                value = query.value(column).toString()
                if value != '':
                    self._appendDetail(modelView, label, value)
        if recCount == 0:
            self.label_3.setText(_translate('Form', 'No Records found', None))
            self.inWork = False
        else:
            self.label_3.setText(_translate('Form', '', None))
            self.inWork = True
        self.tb_EventViewer.setModel(modelView)

    def clicked_bt_Earlier(self):
        """Page one record further back in history and refresh the view."""
        self.eventSkip = self.eventSkip + 1
        self.handleChanged(self.cb_EventType.currentText())

    def clicked_bt_Later(self):
        """Page one record forward (towards the newest) and refresh."""
        if self.eventSkip > 0:
            self.eventSkip = self.eventSkip - 1
        self.handleChanged(self.cb_EventType.currentText())
class Database:
    """Registers and opens the application's Qt SQLite connection."""

    def __init__(self, parent=None):
        # Register the SQLite driver once and point it at the business
        # database; QtSql.QSqlQuery objects then run on this connection.
        connection = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        connection.setDatabaseName('../Database/Business.db')
        connection.open()
        self.data = connection
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# pyuic4 compatibility shims: fall back to identity / non-encoding
# variants when this PyQt build lacks QString or UnicodeUTF8.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # QString is absent in newer PyQt builds; pass str through as-is.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        # Translate with an explicit UTF-8 encoding argument (older API).
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer API: no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Form for revisiting recorded business events one record at a time.

    Originally generated by pyuic4 and hand-extended.  The event type
    chosen in the combo box drives a query against the BusinessEvents
    table; the << / >> buttons move an offset (``self.eventSkip``)
    backwards/forwards through the matching rows, newest first.
    """

    # (BusinessEvents column index, label shown in the detail view).
    # Order matters: rows appear in the table in this sequence, matching
    # the original hand-written if-chain.
    _EVENT_FIELDS = (
        (4, 'Weight'),
        (5, 'Volume'),
        (6, 'Time of Entry'),
        (7, 'Priority'),
        (8, 'Price Per Gram'),
        (9, 'Price Per CC'),
        (10, 'Company'),
        (11, 'Transport Type'),
        (12, 'Day of the Week'),
        (13, 'Frequency'),
        (14, 'Duration'),
    )

    def setupUi(self, Form):
        """Create all widgets, palettes and signal connections on *Form*."""
        Form.setObjectName(_fromUtf8('Form'))
        Form.resize(666, 538)
        palette = QtGui.QPalette()
        # Paging offset into the result set; 0 == most recent event.
        self.eventSkip = 0
        # Registers and opens the Qt SQL connection used by handleChanged().
        self.db = Database()
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
        # Cleared once paging runs past the last available record.
        self.inWork = True
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
        Form.setPalette(palette)
        # Two-column (label, value) read-only view showing one event.
        self.tb_EventViewer = QtGui.QTableView(Form)
        self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))
        self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))
        self.tb_EventViewer.horizontalHeader().setVisible(False)
        self.tb_EventViewer.verticalHeader().setVisible(False)
        self.bt_Earlier = QtGui.QPushButton(Form)
        self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))
        self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))
        self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)
        self.bt_Later = QtGui.QPushButton(Form)
        self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))
        self.bt_Later.setObjectName(_fromUtf8('bt_Later'))
        self.bt_Later.clicked.connect(self.clicked_bt_Later)
        self.label = QtGui.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
        self.label.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8('Segoe UI Light'))
        font.setPointSize(18)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8('label'))
        self.cb_EventType = QtGui.QComboBox(Form)
        self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))
        self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))
        # Re-query and redisplay whenever the user picks a new event type.
        self.cb_EventType.currentIndexChanged['QString'].connect(self.handleChanged)
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))
        # label_3 doubles as the "No Records found" status line.
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        self.label_2.setPalette(palette)
        self.label_3.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8('Segoe UI'))
        font.setPointSize(12)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8('label_2'))
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8('label_3'))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        self.initialize()

    def retranslateUi(self, Form):
        """Apply all user-visible (translated) strings to the widgets."""
        Form.setWindowTitle(_translate('Form', 'Revisit business events', None))
        self.bt_Earlier.setText(_translate('Form', '<<', None))
        self.bt_Later.setText(_translate('Form', '>>', None))
        self.label.setText(_translate('Form', 'Revisit business events', None))
        self.label_2.setText(_translate('Form', 'Select Event Type', None))

    def initialize(self):
        """Populate the event-type combo box from the database."""
        self.cb_EventType.addItems(self.getBusinessEventsType())

    def getBusinessEventsType(self):
        """Return every event name from the EventTypes table.

        Uses a plain sqlite3 connection (not the Qt driver) because the
        result is consumed as a Python list by the combo box.
        """
        conn = sqlite3.connect('../Database/Business.db')
        try:
            conn.text_factory = str
            cursor = conn.cursor()
            cursor.execute('SELECT Event FROM EventTypes')
            return [row[0] for row in cursor.fetchall()]
        finally:
            # Close even if the query raises; the original leaked the
            # connection on error.
            conn.close()

    def _cityName(self, city_id):
        """Look up the display name for a Cities.ID value."""
        lookup = QtSql.QSqlQuery()
        lookup.prepare('Select Name from Cities where ID = ? LIMIT 1')
        lookup.addBindValue(city_id)
        lookup.exec_()
        lookup.next()
        return lookup.value(0).toString()

    @staticmethod
    def _appendDetail(model, label, value):
        """Append one (label, value) row to the detail model."""
        model.appendRow([QtGui.QStandardItem(label),
                         QtGui.QStandardItem(value)])

    def handleChanged(self, text):
        """Show the event at offset ``self.eventSkip`` for event type *text*.

        Updates ``label_3`` and ``self.inWork`` to reflect whether a record
        was found, then installs the new model on the table view.
        """
        modelView = QtGui.QStandardItemModel()
        query = QtSql.QSqlQuery()
        # Bind parameters instead of concatenating the combo text into the
        # SQL: avoids injection/quoting problems with event names.
        query.prepare(
            'Select * from BusinessEvents a, EventTypes b '
            'where b.Event = ? and b.EventTypeID = a.EventTypeID '
            'order by ID DESC LIMIT ?,1')
        query.addBindValue(text)
        query.addBindValue(self.eventSkip)
        query.exec_()
        recCount = 0
        while query.next():
            recCount += 1
            # Columns 2/3 hold city IDs and need a name lookup; all other
            # columns are shown verbatim when non-empty.
            origin_id = query.value(2).toString()
            if origin_id != '':
                self._appendDetail(modelView, 'Origin',
                                   self._cityName(origin_id))
            dest_id = query.value(3).toString()
            if dest_id != '':
                self._appendDetail(modelView, 'Destination',
                                   self._cityName(dest_id))
            for column, label in self._EVENT_FIELDS:
                value = query.value(column).toString()
                if value != '':
                    self._appendDetail(modelView, label, value)
        if recCount == 0:
            self.label_3.setText(_translate('Form', 'No Records found', None))
            self.inWork = False
        else:
            self.label_3.setText(_translate('Form', '', None))
            self.inWork = True
        self.tb_EventViewer.setModel(modelView)

    def clicked_bt_Earlier(self):
        """Page one record further back in history and refresh the view."""
        self.eventSkip = self.eventSkip + 1
        self.handleChanged(self.cb_EventType.currentText())

    def clicked_bt_Later(self):
        """Page one record forward (towards the newest) and refresh."""
        if self.eventSkip > 0:
            self.eventSkip = self.eventSkip - 1
        self.handleChanged(self.cb_EventType.currentText())
class Database:
    """Registers and opens the application's Qt SQLite connection."""

    def __init__(self, parent=None):
        # Register the SQLite driver once and point it at the business
        # database; QtSql.QSqlQuery objects then run on this connection.
        connection = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        connection.setDatabaseName('../Database/Business.db')
        connection.open()
        self.data = connection
<|reserved_special_token_1|>
from PyQt4 import QtCore, QtGui, QtSql
import sqlite3
# pyuic4 compatibility shims: fall back to identity / non-encoding
# variants when this PyQt build lacks QString or UnicodeUTF8.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # QString is absent in newer PyQt builds; pass str through as-is.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        # Translate with an explicit UTF-8 encoding argument (older API).
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer API: no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Form for revisiting recorded business events one record at a time.

    Originally generated by pyuic4 and hand-extended.  The event type
    chosen in the combo box drives a query against the BusinessEvents
    table; the << / >> buttons move an offset (``self.eventSkip``)
    backwards/forwards through the matching rows, newest first.
    """

    # (BusinessEvents column index, label shown in the detail view).
    # Order matters: rows appear in the table in this sequence, matching
    # the original hand-written if-chain.
    _EVENT_FIELDS = (
        (4, 'Weight'),
        (5, 'Volume'),
        (6, 'Time of Entry'),
        (7, 'Priority'),
        (8, 'Price Per Gram'),
        (9, 'Price Per CC'),
        (10, 'Company'),
        (11, 'Transport Type'),
        (12, 'Day of the Week'),
        (13, 'Frequency'),
        (14, 'Duration'),
    )

    def setupUi(self, Form):
        """Create all widgets, palettes and signal connections on *Form*."""
        Form.setObjectName(_fromUtf8('Form'))
        Form.resize(666, 538)
        palette = QtGui.QPalette()
        # Paging offset into the result set; 0 == most recent event.
        self.eventSkip = 0
        # Registers and opens the Qt SQL connection used by handleChanged().
        self.db = Database()
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
        # Cleared once paging runs past the last available record.
        self.inWork = True
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
        Form.setPalette(palette)
        # Two-column (label, value) read-only view showing one event.
        self.tb_EventViewer = QtGui.QTableView(Form)
        self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))
        self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))
        self.tb_EventViewer.horizontalHeader().setVisible(False)
        self.tb_EventViewer.verticalHeader().setVisible(False)
        self.bt_Earlier = QtGui.QPushButton(Form)
        self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))
        self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))
        self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)
        self.bt_Later = QtGui.QPushButton(Form)
        self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))
        self.bt_Later.setObjectName(_fromUtf8('bt_Later'))
        self.bt_Later.clicked.connect(self.clicked_bt_Later)
        self.label = QtGui.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
        self.label.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8('Segoe UI Light'))
        font.setPointSize(18)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8('label'))
        self.cb_EventType = QtGui.QComboBox(Form)
        self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))
        self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))
        # Re-query and redisplay whenever the user picks a new event type.
        self.cb_EventType.currentIndexChanged['QString'].connect(self.handleChanged)
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))
        # label_3 doubles as the "No Records found" status line.
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        self.label_2.setPalette(palette)
        self.label_3.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8('Segoe UI'))
        font.setPointSize(12)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8('label_2'))
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8('label_3'))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        self.initialize()

    def retranslateUi(self, Form):
        """Apply all user-visible (translated) strings to the widgets."""
        Form.setWindowTitle(_translate('Form', 'Revisit business events', None))
        self.bt_Earlier.setText(_translate('Form', '<<', None))
        self.bt_Later.setText(_translate('Form', '>>', None))
        self.label.setText(_translate('Form', 'Revisit business events', None))
        self.label_2.setText(_translate('Form', 'Select Event Type', None))

    def initialize(self):
        """Populate the event-type combo box from the database."""
        self.cb_EventType.addItems(self.getBusinessEventsType())

    def getBusinessEventsType(self):
        """Return every event name from the EventTypes table.

        Uses a plain sqlite3 connection (not the Qt driver) because the
        result is consumed as a Python list by the combo box.
        """
        conn = sqlite3.connect('../Database/Business.db')
        try:
            conn.text_factory = str
            cursor = conn.cursor()
            cursor.execute('SELECT Event FROM EventTypes')
            return [row[0] for row in cursor.fetchall()]
        finally:
            # Close even if the query raises; the original leaked the
            # connection on error.
            conn.close()

    def _cityName(self, city_id):
        """Look up the display name for a Cities.ID value."""
        lookup = QtSql.QSqlQuery()
        lookup.prepare('Select Name from Cities where ID = ? LIMIT 1')
        lookup.addBindValue(city_id)
        lookup.exec_()
        lookup.next()
        return lookup.value(0).toString()

    @staticmethod
    def _appendDetail(model, label, value):
        """Append one (label, value) row to the detail model."""
        model.appendRow([QtGui.QStandardItem(label),
                         QtGui.QStandardItem(value)])

    def handleChanged(self, text):
        """Show the event at offset ``self.eventSkip`` for event type *text*.

        Updates ``label_3`` and ``self.inWork`` to reflect whether a record
        was found, then installs the new model on the table view.
        """
        modelView = QtGui.QStandardItemModel()
        query = QtSql.QSqlQuery()
        # Bind parameters instead of concatenating the combo text into the
        # SQL: avoids injection/quoting problems with event names.
        query.prepare(
            'Select * from BusinessEvents a, EventTypes b '
            'where b.Event = ? and b.EventTypeID = a.EventTypeID '
            'order by ID DESC LIMIT ?,1')
        query.addBindValue(text)
        query.addBindValue(self.eventSkip)
        query.exec_()
        recCount = 0
        while query.next():
            recCount += 1
            # Columns 2/3 hold city IDs and need a name lookup; all other
            # columns are shown verbatim when non-empty.
            origin_id = query.value(2).toString()
            if origin_id != '':
                self._appendDetail(modelView, 'Origin',
                                   self._cityName(origin_id))
            dest_id = query.value(3).toString()
            if dest_id != '':
                self._appendDetail(modelView, 'Destination',
                                   self._cityName(dest_id))
            for column, label in self._EVENT_FIELDS:
                value = query.value(column).toString()
                if value != '':
                    self._appendDetail(modelView, label, value)
        if recCount == 0:
            self.label_3.setText(_translate('Form', 'No Records found', None))
            self.inWork = False
        else:
            self.label_3.setText(_translate('Form', '', None))
            self.inWork = True
        self.tb_EventViewer.setModel(modelView)

    def clicked_bt_Earlier(self):
        """Page one record further back in history and refresh the view."""
        self.eventSkip = self.eventSkip + 1
        self.handleChanged(self.cb_EventType.currentText())

    def clicked_bt_Later(self):
        """Page one record forward (towards the newest) and refresh."""
        if self.eventSkip > 0:
            self.eventSkip = self.eventSkip - 1
        self.handleChanged(self.cb_EventType.currentText())
class Database:
    """Registers and opens the application's Qt SQLite connection."""

    def __init__(self, parent=None):
        # Register the SQLite driver once and point it at the business
        # database; QtSql.QSqlQuery objects then run on this connection.
        connection = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        connection.setDatabaseName('../Database/Business.db')
        connection.open()
        self.data = connection
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'KPS_RevisitBusinessEvents.ui'
#
# Created: Sun May 18 14:50:49 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui, QtSql
import sqlite3
# pyuic4 compatibility shims: fall back to identity / non-encoding
# variants when this PyQt build lacks QString or UnicodeUTF8.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # QString is absent in newer PyQt builds; pass str through as-is.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        # Translate with an explicit UTF-8 encoding argument (older API).
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer API: no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    def setupUi(self, Form):
        """Create all widgets, palettes and signal connections on *Form*.

        Generated by pyuic4 and hand-edited: also initializes the paging
        state (``eventSkip``, ``inWork``) and opens the database.
        """
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(666, 538)
        palette = QtGui.QPalette()
        # Paging offset into the result set; 0 == most recent event.
        self.eventSkip = 0;
        # Registers and opens the Qt SQL connection used by handleChanged().
        self.db = Database()
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
        # Cleared once paging runs past the last available record.
        self.inWork = True
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
        brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
        Form.setPalette(palette)
        # Two-column (label, value) read-only view showing one event.
        self.tb_EventViewer = QtGui.QTableView(Form)
        self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))
        self.tb_EventViewer.setObjectName(_fromUtf8("tb_EventViewer"))
        self.tb_EventViewer.horizontalHeader().setVisible(False)
        self.tb_EventViewer.verticalHeader().setVisible(False)
        # self.tb_EventViewer.setColumnCount(0)
        # self.tb_EventViewer.setRowCount(0)
        self.bt_Earlier = QtGui.QPushButton(Form)
        self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))
        self.bt_Earlier.setObjectName(_fromUtf8("bt_Earlier"))
        self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)
        self.bt_Later = QtGui.QPushButton(Form)
        self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))
        self.bt_Later.setObjectName(_fromUtf8("bt_Later"))
        self.bt_Later.clicked.connect(self.clicked_bt_Later)
        self.label = QtGui.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
        self.label.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Segoe UI Light"))
        font.setPointSize(18)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("label"))
        self.cb_EventType = QtGui.QComboBox(Form)
        self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))
        self.cb_EventType.setObjectName(_fromUtf8("cb_EventType"))
        # Re-query and redisplay whenever the user picks a new event type.
        self.cb_EventType.currentIndexChanged['QString'].connect(self.handleChanged)
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))
        # label_3 doubles as the "No Records found" status line.
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        self.label_2.setPalette(palette)
        self.label_3.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Segoe UI"))
        font.setPointSize(12)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        self.initialize()
def retranslateUi(self, Form):
    """Apply the (translatable) captions to the window and its widgets."""
    Form.setWindowTitle(_translate("Form", "Revisit business events", None))
    # One caption table instead of repeated setText calls, so new widgets
    # only need a new row here.
    for widget, caption in (
        (self.bt_Earlier, "<<"),
        (self.bt_Later, ">>"),
        (self.label, "Revisit business events"),
        (self.label_2, "Select Event Type"),
    ):
        widget.setText(_translate("Form", caption, None))
def initialize(self):
    """Populate the event-type selector with the names read from the DB."""
    event_types = self.getBusinessEventsType()
    self.cb_EventType.addItems(event_types)
def getBusinessEventsType(self, db_path="../Database/Business.db"):
    """Return the list of event-type names from the EventTypes table.

    Args:
        db_path: Path to the SQLite database file.  Defaults to the
            application's business database, so existing callers are
            unaffected; parameterized so other deployments and tests can
            point at a different file.

    Returns:
        list of str: the Event column of every EventTypes row.
    """
    conn = sqlite3.connect(db_path)
    conn.text_factory = str  # plain str rows instead of unicode
    try:
        c = conn.cursor()
        c.execute('SELECT Event FROM EventTypes')
        return [row[0] for row in c.fetchall()]
    finally:
        # Close the connection even when the query raises (e.g. the
        # table is missing); the original leaked it on that path.
        conn.close()
def handleChanged(self, text):
    """Show the business event of type *text* at the current page offset.

    Fetches one BusinessEvents record (offset ``self.eventSkip`` from the
    newest) and displays each non-empty column as a (label, value) row in
    the table view.  Origin/Destination store city IDs, which are resolved
    to names.  Sets ``self.inWork`` and the status label according to
    whether a record was found.
    """
    modelView = QtGui.QStandardItemModel()
    query = QtSql.QSqlQuery()
    # Bound parameters instead of string concatenation: the event name
    # comes from a combo box, but binding rules out SQL injection and
    # quoting bugs outright.
    query.prepare(
        "Select * from BusinessEvents a, EventTypes b "
        "where b.Event = ? and b.EventTypeID = a.EventTypeID "
        "order by ID DESC LIMIT ?,1")
    query.addBindValue(text)
    query.addBindValue(self.eventSkip)
    query.exec_()
    # Result-set column index -> label shown in the view (replaces the
    # thirteen copy-pasted if-blocks of the original).
    columns = (
        (2, "Origin"),
        (3, "Destination"),
        (4, "Weight"),
        (5, "Volume"),
        (6, "Time of Entry"),
        (7, "Priority"),
        (8, "Price Per Gram"),
        (9, "Price Per CC"),
        (10, "Company"),
        (11, "Transport Type"),
        (12, "Day of the Week"),
        (13, "Frequency"),
        (14, "Duration"),
    )
    recCount = 0
    while query.next():
        recCount = recCount + 1
        for col, label in columns:
            value = query.value(col).toString()
            if value == '':
                continue  # column left blank in this record
            if col in (2, 3):
                # Origin/Destination hold a Cities ID; display its name.
                value = self._cityName(value)
            modelView.appendRow([QtGui.QStandardItem(label),
                                 QtGui.QStandardItem(value)])
    if recCount == 0:
        self.label_3.setText(_translate("Form", "No Records found", None))
        self.inWork = False
    else:
        self.label_3.setText(_translate("Form", "", None))
        self.inWork = True
    self.tb_EventViewer.setModel(modelView)

def _cityName(self, city_id):
    """Return the Name of the Cities row with the given ID.

    Args:
        city_id: City primary key as read from the event record.

    Returns:
        The Name column as a QString (empty if no row matched).
    """
    query = QtSql.QSqlQuery()
    query.prepare("Select Name from Cities where ID = ? LIMIT 1")
    query.addBindValue(city_id)
    query.exec_()
    query.next()
    return query.value(0).toString()
def clicked_bt_Earlier(self):
    """Page one record further back in history and refresh the view.

    The offset only advances while the current position still shows a
    record (``self.inWork``, maintained by handleChanged).  The original
    incremented unconditionally, so the user could page arbitrarily far
    past the oldest event and then needed as many ">>" clicks to return —
    this also restores symmetry with clicked_bt_Later's lower-bound guard.
    """
    if self.inWork:
        self.eventSkip = self.eventSkip + 1
        self.handleChanged(self.cb_EventType.currentText())
def clicked_bt_Later(self):
    """Page one record forward (toward the newest event) and refresh.

    NOTE(review): indentation was lost in the source dump; the refresh is
    assumed to run only when the offset actually changed — confirm against
    the original layout.
    """
    if self.eventSkip <= 0:
        return  # already at the most recent event
    self.eventSkip = self.eventSkip - 1
    self.handleChanged(self.cb_EventType.currentText())
class Database:
    """Opens the application's SQLite database through Qt's SQL layer.

    Instantiating this registers the default QSQLITE connection that the
    QtSql.QSqlQuery objects elsewhere in this module implicitly use.
    """

    def __init__(self, parent=None, db_path="../Database/Business.db"):
        """Open *db_path* as the default Qt SQL connection.

        Args:
            parent: Unused; kept for signature compatibility with callers.
            db_path: SQLite file to open.  Defaults to the business
                database so existing callers are unaffected.
        """
        self.data = QtSql.QSqlDatabase.addDatabase("QSQLITE")
        self.data.setDatabaseName(db_path)
        # Report a failed open immediately instead of silently ignoring it
        # (the original discarded open()'s return value), which otherwise
        # surfaces later as opaque query failures.
        if not self.data.open():
            print("Database: could not open %s" % db_path)
|
flexible
|
{
"blob_id": "8339113fd6b0c286cc48ec04e6e24978e2a4b44e",
"index": 9991,
"step-1": "<mask token>\n\n\nclass Ui_Form(object):\n\n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8('Form'))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0\n self.db = Database()\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n self.inWork = True\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8('bt_Later'))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n 
brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,\n brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI Light'))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8('label'))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))\n self.cb_EventType.currentIndexChanged['QString'].connect(self.\n handleChanged)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, 
QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI'))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8('label_2'))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8('label_3'))\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate('Form', 'Revisit business events', None)\n )\n self.bt_Earlier.setText(_translate('Form', '<<', None))\n self.bt_Later.setText(_translate('Form', '>>', None))\n self.label.setText(_translate('Form', 'Revisit business events', None))\n self.label_2.setText(_translate('Form', 'Select Event Type', None))\n <mask token>\n\n def getBusinessEventsType(self):\n conn = sqlite3.connect('../Database/Business.db')\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n\n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n query.exec_(\n \"Select * from BusinessEvents a, EventTypes b where b.Event = '\" +\n text +\n \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" +\n str(self.eventSkip) + ',1')\n recCount = 0\n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" +\n query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem('Origin')\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0)\n .toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n 
if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\n \"Select Name from Cities where ID = '\" + query.value(3)\n .toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem('Destination')\n modelInputValue = QtGui.QStandardItem(query_Destination.\n value(0).toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem('Weight')\n modelInputValue = QtGui.QStandardItem(query.value(4).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem('Volume')\n modelInputValue = QtGui.QStandardItem(query.value(5).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem('Time of Entry')\n modelInputValue = QtGui.QStandardItem(query.value(6).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem('Priority')\n modelInputValue = QtGui.QStandardItem(query.value(7).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per Gram')\n modelInputValue = QtGui.QStandardItem(query.value(8).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per CC')\n modelInputValue = QtGui.QStandardItem(query.value(9).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem('Company')\n modelInputValue = QtGui.QStandardItem(query.value(10).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(11).toString() != '':\n modelInputItem = 
QtGui.QStandardItem('Transport Type')\n modelInputValue = QtGui.QStandardItem(query.value(11).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem('Day of the Week')\n modelInputValue = QtGui.QStandardItem(query.value(12).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem('Frequency')\n modelInputValue = QtGui.QStandardItem(query.value(13).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem('Duration')\n modelInputValue = QtGui.QStandardItem(query.value(14).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate('Form', 'No Records found', None))\n self.inWork = False\n else:\n self.label_3.setText(_translate('Form', '', None))\n self.inWork = True\n self.tb_EventViewer.setModel(modelView)\n\n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n <mask token>\n\n\nclass Database:\n\n def __init__(self, parent=None):\n self.data = QtSql.QSqlDatabase.addDatabase('QSQLITE')\n self.data.setDatabaseName('../Database/Business.db')\n self.data.open()\n",
"step-2": "<mask token>\n\n\nclass Ui_Form(object):\n\n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8('Form'))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0\n self.db = Database()\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n self.inWork = True\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8('bt_Later'))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n 
brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,\n brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI Light'))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8('label'))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))\n self.cb_EventType.currentIndexChanged['QString'].connect(self.\n handleChanged)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, 
QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI'))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8('label_2'))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8('label_3'))\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate('Form', 'Revisit business events', None)\n )\n self.bt_Earlier.setText(_translate('Form', '<<', None))\n self.bt_Later.setText(_translate('Form', '>>', None))\n self.label.setText(_translate('Form', 'Revisit business events', None))\n self.label_2.setText(_translate('Form', 'Select Event Type', None))\n\n def initialize(self):\n self.cb_EventType.addItems(self.getBusinessEventsType())\n\n def getBusinessEventsType(self):\n conn = sqlite3.connect('../Database/Business.db')\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n\n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n query.exec_(\n \"Select * from BusinessEvents a, EventTypes b where b.Event = '\" +\n text +\n \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" +\n str(self.eventSkip) + ',1')\n recCount = 0\n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" +\n query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem('Origin')\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0)\n 
.toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\n \"Select Name from Cities where ID = '\" + query.value(3)\n .toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem('Destination')\n modelInputValue = QtGui.QStandardItem(query_Destination.\n value(0).toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem('Weight')\n modelInputValue = QtGui.QStandardItem(query.value(4).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem('Volume')\n modelInputValue = QtGui.QStandardItem(query.value(5).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem('Time of Entry')\n modelInputValue = QtGui.QStandardItem(query.value(6).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem('Priority')\n modelInputValue = QtGui.QStandardItem(query.value(7).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per Gram')\n modelInputValue = QtGui.QStandardItem(query.value(8).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per CC')\n modelInputValue = QtGui.QStandardItem(query.value(9).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem('Company')\n modelInputValue = QtGui.QStandardItem(query.value(10).\n toString())\n modelView.appendRow([modelInputItem, 
modelInputValue])\n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem('Transport Type')\n modelInputValue = QtGui.QStandardItem(query.value(11).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem('Day of the Week')\n modelInputValue = QtGui.QStandardItem(query.value(12).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem('Frequency')\n modelInputValue = QtGui.QStandardItem(query.value(13).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem('Duration')\n modelInputValue = QtGui.QStandardItem(query.value(14).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate('Form', 'No Records found', None))\n self.inWork = False\n else:\n self.label_3.setText(_translate('Form', '', None))\n self.inWork = True\n self.tb_EventViewer.setModel(modelView)\n\n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n\n def clicked_bt_Later(self):\n if self.eventSkip > 0:\n self.eventSkip = self.eventSkip - 1\n self.handleChanged(self.cb_EventType.currentText())\n\n\nclass Database:\n\n def __init__(self, parent=None):\n self.data = QtSql.QSqlDatabase.addDatabase('QSQLITE')\n self.data.setDatabaseName('../Database/Business.db')\n self.data.open()\n",
"step-3": "<mask token>\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n\n def _fromUtf8(s):\n return s\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\n\nclass Ui_Form(object):\n\n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8('Form'))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0\n self.db = Database()\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n self.inWork = True\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n 
self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8('bt_Later'))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,\n brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI Light'))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8('label'))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))\n self.cb_EventType.currentIndexChanged['QString'].connect(self.\n handleChanged)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n self.label_3 = QtGui.QLabel(Form)\n 
self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI'))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8('label_2'))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8('label_3'))\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate('Form', 'Revisit business events', None)\n )\n self.bt_Earlier.setText(_translate('Form', '<<', None))\n self.bt_Later.setText(_translate('Form', '>>', None))\n self.label.setText(_translate('Form', 'Revisit business events', None))\n self.label_2.setText(_translate('Form', 'Select Event Type', None))\n\n def initialize(self):\n self.cb_EventType.addItems(self.getBusinessEventsType())\n\n def getBusinessEventsType(self):\n conn = sqlite3.connect('../Database/Business.db')\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n\n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n query.exec_(\n \"Select * from BusinessEvents a, EventTypes b where b.Event = '\" +\n text +\n \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" +\n 
str(self.eventSkip) + ',1')\n recCount = 0\n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" +\n query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem('Origin')\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0)\n .toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\n \"Select Name from Cities where ID = '\" + query.value(3)\n .toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem('Destination')\n modelInputValue = QtGui.QStandardItem(query_Destination.\n value(0).toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem('Weight')\n modelInputValue = QtGui.QStandardItem(query.value(4).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem('Volume')\n modelInputValue = QtGui.QStandardItem(query.value(5).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem('Time of Entry')\n modelInputValue = QtGui.QStandardItem(query.value(6).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem('Priority')\n modelInputValue = QtGui.QStandardItem(query.value(7).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per Gram')\n modelInputValue = QtGui.QStandardItem(query.value(8).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if 
query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per CC')\n modelInputValue = QtGui.QStandardItem(query.value(9).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem('Company')\n modelInputValue = QtGui.QStandardItem(query.value(10).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem('Transport Type')\n modelInputValue = QtGui.QStandardItem(query.value(11).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem('Day of the Week')\n modelInputValue = QtGui.QStandardItem(query.value(12).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem('Frequency')\n modelInputValue = QtGui.QStandardItem(query.value(13).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem('Duration')\n modelInputValue = QtGui.QStandardItem(query.value(14).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate('Form', 'No Records found', None))\n self.inWork = False\n else:\n self.label_3.setText(_translate('Form', '', None))\n self.inWork = True\n self.tb_EventViewer.setModel(modelView)\n\n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n\n def clicked_bt_Later(self):\n if self.eventSkip > 0:\n self.eventSkip = self.eventSkip - 1\n self.handleChanged(self.cb_EventType.currentText())\n\n\nclass Database:\n\n def __init__(self, parent=None):\n self.data = QtSql.QSqlDatabase.addDatabase('QSQLITE')\n self.data.setDatabaseName('../Database/Business.db')\n 
self.data.open()\n",
"step-4": "from PyQt4 import QtCore, QtGui, QtSql\nimport sqlite3\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n\n def _fromUtf8(s):\n return s\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\n\nclass Ui_Form(object):\n\n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8('Form'))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0\n self.db = Database()\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n self.inWork = True\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8('tb_EventViewer'))\n self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8('bt_Earlier'))\n 
self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8('bt_Later'))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText,\n brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI Light'))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(_fromUtf8('label'))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8('cb_EventType'))\n self.cb_EventType.currentIndexChanged['QString'].connect(self.\n handleChanged)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 
21))\n self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,\n brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,\n brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8('Segoe UI'))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8('label_2'))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8('label_3'))\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate('Form', 'Revisit business events', None)\n )\n self.bt_Earlier.setText(_translate('Form', '<<', None))\n self.bt_Later.setText(_translate('Form', '>>', None))\n self.label.setText(_translate('Form', 'Revisit business events', None))\n self.label_2.setText(_translate('Form', 'Select Event Type', None))\n\n def initialize(self):\n self.cb_EventType.addItems(self.getBusinessEventsType())\n\n def getBusinessEventsType(self):\n conn = sqlite3.connect('../Database/Business.db')\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n\n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n query.exec_(\n \"Select * from BusinessEvents a, EventTypes b where b.Event = '\" +\n text +\n \"' and b.EventTypeID = a.EventTypeID 
order by ID DESC LIMIT \" +\n str(self.eventSkip) + ',1')\n recCount = 0\n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" +\n query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem('Origin')\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0)\n .toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\n \"Select Name from Cities where ID = '\" + query.value(3)\n .toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem('Destination')\n modelInputValue = QtGui.QStandardItem(query_Destination.\n value(0).toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem('Weight')\n modelInputValue = QtGui.QStandardItem(query.value(4).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem('Volume')\n modelInputValue = QtGui.QStandardItem(query.value(5).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem('Time of Entry')\n modelInputValue = QtGui.QStandardItem(query.value(6).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem('Priority')\n modelInputValue = QtGui.QStandardItem(query.value(7).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per Gram')\n modelInputValue = QtGui.QStandardItem(query.value(8).toString()\n )\n modelView.appendRow([modelInputItem, 
modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem('Price Per CC')\n modelInputValue = QtGui.QStandardItem(query.value(9).toString()\n )\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem('Company')\n modelInputValue = QtGui.QStandardItem(query.value(10).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem('Transport Type')\n modelInputValue = QtGui.QStandardItem(query.value(11).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem('Day of the Week')\n modelInputValue = QtGui.QStandardItem(query.value(12).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem('Frequency')\n modelInputValue = QtGui.QStandardItem(query.value(13).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem('Duration')\n modelInputValue = QtGui.QStandardItem(query.value(14).\n toString())\n modelView.appendRow([modelInputItem, modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate('Form', 'No Records found', None))\n self.inWork = False\n else:\n self.label_3.setText(_translate('Form', '', None))\n self.inWork = True\n self.tb_EventViewer.setModel(modelView)\n\n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n\n def clicked_bt_Later(self):\n if self.eventSkip > 0:\n self.eventSkip = self.eventSkip - 1\n self.handleChanged(self.cb_EventType.currentText())\n\n\nclass Database:\n\n def __init__(self, parent=None):\n self.data = QtSql.QSqlDatabase.addDatabase('QSQLITE')\n 
self.data.setDatabaseName('../Database/Business.db')\n self.data.open()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'KPS_RevisitBusinessEvents.ui'\n#\n# Created: Sun May 18 14:50:49 2014\n# by: PyQt4 UI code generator 4.10.4\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui, QtSql\nimport sqlite3\n\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_Form(object):\n \n \n def setupUi(self, Form):\n Form.setObjectName(_fromUtf8(\"Form\"))\n Form.resize(666, 538)\n palette = QtGui.QPalette()\n self.eventSkip = 0;\n self.db = Database()\n \n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern) \n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n \n self.inWork = True\n \n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern) \n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n \n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n \n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n \n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(8, 129, 2))\n brush.setStyle(QtCore.Qt.SolidPattern)\n \n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n Form.setPalette(palette)\n self.tb_EventViewer = QtGui.QTableView(Form)\n self.tb_EventViewer.setGeometry(QtCore.QRect(60, 120, 531, 351))\n self.tb_EventViewer.setObjectName(_fromUtf8(\"tb_EventViewer\"))\n 
self.tb_EventViewer.horizontalHeader().setVisible(False)\n self.tb_EventViewer.verticalHeader().setVisible(False)\n # self.tb_EventViewer.setColumnCount(0)\n # self.tb_EventViewer.setRowCount(0)\n self.bt_Earlier = QtGui.QPushButton(Form)\n self.bt_Earlier.setGeometry(QtCore.QRect(60, 90, 75, 23))\n self.bt_Earlier.setObjectName(_fromUtf8(\"bt_Earlier\"))\n self.bt_Earlier.clicked.connect(self.clicked_bt_Earlier)\n \n \n self.bt_Later = QtGui.QPushButton(Form)\n self.bt_Later.setGeometry(QtCore.QRect(510, 90, 75, 23))\n self.bt_Later.setObjectName(_fromUtf8(\"bt_Later\"))\n self.bt_Later.clicked.connect(self.clicked_bt_Later)\n \n self.label = QtGui.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(70, 0, 511, 41))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)\n self.label.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Segoe UI Light\"))\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n 
self.label.setObjectName(_fromUtf8(\"label\"))\n self.cb_EventType = QtGui.QComboBox(Form)\n self.cb_EventType.setGeometry(QtCore.QRect(230, 50, 221, 22))\n self.cb_EventType.setObjectName(_fromUtf8(\"cb_EventType\")) \n self.cb_EventType.currentIndexChanged['QString'].connect(self.handleChanged) \n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(70, 50, 121, 21))\n \n self.label_3 = QtGui.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(190, 90, 221, 21))\n \n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n self.label_2.setPalette(palette)\n self.label_3.setPalette(palette)\n font = QtGui.QFont()\n font.setFamily(_fromUtf8(\"Segoe UI\"))\n font.setPointSize(12)\n self.label_2.setFont(font)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.label_3.setFont(font)\n self.label_3.setObjectName(_fromUtf8(\"label_3\"))\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.initialize()\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate(\"Form\", \"Revisit business events\", None))\n self.bt_Earlier.setText(_translate(\"Form\", \"<<\", None))\n self.bt_Later.setText(_translate(\"Form\", \">>\", None))\n self.label.setText(_translate(\"Form\", \"Revisit business events\", None))\n self.label_2.setText(_translate(\"Form\", \"Select Event Type\", None))\n \n \n def initialize(self):\n self.cb_EventType.addItems(self.getBusinessEventsType())\n # self.cb_Destination.addItems(RH.getLocations())\n \n def 
getBusinessEventsType(self):\n conn = sqlite3.connect(\"../Database/Business.db\")\n conn.text_factory = str\n c = conn.cursor()\n c.execute('SELECT Event FROM EventTypes')\n locs = [r[0] for r in c.fetchall()]\n conn.close()\n return locs\n \n def handleChanged(self, text):\n modelView = QtGui.QStandardItemModel()\n query = QtSql.QSqlQuery()\n\n query.exec_(\"Select * from BusinessEvents a, EventTypes b where b.Event = '\" + text + \"' and b.EventTypeID = a.EventTypeID order by ID DESC LIMIT \" + str(self.eventSkip) + \",1\")\n recCount = 0;\n \n while query.next():\n recCount = recCount + 1\n if query.value(2).toString() != '':\n query_Origin = QtSql.QSqlQuery()\n query_Origin.exec_(\"Select Name from Cities where ID = '\" + query.value(2).toString() + \"' LIMIT 1\")\n query_Origin.next()\n modelInputItem = QtGui.QStandardItem(\"Origin\")\n modelInputValue = QtGui.QStandardItem(query_Origin.value(0).toString())\n modelView.appendRow([modelInputItem,modelInputValue])\n if query.value(3).toString() != '':\n query_Destination = QtSql.QSqlQuery()\n query_Destination.exec_(\"Select Name from Cities where ID = '\" + query.value(3).toString() + \"' LIMIT 1\")\n query_Destination.next()\n modelInputItem = QtGui.QStandardItem(\"Destination\")\n modelInputValue = QtGui.QStandardItem(query_Destination.value(0).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(4).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Weight\")\n modelInputValue = QtGui.QStandardItem(query.value(4).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(5).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Volume\")\n modelInputValue = QtGui.QStandardItem(query.value(5).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(6).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Time of Entry\")\n modelInputValue = QtGui.QStandardItem(query.value(6).toString())\n 
modelView.appendRow([modelInputItem,modelInputValue])\n if query.value(7).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Priority\")\n modelInputValue = QtGui.QStandardItem(query.value(7).toString())\n modelView.appendRow([modelInputItem,modelInputValue])\n if query.value(8).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Price Per Gram\")\n modelInputValue = QtGui.QStandardItem(query.value(8).toString())\n modelView.appendRow([modelInputItem,modelInputValue])\n if query.value(9).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Price Per CC\")\n modelInputValue = QtGui.QStandardItem(query.value(9).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(10).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Company\")\n modelInputValue = QtGui.QStandardItem(query.value(10).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(11).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Transport Type\")\n modelInputValue = QtGui.QStandardItem(query.value(11).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(12).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Day of the Week\")\n modelInputValue = QtGui.QStandardItem(query.value(12).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(13).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Frequency\")\n modelInputValue = QtGui.QStandardItem(query.value(13).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n if query.value(14).toString() != '':\n modelInputItem = QtGui.QStandardItem(\"Duration\")\n modelInputValue = QtGui.QStandardItem(query.value(14).toString())\n modelView.appendRow([modelInputItem,modelInputValue]) \n #modelInputValue = QtGui.QStandardItem('Value')\n # modelView.appendRow([modelInputItem,modelInputValue])\n if recCount == 0:\n self.label_3.setText(_translate(\"Form\", \"No Records found\", 
None))\n self.inWork = False\n else:\n self.label_3.setText(_translate(\"Form\", \"\", None))\n self.inWork = True\n \n self.tb_EventViewer.setModel(modelView)\n \n def clicked_bt_Earlier(self):\n self.eventSkip = self.eventSkip + 1\n self.handleChanged(self.cb_EventType.currentText())\n \n def clicked_bt_Later(self):\n if self.eventSkip > 0:\n self.eventSkip = self.eventSkip - 1 \n self.handleChanged(self.cb_EventType.currentText())\n \nclass Database:\n def __init__(self, parent = None):\n self.data = QtSql.QSqlDatabase.addDatabase(\"QSQLITE\")\n self.data.setDatabaseName(\"../Database/Business.db\")\n self.data.open()\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
import sys
# Competitive-programming style: redirect stdin so input() reads the test file.
sys.stdin = open('input.txt', 'rt')
# Block type codes as given in the input: 1 = single cell,
# 2 = 1x2 horizontal pair, 3 = 2x1 vertical pair.
BLOCK_0 = 1
BLOCK_1 = 2
BLOCK_2 = 3
# N: number of blocks to drop.
N = int(input())
X, Y = 10, 10
# x: row, y: column
# Color/board selectors: GRN moves blocks downward (rows 4..9, cols 0..3),
# BLU moves blocks rightward (cols 4..9, rows 0..3).
GRN = 0
BLU = 1
# 10x10 grid; 0 = empty, 1 = occupied.
maps = [[0]*Y for _ in range(X)]
# Per-color step deltas, indexed by GRN/BLU: green moves +1 row, blue +1 col.
dx = [1, 0]
dy = [0, 1]
def outMaps(x, y):
    """Return True when cell (x, y) lies outside the 10x10 board."""
    inside = 0 <= x < X and 0 <= y < Y
    return not inside
def meetBlock(x, y, maps):
    """Return True when cell (x, y) of the grid is already occupied."""
    return maps[x][y] == 1
def onlyUpdate(n_blocks, xs, ys, maps):
    """Stamp the first n_blocks (xs, ys) cells into the grid as occupied."""
    for bx, by in zip(xs[:n_blocks], ys[:n_blocks]):
        maps[bx][by] = 1
def oneLineFull(maps, CLR):
    """Scan lanes 4..9 of the given color's area for a fully occupied line.

    Returns (True, index) for the first full line found, else (False, 0).
    GRN checks row `i` across cols 0..3; BLU checks column `i` across rows 0..3.
    """
    for idx in range(4, 10):
        full = True
        for k in range(4):
            if CLR == GRN and maps[idx][k] == 0:
                full = False
                break
            if CLR == BLU and maps[k][idx] == 0:
                full = False
                break
        if full:
            return True, idx
    return False, 0
def pullAndUpdate(olf_idx, maps, CLR):
    """Remove the full line at olf_idx and shift everything above/left of it
    one step toward the cleared line (down for GRN, right for BLU).

    When olf_idx == 4 (first lane line) there is nothing to shift in from,
    so that line is simply cleared.
    """
    clearing_first_line = (olf_idx == 4)
    for line in range(olf_idx, 3, -1):
        for k in range(4):
            if CLR == GRN:
                if clearing_first_line:
                    maps[line][k] = 0
                else:
                    maps[line][k] = maps[line - 1][k]
                    maps[line - 1][k] = 0
            elif CLR == BLU:
                if clearing_first_line:
                    maps[k][line] = 0
                else:
                    maps[k][line] = maps[k][line - 1]
                    maps[k][line - 1] = 0
def pushAndPullUpdate(n_inBorder, maps, CLR):
    """Push every lane line deeper by n_inBorder steps to clear the border.

    Lines are copied from the far end backward so no line is overwritten
    before it has been moved; vacated source lines are zeroed.
    """
    for src in range(10 - 1 - n_inBorder, 3, -1):
        dst = src + n_inBorder
        for k in range(4):
            if CLR == GRN:
                maps[dst][k] = maps[src][k]
                maps[src][k] = 0
            elif CLR == BLU:
                maps[k][dst] = maps[k][src]
                maps[k][src] = 0
def print_maps(maps):
    """Debug helper: dump the full grid to stdout, one row per line,
    each cell followed by a space, with a trailing blank line."""
    for row in range(X):
        rendered = ''.join(str(maps[row][col]) + ' ' for col in range(Y))
        print(rendered)
    print()
def isBlockInBorder(maps, CLR):
    """Count border lines (indices 4 and 5) of the given color's lane that
    contain at least one occupied cell."""
    occupied = 0
    for line in range(4, 6):
        green_hit = CLR == GRN and any(maps[line][k] == 1 for k in range(4))
        blue_hit = CLR == BLU and any(maps[k][line] == 1 for k in range(4))
        if green_hit or blue_hit:
            occupied += 1
    return occupied
def Mover(n_blocks, xs_ori, ys_ori, maps, CLR):
    """Slide a block along the CLR direction until it hits the board edge
    or an occupied cell, settle it, clear full lines, and resolve the border.

    Returns the number of lines cleared by this drop.
    Works on copies of the coordinate lists so the caller can reuse them
    for the other color's board.
    """
    xs = xs_ori.copy()
    ys = ys_ori.copy()
    score = 0
    STOP_FLAG = False
    while not STOP_FLAG:
        # Probe one step ahead for every cell of the block.
        for i in range(n_blocks):
            xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]
            if outMaps(xt, yt):
                STOP_FLAG = True
                break
            if meetBlock(xt, yt, maps):
                STOP_FLAG = True
                break
        else:
            # No break occurred: every cell can advance, so commit the move.
            for i in range(n_blocks):
                xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]
    # Exited via STOP_FLAG == True: the block has finished moving,
    # so stamp it into the grid and check for fully occupied lines.
    onlyUpdate(n_blocks, xs, ys, maps)
    # Keep clearing as long as some line is completely full.
    OLF_FLAG = True
    while OLF_FLAG:
        OLF_FLAG, olf_idx = oneLineFull(maps, CLR)
        if OLF_FLAG:
            score += 1
            pullAndUpdate(olf_idx, maps, CLR)
    # If any cell remains inside the border lines, push the whole lane deeper.
    n_inBorder = isBlockInBorder(maps, CLR)
    if n_inBorder:
        pushAndPullUpdate(n_inBorder, maps, CLR)
    return score
def Area_score(maps, CLR):
    """Return the number of occupied cells in the given color's lane
    (lines 4..9, width 4)."""
    total = 0
    for line in range(4, 10):
        if CLR == GRN:
            total += sum(maps[line][k] for k in range(4))
        elif CLR == BLU:
            total += sum(maps[k][line] for k in range(4))
    return total
# Drive the simulation: drop each input block onto both boards and
# accumulate the line-clear score.
total_score = 0
for i in range(N):
    # t: block type, (x, y): top-left cell of the block in the entry area.
    t, x, y = map(int, input().split())
    xs, ys = [x], [y]
    if t == BLOCK_0:
        n_blocks = 1
    elif t == BLOCK_1:
        # 1x2 horizontal pair: second cell is one column to the right.
        n_blocks = 2
        xs.append(x)
        ys.append(y+1)
    elif t == BLOCK_2:
        # 2x1 vertical pair: second cell is one row below.
        n_blocks = 2
        xs.append(x+1)
        ys.append(y)

    # The same block is dropped on the green board (downward) and the
    # blue board (rightward); Mover copies xs/ys so both see the start cells.
    total_score += Mover(n_blocks, xs, ys, maps, GRN)
    total_score += Mover(n_blocks, xs, ys, maps, BLU)
    #print_maps(maps)

# Final answers: total lines cleared, then total cells left on both boards.
grn_score = Area_score(maps, GRN)
blu_score = Area_score(maps, BLU)

print(total_score)
print(grn_score+blu_score)
|
normal
|
{
"blob_id": "937d01eaa82cbfe07b20fae9320c554a0960d7b1",
"index": 571,
"step-1": "<mask token>\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if 
n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if 
OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if 
OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<mask token>\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y + 1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x + 1)\n ys.append(y)\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\n<mask token>\nprint(total_score)\nprint(grn_score + blu_score)\n",
"step-4": "<mask token>\nsys.stdin = open('input.txt', 'rt')\nBLOCK_0 = 1\nBLOCK_1 = 2\nBLOCK_2 = 3\nN = int(input())\nX, Y = 10, 10\nGRN = 0\nBLU = 1\nmaps = [([0] * Y) for _ in range(X)]\ndx = [1, 0]\ndy = [0, 1]\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in 
range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\ntotal_score = 0\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y + 1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x + 1)\n ys.append(y)\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\ngrn_score = Area_score(maps, GRN)\nblu_score = Area_score(maps, BLU)\nprint(total_score)\nprint(grn_score + blu_score)\n",
"step-5": "import sys\nsys.stdin = open('input.txt', 'rt')\nBLOCK_0 = 1\nBLOCK_1 = 2\nBLOCK_2 = 3\nN = int(input())\nX, Y = 10, 10\n# x: 행 , y: 열A\nGRN = 0\nBLU = 1\nmaps = [[0]*Y for _ in range(X)]\ndx = [1, 0]\ndy = [0, 1]\n\ndef outMaps(x, y):\n global X, Y\n if 0<=x<X and 0<=y<Y: return False\n else: return True\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1: return True\n else: return False\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else: # 전부 1이여서 full line일 때\n return True, i\n return False, 0\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n #for olf in list_olf:\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i-1][j]\n maps[i-1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i-1]\n maps[j][i-1] = 0\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10-1-n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i+n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i+n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if (CLR == GRN and maps[i][j] == 1) or (CLR == BLU and maps[j][i] == 1):\n cnt += 1\n break\n return cnt\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n # 
break 걸리지 않고 넘어왔으므로, update\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n # 만약 STOP_FLAG == True 로 탈출했다면\n # 해당 상자의 이동이 끝난 것 이므로 한 줄이 전부 차있는 것이 있는지 check\n # maps에 업데이트\n onlyUpdate(n_blocks, xs, ys, maps)\n \n # 만약 one line full 인 라인이 있다면\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n\n # 만약 경계안에 block이 존재한다면\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN: score += maps[i][j]\n elif CLR == BLU: score += maps[j][i]\n return score\n\n\ntotal_score = 0\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y+1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x+1)\n ys.append(y)\n\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\n #print_maps(maps)\n\ngrn_score = Area_score(maps, GRN)\nblu_score = Area_score(maps, BLU)\n\nprint(total_score)\nprint(grn_score+blu_score)\n\n\n\n\n",
"step-ids": [
9,
10,
11,
12,
14
]
}
|
[
9,
10,
11,
12,
14
] |
<|reserved_special_token_0|>
class Item(object):
def __init__(self, name, category):
self.name = name
self.category = category
class Category(object):
def __init__(self, name):
self.name = name
class ItemTable(Table):
name = Col('Name')
category_name = Col('Category', attr_list=['category', 'name'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Item(object):
def __init__(self, name, category):
self.name = name
self.category = category
class Category(object):
def __init__(self, name):
self.name = name
class ItemTable(Table):
name = Col('Name')
category_name = Col('Category', attr_list=['category', 'name'])
def main():
items = [Item('A', Category('catA')), Item('B', Category('catB'))]
tab = ItemTable(items)
print(tab.__html__())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Item(object):
def __init__(self, name, category):
self.name = name
self.category = category
class Category(object):
def __init__(self, name):
self.name = name
class ItemTable(Table):
name = Col('Name')
category_name = Col('Category', attr_list=['category', 'name'])
def main():
items = [Item('A', Category('catA')), Item('B', Category('catB'))]
tab = ItemTable(items)
print(tab.__html__())
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from flask_table import Table, Col
<|reserved_special_token_0|>
class Item(object):
def __init__(self, name, category):
self.name = name
self.category = category
class Category(object):
def __init__(self, name):
self.name = name
class ItemTable(Table):
name = Col('Name')
category_name = Col('Category', attr_list=['category', 'name'])
def main():
items = [Item('A', Category('catA')), Item('B', Category('catB'))]
tab = ItemTable(items)
print(tab.__html__())
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from flask_table import Table, Col
"""Lets suppose that we have a class that we get an iterable of from
somewhere, such as a database. We can declare a table that pulls out
the relevant entries, escapes them and displays them.
"""
class Item(object):
def __init__(self, name, category):
self.name = name
self.category = category
class Category(object):
def __init__(self, name):
self.name = name
class ItemTable(Table):
name = Col('Name')
category_name = Col('Category', attr_list=['category', 'name'])
# Equivalently: Col('Category', attr='category.name')
# Both syntaxes are kept as the second is more readable, but
# doesn't cover all options. Such as if the items are dicts and
# the keys have dots in.
def main():
items = [Item('A', Category('catA')),
Item('B', Category('catB'))]
tab = ItemTable(items)
print(tab.__html__())
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "3191fa5f9c50993d17e12e4e2e9d56cfce2108e7",
"index": 5646,
"step-1": "<mask token>\n\n\nclass Item(object):\n\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Item(object):\n\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n\n\ndef main():\n items = [Item('A', Category('catA')), Item('B', Category('catB'))]\n tab = ItemTable(items)\n print(tab.__html__())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Item(object):\n\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n\n\ndef main():\n items = [Item('A', Category('catA')), Item('B', Category('catB'))]\n tab = ItemTable(items)\n print(tab.__html__())\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from flask_table import Table, Col\n<mask token>\n\n\nclass Item(object):\n\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n\n\ndef main():\n items = [Item('A', Category('catA')), Item('B', Category('catB'))]\n tab = ItemTable(items)\n print(tab.__html__())\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from flask_table import Table, Col\n\n\n\"\"\"Lets suppose that we have a class that we get an iterable of from\nsomewhere, such as a database. We can declare a table that pulls out\nthe relevant entries, escapes them and displays them.\n\n\"\"\"\n\n\nclass Item(object):\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n # Equivalently: Col('Category', attr='category.name')\n # Both syntaxes are kept as the second is more readable, but\n # doesn't cover all options. Such as if the items are dicts and\n # the keys have dots in.\n\n\ndef main():\n items = [Item('A', Category('catA')),\n Item('B', Category('catB'))]\n\n tab = ItemTable(items)\n print(tab.__html__())\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# 上传文件
import os
from selenium import webdriver
# 获取当前路径的 “files” 文件夹
file_path = os.path.abspath("./files//")
# 浏览器打开文件夹的 upfile.html 文件
driver = webdriver.Firefox()
upload_page = "file:///" + file_path + "/upfile.html"
driver.get(upload_page)
# 定位上传按钮,添加本地文件
driver.find_element_by_id("inputfile").send_keys(file_path + "\\test.txt")
|
normal
|
{
"blob_id": "9e28fa1f221df13f9cc8e6b71586da961ebdc0e0",
"index": 4580,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get(upload_page)\ndriver.find_element_by_id('inputfile').send_keys(file_path + '\\\\test.txt')\n",
"step-3": "<mask token>\nfile_path = os.path.abspath('./files//')\ndriver = webdriver.Firefox()\nupload_page = 'file:///' + file_path + '/upfile.html'\ndriver.get(upload_page)\ndriver.find_element_by_id('inputfile').send_keys(file_path + '\\\\test.txt')\n",
"step-4": "import os\nfrom selenium import webdriver\nfile_path = os.path.abspath('./files//')\ndriver = webdriver.Firefox()\nupload_page = 'file:///' + file_path + '/upfile.html'\ndriver.get(upload_page)\ndriver.find_element_by_id('inputfile').send_keys(file_path + '\\\\test.txt')\n",
"step-5": "# 上传文件\n\nimport os\nfrom selenium import webdriver\n\n# 获取当前路径的 “files” 文件夹\nfile_path = os.path.abspath(\"./files//\")\n\n# 浏览器打开文件夹的 upfile.html 文件\ndriver = webdriver.Firefox()\nupload_page = \"file:///\" + file_path + \"/upfile.html\"\ndriver.get(upload_page)\n\n# 定位上传按钮,添加本地文件\ndriver.find_element_by_id(\"inputfile\").send_keys(file_path + \"\\\\test.txt\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sklearn.metrics as metrics
import sklearn.cross_validation as cv
from sklearn.externals import joblib
import MachineLearning.Reinforcement.InternalSQLManager as sqlManager
class ReinforcementLearner:
def __init__(self, clf=None, load=False, clfName=None):
"""
Initialise the Classifier, either from the provided model or from the stored classifier
:param clf: The current classifier, not yet fitted to the data
:param load: Set to True in order to load a previously saved model
"""
if load:
self.clf = joblib.load("model.pkl")
self.reTrain = True
else:
self.clf = clf
self.reTrain = False
if clfName == None:
self.name = self.clf.__class__.__name__
else:
self.name = clfName
def fit(self, X, y, scoring="accuracy", crossval=5):
"""
Fit the Reinforcement classifier with data, either adding to previous previous data or learning for first time.
:param X: Input Features
:param y: Class Labels
:param scoring: Scoring used for cross validation
:param crossval: Cross Validation number of folds
:return: True if a new model is fit to the data, or a previous model is updated
False if old model when fit to new data performs poorly in comparison to
earlier data
"""
if not self.reTrain: # Train first time
score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval)
sqlManager.insertValue(self.name, 0.0, score.mean(), 0, len(y), 1) # Store the first result of clf
self.clf.fit(X, y)
joblib.dump(self.clf, "model.pkl") # Store the CLF
print("Data Fit")
return True
else:
previousData = sqlManager.selectNewestRecord(self.name) # Check the last entry of CLF
if len(previousData) > 0:
oldSize = previousData[5]
newSize = len(y)
accScore = previousData[3]
score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval)
newAccScore = score.mean()
print("Old Accuracy Score : ", accScore)
print("New Accuracy Score : ", newAccScore)
if accScore <= newAccScore: # If new data is benefitial, increases accuracy
print("Reinforcement Learning : Newer model is superior. Saving Model.")
self.clf.fit(X, y)
sqlManager.insertValue(self.name, accScore, newAccScore, oldSize, newSize, 1)
joblib.dump(self.clf, "model.pkl")
return True
else:
print("Reinforcement Learning : Newer model is inferior. Not saving model.")
return False
def predict(self, X):
return self.clf.predict(X)
def __exit__(self, exc_type, exc_val, exc_tb):
sqlManager.close()
if __name__ == "__main__":
pass
|
normal
|
{
"blob_id": "c9be3d25824093528e2bee51c045d05e036daa67",
"index": 9715,
"step-1": "<mask token>\n\n\nclass ReinforcementLearner:\n\n def __init__(self, clf=None, load=False, clfName=None):\n \"\"\"\n Initialise the Classifier, either from the provided model or from the stored classifier\n\n :param clf: The current classifier, not yet fitted to the data\n :param load: Set to True in order to load a previously saved model\n \"\"\"\n if load:\n self.clf = joblib.load('model.pkl')\n self.reTrain = True\n else:\n self.clf = clf\n self.reTrain = False\n if clfName == None:\n self.name = self.clf.__class__.__name__\n else:\n self.name = clfName\n <mask token>\n <mask token>\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sqlManager.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ReinforcementLearner:\n\n def __init__(self, clf=None, load=False, clfName=None):\n \"\"\"\n Initialise the Classifier, either from the provided model or from the stored classifier\n\n :param clf: The current classifier, not yet fitted to the data\n :param load: Set to True in order to load a previously saved model\n \"\"\"\n if load:\n self.clf = joblib.load('model.pkl')\n self.reTrain = True\n else:\n self.clf = clf\n self.reTrain = False\n if clfName == None:\n self.name = self.clf.__class__.__name__\n else:\n self.name = clfName\n <mask token>\n\n def predict(self, X):\n return self.clf.predict(X)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sqlManager.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ReinforcementLearner:\n\n def __init__(self, clf=None, load=False, clfName=None):\n \"\"\"\n Initialise the Classifier, either from the provided model or from the stored classifier\n\n :param clf: The current classifier, not yet fitted to the data\n :param load: Set to True in order to load a previously saved model\n \"\"\"\n if load:\n self.clf = joblib.load('model.pkl')\n self.reTrain = True\n else:\n self.clf = clf\n self.reTrain = False\n if clfName == None:\n self.name = self.clf.__class__.__name__\n else:\n self.name = clfName\n\n def fit(self, X, y, scoring='accuracy', crossval=5):\n \"\"\"\n Fit the Reinforcement classifier with data, either adding to previous previous data or learning for first time.\n\n :param X: Input Features\n :param y: Class Labels\n :param scoring: Scoring used for cross validation\n :param crossval: Cross Validation number of folds\n :return: True if a new model is fit to the data, or a previous model is updated\n False if old model when fit to new data performs poorly in comparison to\n earlier data\n \"\"\"\n if not self.reTrain:\n score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval)\n sqlManager.insertValue(self.name, 0.0, score.mean(), 0, len(y), 1)\n self.clf.fit(X, y)\n joblib.dump(self.clf, 'model.pkl')\n print('Data Fit')\n return True\n else:\n previousData = sqlManager.selectNewestRecord(self.name)\n if len(previousData) > 0:\n oldSize = previousData[5]\n newSize = len(y)\n accScore = previousData[3]\n score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval\n )\n newAccScore = score.mean()\n print('Old Accuracy Score : ', accScore)\n print('New Accuracy Score : ', newAccScore)\n if accScore <= newAccScore:\n print(\n 'Reinforcement Learning : Newer model is superior. 
Saving Model.'\n )\n self.clf.fit(X, y)\n sqlManager.insertValue(self.name, accScore, newAccScore,\n oldSize, newSize, 1)\n joblib.dump(self.clf, 'model.pkl')\n return True\n else:\n print(\n 'Reinforcement Learning : Newer model is inferior. Not saving model.'\n )\n return False\n\n def predict(self, X):\n return self.clf.predict(X)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sqlManager.close()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ReinforcementLearner:\n\n def __init__(self, clf=None, load=False, clfName=None):\n \"\"\"\n Initialise the Classifier, either from the provided model or from the stored classifier\n\n :param clf: The current classifier, not yet fitted to the data\n :param load: Set to True in order to load a previously saved model\n \"\"\"\n if load:\n self.clf = joblib.load('model.pkl')\n self.reTrain = True\n else:\n self.clf = clf\n self.reTrain = False\n if clfName == None:\n self.name = self.clf.__class__.__name__\n else:\n self.name = clfName\n\n def fit(self, X, y, scoring='accuracy', crossval=5):\n \"\"\"\n Fit the Reinforcement classifier with data, either adding to previous previous data or learning for first time.\n\n :param X: Input Features\n :param y: Class Labels\n :param scoring: Scoring used for cross validation\n :param crossval: Cross Validation number of folds\n :return: True if a new model is fit to the data, or a previous model is updated\n False if old model when fit to new data performs poorly in comparison to\n earlier data\n \"\"\"\n if not self.reTrain:\n score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval)\n sqlManager.insertValue(self.name, 0.0, score.mean(), 0, len(y), 1)\n self.clf.fit(X, y)\n joblib.dump(self.clf, 'model.pkl')\n print('Data Fit')\n return True\n else:\n previousData = sqlManager.selectNewestRecord(self.name)\n if len(previousData) > 0:\n oldSize = previousData[5]\n newSize = len(y)\n accScore = previousData[3]\n score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval\n )\n newAccScore = score.mean()\n print('Old Accuracy Score : ', accScore)\n print('New Accuracy Score : ', newAccScore)\n if accScore <= newAccScore:\n print(\n 'Reinforcement Learning : Newer model is superior. 
Saving Model.'\n )\n self.clf.fit(X, y)\n sqlManager.insertValue(self.name, accScore, newAccScore,\n oldSize, newSize, 1)\n joblib.dump(self.clf, 'model.pkl')\n return True\n else:\n print(\n 'Reinforcement Learning : Newer model is inferior. Not saving model.'\n )\n return False\n\n def predict(self, X):\n return self.clf.predict(X)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sqlManager.close()\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "import sklearn.metrics as metrics\nimport sklearn.cross_validation as cv\nfrom sklearn.externals import joblib\nimport MachineLearning.Reinforcement.InternalSQLManager as sqlManager\n\nclass ReinforcementLearner:\n\n def __init__(self, clf=None, load=False, clfName=None):\n \"\"\"\n Initialise the Classifier, either from the provided model or from the stored classifier\n\n :param clf: The current classifier, not yet fitted to the data\n :param load: Set to True in order to load a previously saved model\n \"\"\"\n\n if load:\n self.clf = joblib.load(\"model.pkl\")\n self.reTrain = True\n else:\n self.clf = clf\n self.reTrain = False\n\n if clfName == None:\n self.name = self.clf.__class__.__name__\n else:\n self.name = clfName\n\n def fit(self, X, y, scoring=\"accuracy\", crossval=5):\n \"\"\"\n Fit the Reinforcement classifier with data, either adding to previous previous data or learning for first time.\n\n :param X: Input Features\n :param y: Class Labels\n :param scoring: Scoring used for cross validation\n :param crossval: Cross Validation number of folds\n :return: True if a new model is fit to the data, or a previous model is updated\n False if old model when fit to new data performs poorly in comparison to\n earlier data\n \"\"\"\n if not self.reTrain: # Train first time\n score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval)\n\n sqlManager.insertValue(self.name, 0.0, score.mean(), 0, len(y), 1) # Store the first result of clf\n self.clf.fit(X, y)\n\n joblib.dump(self.clf, \"model.pkl\") # Store the CLF\n print(\"Data Fit\")\n return True\n else:\n previousData = sqlManager.selectNewestRecord(self.name) # Check the last entry of CLF\n if len(previousData) > 0:\n oldSize = previousData[5]\n newSize = len(y)\n\n accScore = previousData[3]\n\n score = cv.cross_val_score(self.clf, X, y, scoring, cv=crossval)\n newAccScore = score.mean()\n print(\"Old Accuracy Score : \", accScore)\n print(\"New Accuracy Score : \", newAccScore)\n\n if 
accScore <= newAccScore: # If new data is benefitial, increases accuracy\n print(\"Reinforcement Learning : Newer model is superior. Saving Model.\")\n self.clf.fit(X, y)\n\n sqlManager.insertValue(self.name, accScore, newAccScore, oldSize, newSize, 1)\n joblib.dump(self.clf, \"model.pkl\")\n return True\n else:\n print(\"Reinforcement Learning : Newer model is inferior. Not saving model.\")\n return False\n\n def predict(self, X):\n return self.clf.predict(X)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sqlManager.close()\n\nif __name__ == \"__main__\":\n pass\n\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 31 13:42:47 2018
@author: zhan
"""
from scipy.spatial.distance import pdist, squareform, cdist
import numpy as np
import scipy.io as sci
import os,sys
import datetime
###################################################################
# I_tr:features of training set for image data
# I_te:features of testing set for image data
# T_te:features of training set for text data
# T_te:features of testing set for text data
# L_tr:category label of training set
# L_te:category label of testing set
###############################################################
def unifyKnnKernel(Z,tr_n_I, te_n_I, tr_n_T, te_n_T,k):
x1 = np.concatenate([range(tr_n_I,tr_n_I+te_n_I),
range(tr_n_I+te_n_I+tr_n_T,tr_n_I+te_n_I+tr_n_T+te_n_T)]);
x2 = np.concatenate([range(0,tr_n_I),
range(tr_n_I+te_n_I,tr_n_I+te_n_I+tr_n_T)]);
y1 = np.concatenate([range(0,tr_n_I), range(tr_n_I+te_n_I,tr_n_I+te_n_I+tr_n_T)]);
W = Z[x1,:];
W = W[:,y1];
W = W;
Y = Z[x2,:];
Y = Y[:,y1];
Y = Y;
KN = -np.sort(-W);
I = np.argsort(-W);
for i in range(0,te_n_I + te_n_T):
k1 = np.reshape(KN[i,0:k], [1, k]);
knn = np.concatenate([k1, np.zeros([1,tr_n_I + tr_n_T-k])],1);
W[i,I[i,:]] = knn;
WI = W[0:te_n_I, :];
WT = W[te_n_I:te_n_I+te_n_T, :];
WI_s = np.reshape(np.sum(WI, 1), [len(WI),1]);
WT_s = np.reshape(np.sum(WT, 1), [len(WI),1]);
WI = WI/np.tile(WI_s, [1, tr_n_I+tr_n_T]);
WT = WT/np.tile(WT_s, [1, tr_n_T+tr_n_I]);
#W = np.concatenate([WI,WT]);
m = np.reshape(range(tr_n_I), [tr_n_I,1]);
m1 = np.tile(np.concatenate([m, m]),[1,(tr_n_I+tr_n_T)]);
Y0 = (m1 == m1.T);
Y1 = np.multiply(Y,(1.-Y0))+Y0;
h = Y1;
W_IT = np.matmul(np.matmul(WI,h), WT.T);
return W_IT
def computer_av(distance, label):
m, n = np.shape(distance)
av_precision = np.zeros([m, 1])
sort = np.argsort(-distance)
for i in range(m):
cumulate = 0.0
tp_counter = 0.0
for j in range(50):
if np.sum(np.abs(label[sort[i,j]] - label[i])) == 0:
tp_counter += 1.0
cumulate = cumulate + (float(tp_counter)/ float(j+1))
if tp_counter !=0:
av_precision[i] = cumulate/float(tp_counter)
mean_precision = np.mean(av_precision)
return mean_precision
if __name__ == '__main__':
data1 = sci.loadmat('best_data.mat')
begin = datetime.datetime.now()
D1 = pdist(np.concatenate([data1['I_tr'], data1['I_te'],
data1['T_tr'], data1['T_te']]),'cosine');
Z1 = 1.0-squareform(D1)/2.0;
h = []
p = []
for k in range(10, 1000, 10):
distance = unifyKnnKernel(Z1,
len(data1['I_tr']),len(data1['I_te']),
len(data1['T_tr']),len(data1['T_te']),
k)
end = datetime.datetime.now()
re1 = computer_av(distance,data1['L_te'].T)
re2 = computer_av(distance.T, data1['L_te'].T)
avg = (re1 + re2)/2.0
print k
print('The KNN test result:ItoT:{: .4}; TtoI: {: .4}; avg: {: .4}'.format(re1, re2, avg))
f1 = open('knn_test.txt', "a")
f1.write('k: ')
f1.write(str(k))
f1.write('\t')
f1.write('T2I: ')
f1.write(str(re1))
f1.write('\t')
f1.write('I2T: ')
f1.write(str(re2))
f1.write('\t')
f1.write('AVG: ')
f1.write(str(avg))
f1.write('\n')
f1.close()
|
normal
|
{
"blob_id": "db140bf66f3e3a84a60a6617ea4c03cc6a1bc56d",
"index": 6271,
"step-1": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 31 13:42:47 2018\n\n@author: zhan\n\"\"\"\nfrom scipy.spatial.distance import pdist, squareform, cdist\nimport numpy as np\nimport scipy.io as sci\nimport os,sys\nimport datetime\n\n###################################################################\n# I_tr:features of training set for image data\n# I_te:features of testing set for image data\n# T_te:features of training set for text data\n# T_te:features of testing set for text data\n# L_tr:category label of training set\n# L_te:category label of testing set\n\n###############################################################\n\n\ndef unifyKnnKernel(Z,tr_n_I, te_n_I, tr_n_T, te_n_T,k):\n x1 = np.concatenate([range(tr_n_I,tr_n_I+te_n_I),\n range(tr_n_I+te_n_I+tr_n_T,tr_n_I+te_n_I+tr_n_T+te_n_T)]);\n x2 = np.concatenate([range(0,tr_n_I),\n range(tr_n_I+te_n_I,tr_n_I+te_n_I+tr_n_T)]);\n y1 = np.concatenate([range(0,tr_n_I), range(tr_n_I+te_n_I,tr_n_I+te_n_I+tr_n_T)]);\n W = Z[x1,:];\n W = W[:,y1];\n W = W;\n Y = Z[x2,:];\n Y = Y[:,y1];\n Y = Y;\n KN = -np.sort(-W);\n I = np.argsort(-W);\n for i in range(0,te_n_I + te_n_T):\n k1 = np.reshape(KN[i,0:k], [1, k]);\n knn = np.concatenate([k1, np.zeros([1,tr_n_I + tr_n_T-k])],1);\n W[i,I[i,:]] = knn;\n WI = W[0:te_n_I, :];\n WT = W[te_n_I:te_n_I+te_n_T, :];\n\n WI_s = np.reshape(np.sum(WI, 1), [len(WI),1]);\n WT_s = np.reshape(np.sum(WT, 1), [len(WI),1]);\n WI = WI/np.tile(WI_s, [1, tr_n_I+tr_n_T]);\n WT = WT/np.tile(WT_s, [1, tr_n_T+tr_n_I]);\n\n #W = np.concatenate([WI,WT]);\n m = np.reshape(range(tr_n_I), [tr_n_I,1]);\n m1 = np.tile(np.concatenate([m, m]),[1,(tr_n_I+tr_n_T)]);\n Y0 = (m1 == m1.T); \n Y1 = np.multiply(Y,(1.-Y0))+Y0;\n h = Y1;\n W_IT = np.matmul(np.matmul(WI,h), WT.T);\n \n return W_IT\n\ndef computer_av(distance, label):\n m, n = np.shape(distance)\n av_precision = np.zeros([m, 1])\n sort = np.argsort(-distance)\n for i in range(m):\n cumulate = 0.0\n tp_counter = 0.0\n 
for j in range(50):\n if np.sum(np.abs(label[sort[i,j]] - label[i])) == 0:\n tp_counter += 1.0\n cumulate = cumulate + (float(tp_counter)/ float(j+1))\n \n if tp_counter !=0:\n av_precision[i] = cumulate/float(tp_counter)\n mean_precision = np.mean(av_precision)\n return mean_precision \n\n \nif __name__ == '__main__':\n data1 = sci.loadmat('best_data.mat') \n begin = datetime.datetime.now()\n D1 = pdist(np.concatenate([data1['I_tr'], data1['I_te'], \n data1['T_tr'], data1['T_te']]),'cosine');\n Z1 = 1.0-squareform(D1)/2.0;\n h = []\n p = []\n for k in range(10, 1000, 10): \n distance = unifyKnnKernel(Z1,\n len(data1['I_tr']),len(data1['I_te']),\n len(data1['T_tr']),len(data1['T_te']),\n k)\n end = datetime.datetime.now()\n \n \n re1 = computer_av(distance,data1['L_te'].T)\n re2 = computer_av(distance.T, data1['L_te'].T)\n avg = (re1 + re2)/2.0\n print k\n print('The KNN test result:ItoT:{: .4}; TtoI: {: .4}; avg: {: .4}'.format(re1, re2, avg))\n \n\n f1 = open('knn_test.txt', \"a\")\n f1.write('k: ')\n f1.write(str(k))\n f1.write('\\t')\n f1.write('T2I: ')\n f1.write(str(re1))\n f1.write('\\t')\n f1.write('I2T: ')\n f1.write(str(re2))\n f1.write('\\t')\n f1.write('AVG: ')\n f1.write(str(avg))\n f1.write('\\n')\n f1.close()\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def has_dupulicates(word):
d = dict()
for c in word:
if c not in d:
d[c] = 1
else:
d[c] += 1
for k in d:
if d[k] == 1:
print(k)
else:
print(k, d[k])
return d
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def non_dupulicates_lette(word):
text = list(word)
print(text)
i = 0
for i in range(len(text)):
for k in text:
print(c)
def has_dupulicates(word):
d = dict()
for c in word:
if c not in d:
d[c] = 1
else:
d[c] += 1
for k in d:
if d[k] == 1:
print(k)
else:
print(k, d[k])
return d
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def non_dupulicates_lette(word):
text = list(word)
print(text)
i = 0
for i in range(len(text)):
for k in text:
print(c)
def has_dupulicates(word):
d = dict()
for c in word:
if c not in d:
d[c] = 1
else:
d[c] += 1
for k in d:
if d[k] == 1:
print(k)
else:
print(k, d[k])
return d
<|reserved_special_token_0|>
non_dupulicates_lette(A)
<|reserved_special_token_1|>
def non_dupulicates_lette(word):
text = list(word)
print(text)
i = 0
for i in range(len(text)):
for k in text:
print(c)
def has_dupulicates(word):
d = dict()
for c in word:
if c not in d:
d[c] = 1
else:
d[c] += 1
for k in d:
if d[k] == 1:
print(k)
else:
print(k, d[k])
return d
A = 'bccata'
non_dupulicates_lette(A)
<|reserved_special_token_1|>
def non_dupulicates_lette(word):
text = list(word);
print(text)
i=0
for i in range(len(text)):
for k in text:
print(c)
def has_dupulicates(word):
d= dict()
for c in word:
if c not in d:
d[c]=1
else:
d[c]+=1
for k in d:
if d[k]==1:
print(k)
else:
print(k,d[k])
return d
#count=0
#othercount=1
#sizeword=len(word)-1
#while count<sizeword:
#letter=word[count]
#while othercount<sizeword:
#if letter == word[othercount]:
#return True
#othercount= othercount+1
#count+=1
#return False
A='bccata'#['a','b','b','c']
non_dupulicates_lette(A)
#result=has_dupulicates(A)
#print(result)
|
flexible
|
{
"blob_id": "8cd234c2ec1b36abd992cc1a46147376cc241ede",
"index": 3276,
"step-1": "<mask token>\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\n",
"step-2": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\n",
"step-3": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\nnon_dupulicates_lette(A)\n",
"step-4": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\nA = 'bccata'\nnon_dupulicates_lette(A)\n",
"step-5": "def non_dupulicates_lette(word):\n text = list(word);\n print(text)\n i=0\n for i in range(len(text)):\n for k in text:\n print(c)\n \ndef has_dupulicates(word):\n d= dict()\n for c in word:\n if c not in d:\n d[c]=1\n \n else:\n d[c]+=1\n\n\n for k in d:\n if d[k]==1:\n print(k)\n \n else:\n print(k,d[k])\n \n \n \n return d\n #count=0\n #othercount=1\n #sizeword=len(word)-1\n #while count<sizeword:\n #letter=word[count]\n #while othercount<sizeword:\n #if letter == word[othercount]:\n #return True\n #othercount= othercount+1\n\n #count+=1\n\n\n #return False\nA='bccata'#['a','b','b','c']\nnon_dupulicates_lette(A)\n#result=has_dupulicates(A)\n#print(result)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass
class Root:
a: List[object] = field(default_factory=list, metadata={'type':
'Element', 'namespace': '', 'min_occurs': 2, 'max_occurs': 4,
'sequence': 1})
b: List[object] = field(default_factory=list, metadata={'type':
'Element', 'namespace': '', 'max_occurs': 2, 'sequence': 1})
<|reserved_special_token_1|>
from dataclasses import dataclass, field
from typing import List
@dataclass
class Root:
a: List[object] = field(default_factory=list, metadata={'type':
'Element', 'namespace': '', 'min_occurs': 2, 'max_occurs': 4,
'sequence': 1})
b: List[object] = field(default_factory=list, metadata={'type':
'Element', 'namespace': '', 'max_occurs': 2, 'sequence': 1})
<|reserved_special_token_1|>
from dataclasses import dataclass, field
from typing import List
@dataclass
class Root:
a: List[object] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 2,
"max_occurs": 4,
"sequence": 1,
}
)
b: List[object] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"max_occurs": 2,
"sequence": 1,
}
)
|
flexible
|
{
"blob_id": "7e318ae7317eac90d6ce9a6b1d0dcc8ff65abef0",
"index": 9430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Root:\n a: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'min_occurs': 2, 'max_occurs': 4,\n 'sequence': 1})\n b: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'max_occurs': 2, 'sequence': 1})\n",
"step-3": "from dataclasses import dataclass, field\nfrom typing import List\n\n\n@dataclass\nclass Root:\n a: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'min_occurs': 2, 'max_occurs': 4,\n 'sequence': 1})\n b: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'max_occurs': 2, 'sequence': 1})\n",
"step-4": "from dataclasses import dataclass, field\nfrom typing import List\n\n\n@dataclass\nclass Root:\n a: List[object] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"min_occurs\": 2,\n \"max_occurs\": 4,\n \"sequence\": 1,\n }\n )\n b: List[object] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"max_occurs\": 2,\n \"sequence\": 1,\n }\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import kubernetes.client
from kubernetes.client.rest import ApiException
from pprint import pprint
from kubeops_api.models.cluster import Cluster
class ClusterMonitor():
    """Thin wrapper around the Kubernetes CoreV1 API for a single cluster.

    Builds an authenticated API client from the cluster's bearer token and
    the IP of the first host in the cluster's 'master' group. The API
    server is assumed to listen on port 6443.
    """

    #: Default Kubernetes API server port.
    API_SERVER_PORT = 6443

    def __init__(self, cluster):
        """Create an authenticated CoreV1 API client for *cluster*.

        :param cluster: project Cluster model; must expose
            ``get_cluster_token()``, ``change_to()`` and a ``group_set``
            containing a group named 'master' with at least one host.
        """
        self.cluster = cluster
        self.token = self.cluster.get_cluster_token()
        self.cluster.change_to()
        master = self.cluster.group_set.get(name='master').hosts.first()
        configuration = kubernetes.client.Configuration()
        configuration.api_key_prefix['authorization'] = 'Bearer'
        configuration.api_key['authorization'] = self.token
        # NOTE(review): the original printed the bearer token and the host
        # URL to stdout; that leaks credentials into logs, so the debug
        # prints were removed. Client-level request logging is still on.
        configuration.debug = True
        configuration.host = 'https://{}:{}'.format(master.ip,
                                                    self.API_SERVER_PORT)
        # Cluster API servers typically use self-signed certificates, so
        # TLS verification is deliberately disabled (insecure on untrusted
        # networks).
        configuration.verify_ssl = False
        self.api_instance = kubernetes.client.CoreV1Api(
            kubernetes.client.ApiClient(configuration))

    def list_pods(self):
        """Return the list of pods across all namespaces (V1PodList)."""
        return self.api_instance.list_pod_for_all_namespaces()
|
normal
|
{
"blob_id": "da41f26489c477e0df9735606457bd4ee4e5a396",
"index": 4465,
"step-1": "<mask token>\n\n\nclass ClusterMonitor:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ClusterMonitor:\n\n def __init__(self, cluster):\n self.cluster = cluster\n self.token = self.cluster.get_cluster_token()\n self.cluster.change_to()\n master = self.cluster.group_set.get(name='master').hosts.first()\n configuration = kubernetes.client.Configuration()\n configuration.api_key_prefix['authorization'] = 'Bearer'\n configuration.api_key['authorization'] = self.token\n print('---token----')\n print(self.token)\n configuration.debug = True\n configuration.host = 'https://' + master.ip + ':6443'\n configuration.verify_ssl = False\n print('https://' + master.ip + ':6443')\n self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.\n ApiClient(configuration))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ClusterMonitor:\n\n def __init__(self, cluster):\n self.cluster = cluster\n self.token = self.cluster.get_cluster_token()\n self.cluster.change_to()\n master = self.cluster.group_set.get(name='master').hosts.first()\n configuration = kubernetes.client.Configuration()\n configuration.api_key_prefix['authorization'] = 'Bearer'\n configuration.api_key['authorization'] = self.token\n print('---token----')\n print(self.token)\n configuration.debug = True\n configuration.host = 'https://' + master.ip + ':6443'\n configuration.verify_ssl = False\n print('https://' + master.ip + ':6443')\n self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.\n ApiClient(configuration))\n\n def list_pods(self):\n pods = self.api_instance.list_pod_for_all_namespaces()\n return pods\n",
"step-4": "import kubernetes.client\nfrom kubernetes.client.rest import ApiException\nfrom pprint import pprint\nfrom kubeops_api.models.cluster import Cluster\n\n\nclass ClusterMonitor:\n\n def __init__(self, cluster):\n self.cluster = cluster\n self.token = self.cluster.get_cluster_token()\n self.cluster.change_to()\n master = self.cluster.group_set.get(name='master').hosts.first()\n configuration = kubernetes.client.Configuration()\n configuration.api_key_prefix['authorization'] = 'Bearer'\n configuration.api_key['authorization'] = self.token\n print('---token----')\n print(self.token)\n configuration.debug = True\n configuration.host = 'https://' + master.ip + ':6443'\n configuration.verify_ssl = False\n print('https://' + master.ip + ':6443')\n self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.\n ApiClient(configuration))\n\n def list_pods(self):\n pods = self.api_instance.list_pod_for_all_namespaces()\n return pods\n",
"step-5": "import kubernetes.client\nfrom kubernetes.client.rest import ApiException\nfrom pprint import pprint\nfrom kubeops_api.models.cluster import Cluster\n\nclass ClusterMonitor():\n\n def __init__(self,cluster):\n self.cluster = cluster\n self.token = self.cluster.get_cluster_token()\n self.cluster.change_to()\n master = self.cluster.group_set.get(name='master').hosts.first()\n configuration = kubernetes.client.Configuration()\n configuration.api_key_prefix['authorization'] = 'Bearer'\n configuration.api_key['authorization'] = self.token\n print('---token----')\n print(self.token)\n configuration.debug = True\n configuration.host = 'https://'+master.ip+\":6443\"\n configuration.verify_ssl = False\n print('https://'+master.ip+\":6443\")\n self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient(configuration))\n\n def list_pods(self):\n pods = self.api_instance.list_pod_for_all_namespaces()\n return pods\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def synonym_alternatives_range(WordVectors_npArray,
AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,
AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):
"""
"""
synonym_alternatives_range = np.zeros(len(WordVectors_npArray))
for word_int in range(len(WordVectors_npArray)):
DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorOne_npArray[word_int, :])
print(DistToAltOne)
DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorTwo_npArray[word_int, :])
print(DistToAltTwo)
DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorThree_npArray[word_int, :])
print(DistToAltThree)
DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorFour_npArray[word_int, :])
print(DistToAltFour)
synonym_alternatives_range[word_int] = max(DistToAltOne,
DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne,
DistToAltTwo, DistToAltThree, DistToAltFour)
return synonym_alternatives_range
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def synonym_alternatives_range(WordVectors_npArray,
AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,
AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):
"""
"""
synonym_alternatives_range = np.zeros(len(WordVectors_npArray))
for word_int in range(len(WordVectors_npArray)):
DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorOne_npArray[word_int, :])
print(DistToAltOne)
DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorTwo_npArray[word_int, :])
print(DistToAltTwo)
DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorThree_npArray[word_int, :])
print(DistToAltThree)
DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorFour_npArray[word_int, :])
print(DistToAltFour)
synonym_alternatives_range[word_int] = max(DistToAltOne,
DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne,
DistToAltTwo, DistToAltThree, DistToAltFour)
return synonym_alternatives_range
def synonym_alternatives_average(WordVectors_npArray,
AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,
AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):
"""
"""
synonym_alternatives_average = np.zeros(len(WordVectors_npArray))
for word_int in range(len(WordVectors_npArray)):
DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorOne_npArray[word_int, :])
print(DistToAltOne)
DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorTwo_npArray[word_int, :])
print(DistToAltTwo)
DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorThree_npArray[word_int, :])
print(DistToAltThree)
DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorFour_npArray[word_int, :])
print(DistToAltFour)
synonym_alternatives_average[word_int] = (DistToAltOne +
DistToAltTwo + DistToAltThree + DistToAltFour) / 4
return synonym_alternatives_average
def nth_neighbor_filter():
""" Maybe we won't have this.
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def synonym_filter(WordVectors_npArray, WordLabels_npArray):
"""
"""
pass
def synonym_alternatives_range(WordVectors_npArray,
AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,
AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):
"""
"""
synonym_alternatives_range = np.zeros(len(WordVectors_npArray))
for word_int in range(len(WordVectors_npArray)):
DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorOne_npArray[word_int, :])
print(DistToAltOne)
DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorTwo_npArray[word_int, :])
print(DistToAltTwo)
DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorThree_npArray[word_int, :])
print(DistToAltThree)
DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorFour_npArray[word_int, :])
print(DistToAltFour)
synonym_alternatives_range[word_int] = max(DistToAltOne,
DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne,
DistToAltTwo, DistToAltThree, DistToAltFour)
return synonym_alternatives_range
def synonym_alternatives_average(WordVectors_npArray,
AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,
AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):
"""
"""
synonym_alternatives_average = np.zeros(len(WordVectors_npArray))
for word_int in range(len(WordVectors_npArray)):
DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorOne_npArray[word_int, :])
print(DistToAltOne)
DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorTwo_npArray[word_int, :])
print(DistToAltTwo)
DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorThree_npArray[word_int, :])
print(DistToAltThree)
DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorFour_npArray[word_int, :])
print(DistToAltFour)
synonym_alternatives_average[word_int] = (DistToAltOne +
DistToAltTwo + DistToAltThree + DistToAltFour) / 4
return synonym_alternatives_average
def nth_neighbor_filter():
""" Maybe we won't have this.
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from scipy.spatial import distance
def synonym_filter(WordVectors_npArray, WordLabels_npArray):
"""
"""
pass
def synonym_alternatives_range(WordVectors_npArray,
AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,
AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):
"""
"""
synonym_alternatives_range = np.zeros(len(WordVectors_npArray))
for word_int in range(len(WordVectors_npArray)):
DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorOne_npArray[word_int, :])
print(DistToAltOne)
DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorTwo_npArray[word_int, :])
print(DistToAltTwo)
DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorThree_npArray[word_int, :])
print(DistToAltThree)
DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorFour_npArray[word_int, :])
print(DistToAltFour)
synonym_alternatives_range[word_int] = max(DistToAltOne,
DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne,
DistToAltTwo, DistToAltThree, DistToAltFour)
return synonym_alternatives_range
def synonym_alternatives_average(WordVectors_npArray,
AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,
AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):
"""
"""
synonym_alternatives_average = np.zeros(len(WordVectors_npArray))
for word_int in range(len(WordVectors_npArray)):
DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorOne_npArray[word_int, :])
print(DistToAltOne)
DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorTwo_npArray[word_int, :])
print(DistToAltTwo)
DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorThree_npArray[word_int, :])
print(DistToAltThree)
DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],
AlternativesVectorFour_npArray[word_int, :])
print(DistToAltFour)
synonym_alternatives_average[word_int] = (DistToAltOne +
DistToAltTwo + DistToAltThree + DistToAltFour) / 4
return synonym_alternatives_average
def nth_neighbor_filter():
""" Maybe we won't have this.
"""
pass
<|reserved_special_token_1|>
'''
'''
import numpy as np
from scipy.spatial import distance
def synonym_filter(WordVectors_npArray, WordLabels_npArray):
    '''Placeholder: filter word vectors using synonym relationships.

    Not yet implemented. Intended to take an (n_words, dim) array of word
    vectors and the row-aligned array of word labels; the filtering
    criterion is still to be decided. TODO: implement or remove.
    '''
    pass
def synonym_alternatives_range(WordVectors_npArray,
                               AlternativesVectorOne_npArray,
                               AlternativesVectorTwo_npArray,
                               AlternativesVectorThree_npArray,
                               AlternativesVectorFour_npArray):
    '''Per-word spread of cosine distances to four alternative vectors.

    For each row ``i``, computes the cosine distance from
    ``WordVectors_npArray[i]`` to the corresponding row of each of the
    four alternative arrays, and records ``max - min`` of those four
    distances.

    :param WordVectors_npArray: (n_words, dim) array of word vectors.
    :param AlternativesVector*_npArray: (n_words, dim) arrays, row-aligned
        with the word vectors.
    :returns: 1-D numpy array of length n_words holding the distance range
        for each word.

    NOTE(review): the original recomputed the distance in four copy-pasted
    statements and printed every distance to stdout; the debug prints were
    removed and the computation collapsed into one loop over the
    alternatives. The returned values are unchanged.
    '''
    alternatives = (AlternativesVectorOne_npArray,
                    AlternativesVectorTwo_npArray,
                    AlternativesVectorThree_npArray,
                    AlternativesVectorFour_npArray)

    n_words = len(WordVectors_npArray)
    synonym_alternatives_range = np.zeros(n_words)

    for word_int in range(n_words):
        # Cosine distance from this word vector to each alternative row.
        dists = [distance.cosine(WordVectors_npArray[word_int, :],
                                 alt[word_int, :])
                 for alt in alternatives]
        synonym_alternatives_range[word_int] = max(dists) - min(dists)

    return synonym_alternatives_range
def synonym_alternatives_average(WordVectors_npArray,
                                 AlternativesVectorOne_npArray,
                                 AlternativesVectorTwo_npArray,
                                 AlternativesVectorThree_npArray,
                                 AlternativesVectorFour_npArray):
    '''Per-word mean cosine distance to four alternative vectors.

    For each row ``i``, computes the cosine distance from
    ``WordVectors_npArray[i]`` to the corresponding row of each of the
    four alternative arrays and records the arithmetic mean of those
    four distances.

    :param WordVectors_npArray: (n_words, dim) array of word vectors.
    :param AlternativesVector*_npArray: (n_words, dim) arrays, row-aligned
        with the word vectors.
    :returns: 1-D numpy array of length n_words holding the mean distance
        for each word.

    NOTE(review): the original used four copy-pasted distance statements,
    printed each distance, and hard-coded the divisor 4; this version
    loops over the alternatives and divides by their count. The returned
    values are unchanged.
    '''
    alternatives = (AlternativesVectorOne_npArray,
                    AlternativesVectorTwo_npArray,
                    AlternativesVectorThree_npArray,
                    AlternativesVectorFour_npArray)

    n_words = len(WordVectors_npArray)
    synonym_alternatives_average = np.zeros(n_words)

    for word_int in range(n_words):
        # Cosine distance from this word vector to each alternative row.
        dists = [distance.cosine(WordVectors_npArray[word_int, :],
                                 alt[word_int, :])
                 for alt in alternatives]
        synonym_alternatives_average[word_int] = sum(dists) / len(dists)

    return synonym_alternatives_average
def nth_neighbor_filter():
    ''' Maybe we won't have this.

    Placeholder for an nth-nearest-neighbor based filter; currently a
    no-op and may be dropped entirely. TODO: implement or remove.
    '''
    pass
|
flexible
|
{
"blob_id": "ea0a59953f2571f36e65f8f958774074b39a9ae5",
"index": 6996,
"step-1": "<mask token>\n\n\ndef synonym_alternatives_range(WordVectors_npArray,\n AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):\n \"\"\"\n \"\"\"\n synonym_alternatives_range = np.zeros(len(WordVectors_npArray))\n for word_int in range(len(WordVectors_npArray)):\n DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorOne_npArray[word_int, :])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorTwo_npArray[word_int, :])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorThree_npArray[word_int, :])\n print(DistToAltThree)\n DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorFour_npArray[word_int, :])\n print(DistToAltFour)\n synonym_alternatives_range[word_int] = max(DistToAltOne,\n DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne,\n DistToAltTwo, DistToAltThree, DistToAltFour)\n return synonym_alternatives_range\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef synonym_alternatives_range(WordVectors_npArray,\n AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):\n \"\"\"\n \"\"\"\n synonym_alternatives_range = np.zeros(len(WordVectors_npArray))\n for word_int in range(len(WordVectors_npArray)):\n DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorOne_npArray[word_int, :])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorTwo_npArray[word_int, :])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorThree_npArray[word_int, :])\n print(DistToAltThree)\n DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorFour_npArray[word_int, :])\n print(DistToAltFour)\n synonym_alternatives_range[word_int] = max(DistToAltOne,\n DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne,\n DistToAltTwo, DistToAltThree, DistToAltFour)\n return synonym_alternatives_range\n\n\ndef synonym_alternatives_average(WordVectors_npArray,\n AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):\n \"\"\"\n \"\"\"\n synonym_alternatives_average = np.zeros(len(WordVectors_npArray))\n for word_int in range(len(WordVectors_npArray)):\n DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorOne_npArray[word_int, :])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorTwo_npArray[word_int, :])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorThree_npArray[word_int, :])\n print(DistToAltThree)\n DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorFour_npArray[word_int, :])\n print(DistToAltFour)\n 
synonym_alternatives_average[word_int] = (DistToAltOne +\n DistToAltTwo + DistToAltThree + DistToAltFour) / 4\n return synonym_alternatives_average\n\n\ndef nth_neighbor_filter():\n \"\"\" Maybe we won't have this.\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\ndef synonym_filter(WordVectors_npArray, WordLabels_npArray):\n \"\"\"\n \"\"\"\n pass\n\n\ndef synonym_alternatives_range(WordVectors_npArray,\n AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):\n \"\"\"\n \"\"\"\n synonym_alternatives_range = np.zeros(len(WordVectors_npArray))\n for word_int in range(len(WordVectors_npArray)):\n DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorOne_npArray[word_int, :])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorTwo_npArray[word_int, :])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorThree_npArray[word_int, :])\n print(DistToAltThree)\n DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorFour_npArray[word_int, :])\n print(DistToAltFour)\n synonym_alternatives_range[word_int] = max(DistToAltOne,\n DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne,\n DistToAltTwo, DistToAltThree, DistToAltFour)\n return synonym_alternatives_range\n\n\ndef synonym_alternatives_average(WordVectors_npArray,\n AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):\n \"\"\"\n \"\"\"\n synonym_alternatives_average = np.zeros(len(WordVectors_npArray))\n for word_int in range(len(WordVectors_npArray)):\n DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorOne_npArray[word_int, :])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorTwo_npArray[word_int, :])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorThree_npArray[word_int, :])\n print(DistToAltThree)\n DistToAltFour = distance.cosine(WordVectors_npArray[word_int, 
:],\n AlternativesVectorFour_npArray[word_int, :])\n print(DistToAltFour)\n synonym_alternatives_average[word_int] = (DistToAltOne +\n DistToAltTwo + DistToAltThree + DistToAltFour) / 4\n return synonym_alternatives_average\n\n\ndef nth_neighbor_filter():\n \"\"\" Maybe we won't have this.\n \"\"\"\n pass\n",
"step-4": "<mask token>\nimport numpy as np\nfrom scipy.spatial import distance\n\n\ndef synonym_filter(WordVectors_npArray, WordLabels_npArray):\n \"\"\"\n \"\"\"\n pass\n\n\ndef synonym_alternatives_range(WordVectors_npArray,\n AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):\n \"\"\"\n \"\"\"\n synonym_alternatives_range = np.zeros(len(WordVectors_npArray))\n for word_int in range(len(WordVectors_npArray)):\n DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorOne_npArray[word_int, :])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorTwo_npArray[word_int, :])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorThree_npArray[word_int, :])\n print(DistToAltThree)\n DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorFour_npArray[word_int, :])\n print(DistToAltFour)\n synonym_alternatives_range[word_int] = max(DistToAltOne,\n DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne,\n DistToAltTwo, DistToAltThree, DistToAltFour)\n return synonym_alternatives_range\n\n\ndef synonym_alternatives_average(WordVectors_npArray,\n AlternativesVectorOne_npArray, AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray, AlternativesVectorFour_npArray):\n \"\"\"\n \"\"\"\n synonym_alternatives_average = np.zeros(len(WordVectors_npArray))\n for word_int in range(len(WordVectors_npArray)):\n DistToAltOne = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorOne_npArray[word_int, :])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorTwo_npArray[word_int, :])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorThree_npArray[word_int, :])\n print(DistToAltThree)\n 
DistToAltFour = distance.cosine(WordVectors_npArray[word_int, :],\n AlternativesVectorFour_npArray[word_int, :])\n print(DistToAltFour)\n synonym_alternatives_average[word_int] = (DistToAltOne +\n DistToAltTwo + DistToAltThree + DistToAltFour) / 4\n return synonym_alternatives_average\n\n\ndef nth_neighbor_filter():\n \"\"\" Maybe we won't have this.\n \"\"\"\n pass\n",
"step-5": "'''\n'''\n\nimport numpy as np\n\nfrom scipy.spatial import distance\n\n\ndef synonym_filter(WordVectors_npArray, WordLabels_npArray):\n '''\n '''\n \n \n pass\n\ndef synonym_alternatives_range(WordVectors_npArray, \n AlternativesVectorOne_npArray,\n AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray,\n AlternativesVectorFour_npArray):\n '''\n '''\n \n \n synonym_alternatives_range = np.zeros(len(WordVectors_npArray))\n \n for word_int in range(len(WordVectors_npArray)):\n \n DistToAltOne = distance.cosine(WordVectors_npArray[word_int,:], \\\n AlternativesVectorOne_npArray[word_int,:])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int,:], \\\n AlternativesVectorTwo_npArray[word_int,:])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int,:], \\\n AlternativesVectorThree_npArray[word_int,:])\n print(DistToAltThree)\n DistToAltFour = distance.cosine(WordVectors_npArray[word_int,:], \\\n AlternativesVectorFour_npArray[word_int,:])\n print(DistToAltFour)\n \n synonym_alternatives_range[word_int] = (max(DistToAltOne, \\\n DistToAltTwo, DistToAltThree, DistToAltFour) - min(DistToAltOne, \\\n DistToAltTwo, DistToAltThree, DistToAltFour))\n \n \n return synonym_alternatives_range\n \ndef synonym_alternatives_average(WordVectors_npArray, \n AlternativesVectorOne_npArray,\n AlternativesVectorTwo_npArray,\n AlternativesVectorThree_npArray,\n AlternativesVectorFour_npArray):\n '''\n '''\n \n \n synonym_alternatives_average = np.zeros(len(WordVectors_npArray))\n \n for word_int in range(len(WordVectors_npArray)):\n DistToAltOne = distance.cosine(WordVectors_npArray[word_int,:], \\\n AlternativesVectorOne_npArray[word_int,:])\n print(DistToAltOne)\n DistToAltTwo = distance.cosine(WordVectors_npArray[word_int,:], \\\n AlternativesVectorTwo_npArray[word_int,:])\n print(DistToAltTwo)\n DistToAltThree = distance.cosine(WordVectors_npArray[word_int,:], \\\n 
AlternativesVectorThree_npArray[word_int,:])\n print(DistToAltThree)\n DistToAltFour = distance.cosine(WordVectors_npArray[word_int,:], \\\n AlternativesVectorFour_npArray[word_int,:])\n print(DistToAltFour)\n \n synonym_alternatives_average[word_int] = (DistToAltOne +\\\n DistToAltTwo + DistToAltThree + DistToAltFour)/4\n \n return synonym_alternatives_average\n \n \n\ndef nth_neighbor_filter():\n ''' Maybe we won't have this.\n '''\n \n \n pass\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class RefTrackCollectionRegistry(object):
<|reserved_special_token_0|>
def __init__(self):
self._genome2TrackIndexReg = defaultdict(set)
self._trackIndex2CollectionReg = defaultdict(set)
self._allCollections = set()
if not os.path.exists(REF_COLL_GSUITES_PATH):
return
for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
for fn in files:
trackIndex, genome, trackCollection = os.path.join(root, fn
).split(os.sep)[-3:]
self._genome2TrackIndexReg[genome].add(trackIndex)
if not trackCollection.endswith('.gsuite'):
continue
trackCollection = trackCollection[:-7]
self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
self._allCollections.add(trackCollection)
def getTrackCollectionList(self, genome):
if genome not in self._genome2TrackIndexReg:
return []
collStrList = []
for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
for trackCollection in sorted(self._trackIndex2CollectionReg[
trackIndex]):
collStrList.append('{}: {}'.format(trackIndex, trackCollection)
)
return collStrList
@classmethod
def getTrackCollSpecFromCollStr(cls, collStr):
if collStr:
return [cls.PREBUILT] + collStr.split(': ')
else:
return [cls.PREBUILT]
def isPartOfTrackCollSpec(self, trackFile):
return isinstance(trackFile, basestring) and (trackFile == self.
PREBUILT or trackFile in self._trackIndex2CollectionReg or
trackFile in self._allCollections)
<|reserved_special_token_0|>
@staticmethod
def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
if len(trackFiles) == 3:
return trackFiles[1], trackFiles[2]
else:
return '', ''
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RefTrackCollectionRegistry(object):
<|reserved_special_token_0|>
def __init__(self):
self._genome2TrackIndexReg = defaultdict(set)
self._trackIndex2CollectionReg = defaultdict(set)
self._allCollections = set()
if not os.path.exists(REF_COLL_GSUITES_PATH):
return
for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
for fn in files:
trackIndex, genome, trackCollection = os.path.join(root, fn
).split(os.sep)[-3:]
self._genome2TrackIndexReg[genome].add(trackIndex)
if not trackCollection.endswith('.gsuite'):
continue
trackCollection = trackCollection[:-7]
self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
self._allCollections.add(trackCollection)
def getTrackCollectionList(self, genome):
if genome not in self._genome2TrackIndexReg:
return []
collStrList = []
for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
for trackCollection in sorted(self._trackIndex2CollectionReg[
trackIndex]):
collStrList.append('{}: {}'.format(trackIndex, trackCollection)
)
return collStrList
@classmethod
def getTrackCollSpecFromCollStr(cls, collStr):
if collStr:
return [cls.PREBUILT] + collStr.split(': ')
else:
return [cls.PREBUILT]
def isPartOfTrackCollSpec(self, trackFile):
return isinstance(trackFile, basestring) and (trackFile == self.
PREBUILT or trackFile in self._trackIndex2CollectionReg or
trackFile in self._allCollections)
def isTrackCollSpec(self, trackFiles):
if not all(isinstance(trackFile, basestring) for trackFile in
trackFiles):
return False
return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(
trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[
1] in self._trackIndex2CollectionReg and trackFiles[2
] in self._allCollections
@staticmethod
def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
if len(trackFiles) == 3:
return trackFiles[1], trackFiles[2]
else:
return '', ''
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RefTrackCollectionRegistry(object):
PREBUILT = '__prebuilt__'
def __init__(self):
self._genome2TrackIndexReg = defaultdict(set)
self._trackIndex2CollectionReg = defaultdict(set)
self._allCollections = set()
if not os.path.exists(REF_COLL_GSUITES_PATH):
return
for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
for fn in files:
trackIndex, genome, trackCollection = os.path.join(root, fn
).split(os.sep)[-3:]
self._genome2TrackIndexReg[genome].add(trackIndex)
if not trackCollection.endswith('.gsuite'):
continue
trackCollection = trackCollection[:-7]
self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
self._allCollections.add(trackCollection)
def getTrackCollectionList(self, genome):
if genome not in self._genome2TrackIndexReg:
return []
collStrList = []
for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
for trackCollection in sorted(self._trackIndex2CollectionReg[
trackIndex]):
collStrList.append('{}: {}'.format(trackIndex, trackCollection)
)
return collStrList
@classmethod
def getTrackCollSpecFromCollStr(cls, collStr):
if collStr:
return [cls.PREBUILT] + collStr.split(': ')
else:
return [cls.PREBUILT]
def isPartOfTrackCollSpec(self, trackFile):
return isinstance(trackFile, basestring) and (trackFile == self.
PREBUILT or trackFile in self._trackIndex2CollectionReg or
trackFile in self._allCollections)
def isTrackCollSpec(self, trackFiles):
if not all(isinstance(trackFile, basestring) for trackFile in
trackFiles):
return False
return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(
trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[
1] in self._trackIndex2CollectionReg and trackFiles[2
] in self._allCollections
@staticmethod
def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
if len(trackFiles) == 3:
return trackFiles[1], trackFiles[2]
else:
return '', ''
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from collections import defaultdict
from past.builtins import basestring
from pycolocstats.core.config import REF_COLL_GSUITES_PATH
__metaclass__ = type
class RefTrackCollectionRegistry(object):
    """Registry of prebuilt reference track collections.

    Scans REF_COLL_GSUITES_PATH, whose layout is assumed to be
    ``<trackIndex>/<genome>/<collection>.gsuite`` (TODO confirm against the
    directory builder), and records which track indexes exist per genome
    and which collections exist per track index.
    """

    # Sentinel marking a track-file spec as referring to a prebuilt collection.
    PREBUILT = '__prebuilt__'

    def __init__(self):
        self._genome2TrackIndexReg = defaultdict(set)
        self._trackIndex2CollectionReg = defaultdict(set)
        self._allCollections = set()

        # Nothing to index if the gsuite root is absent.
        if not os.path.exists(REF_COLL_GSUITES_PATH):
            return

        gsuite_suffix = '.gsuite'
        for dirpath, _subdirs, filenames in os.walk(REF_COLL_GSUITES_PATH):
            for filename in filenames:
                # Last three path components encode index, genome, collection.
                path_parts = os.path.join(dirpath, filename).split(os.sep)
                track_index, genome, collection = path_parts[-3:]
                self._genome2TrackIndexReg[genome].add(track_index)
                # Only *.gsuite files define collections.
                if collection.endswith(gsuite_suffix):
                    collection = collection[:-len(gsuite_suffix)]
                    self._trackIndex2CollectionReg[track_index].add(collection)
                    self._allCollections.add(collection)

    def getTrackCollectionList(self, genome):
        """Return sorted '<trackIndex>: <collection>' strings for *genome*."""
        if genome not in self._genome2TrackIndexReg:
            return []
        return ['{}: {}'.format(track_index, collection)
                for track_index in sorted(self._genome2TrackIndexReg[genome])
                for collection in sorted(
                    self._trackIndex2CollectionReg[track_index])]

    @classmethod
    def getTrackCollSpecFromCollStr(cls, collStr):
        """Turn a '<trackIndex>: <collection>' string into a spec list.

        An empty/None string yields the bare [PREBUILT] spec.
        """
        spec = [cls.PREBUILT]
        if collStr:
            spec += collStr.split(': ')
        return spec

    def isPartOfTrackCollSpec(self, trackFile):
        """True if *trackFile* is a string component of a collection spec."""
        if not isinstance(trackFile, basestring):
            return False
        return (trackFile == self.PREBUILT
                or trackFile in self._trackIndex2CollectionReg
                or trackFile in self._allCollections)

    def isTrackCollSpec(self, trackFiles):
        """True if *trackFiles* as a whole forms a valid collection spec.

        Valid specs are [PREBUILT] or [PREBUILT, knownIndex, knownCollection].
        """
        if not all(isinstance(track_file, basestring)
                   for track_file in trackFiles):
            return False
        if len(trackFiles) == 1:
            return trackFiles[0] == self.PREBUILT
        if len(trackFiles) == 3:
            return (trackFiles[0] == self.PREBUILT
                    and trackFiles[1] in self._trackIndex2CollectionReg
                    and trackFiles[2] in self._allCollections)
        return False

    @staticmethod
    def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
        """Extract (trackIndex, collection) from a spec; ('', '') otherwise."""
        if len(trackFiles) != 3:
            return '', ''
        return trackFiles[1], trackFiles[2]
refTrackCollRegistry = RefTrackCollectionRegistry()
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from collections import defaultdict
from past.builtins import basestring
from pycolocstats.core.config import REF_COLL_GSUITES_PATH
__metaclass__ = type
class RefTrackCollectionRegistry(object):
    """Registry of prebuilt reference track collections.

    Built by scanning REF_COLL_GSUITES_PATH; the last three path components
    of each file are read as <trackIndex>/<genome>/<collection>.gsuite --
    TODO confirm against the code that writes this directory tree.
    """

    # Marker used as the first element of a "track collection spec" list.
    PREBUILT = '__prebuilt__'

    def __init__(self):
        # genome -> set of track index names seen for that genome
        self._genome2TrackIndexReg = defaultdict(set)
        # track index name -> set of collection names (gsuite file stems)
        self._trackIndex2CollectionReg = defaultdict(set)
        # every collection name seen, across all track indexes
        self._allCollections = set()

        # A missing path simply yields an empty registry (no error).
        if not os.path.exists(REF_COLL_GSUITES_PATH):
            return

        for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
            for fn in files:
                # Last three path components encode index/genome/collection.
                trackIndex, genome, trackCollection = os.path.join(root, fn).split(os.sep)[-3:]
                self._genome2TrackIndexReg[genome].add(trackIndex)
                # Only '.gsuite' files define collections; other files still
                # register the genome/track-index pair above.
                if not trackCollection.endswith('.gsuite'):
                    continue
                trackCollection = trackCollection[:-7]  # strip '.gsuite'
                self._trackIndex2CollectionReg[trackIndex].add(trackCollection)
                self._allCollections.add(trackCollection)

    def getTrackCollectionList(self, genome):
        """Return sorted '<trackIndex>: <collection>' strings for *genome*."""
        if genome not in self._genome2TrackIndexReg:
            return []

        collStrList = []
        for trackIndex in sorted(self._genome2TrackIndexReg[genome]):
            for trackCollection in sorted(self._trackIndex2CollectionReg[trackIndex]):
                collStrList.append('{}: {}'.format(trackIndex, trackCollection))
        return collStrList

    # Temporary solution. Should be refactored to not make use of setReferenceTrackFileNames()
    # in Method classes.

    @classmethod
    def getTrackCollSpecFromCollStr(cls, collStr):
        """Turn an 'index: collection' display string back into a spec list."""
        if collStr:
            return [cls.PREBUILT] + collStr.split(': ')
        else:
            return [cls.PREBUILT]

    def isPartOfTrackCollSpec(self, trackFile):
        """True if *trackFile* could be a single element of a valid spec."""
        return isinstance(trackFile, basestring) and \
               (trackFile == self.PREBUILT or
                trackFile in self._trackIndex2CollectionReg or
                trackFile in self._allCollections)

    def isTrackCollSpec(self, trackFiles):
        """True if *trackFiles* is a complete spec: [PREBUILT] alone, or
        [PREBUILT, known track index, known collection]."""
        if not all(isinstance(trackFile, basestring) for trackFile in trackFiles):
            return False
        return (len(trackFiles) == 1 and
                trackFiles[0] == self.PREBUILT) or \
               (len(trackFiles) == 3 and
                trackFiles[0] == self.PREBUILT and
                trackFiles[1] in self._trackIndex2CollectionReg and
                trackFiles[2] in self._allCollections)

    @staticmethod
    def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
        """Extract (trackIndex, collection) from a spec; ('', '') if absent."""
        if len(trackFiles) == 3:
            return trackFiles[1], trackFiles[2]
        else:
            return '', ''
|
flexible
|
{
"blob_id": "9c2cc5b993f020b8a1c96ea4cd5c2fb2da44a251",
"index": 1534,
"step-1": "<mask token>\n\n\nclass RefTrackCollectionRegistry(object):\n <mask token>\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn\n ).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[\n trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection)\n )\n return collStrList\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and (trackFile == self.\n PREBUILT or trackFile in self._trackIndex2CollectionReg or \n trackFile in self._allCollections)\n <mask token>\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RefTrackCollectionRegistry(object):\n <mask token>\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn\n ).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[\n trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection)\n )\n return collStrList\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and (trackFile == self.\n PREBUILT or trackFile in self._trackIndex2CollectionReg or \n trackFile in self._allCollections)\n\n def isTrackCollSpec(self, trackFiles):\n if not all(isinstance(trackFile, basestring) for trackFile in\n trackFiles):\n return False\n return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(\n trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[\n 1] in self._trackIndex2CollectionReg and trackFiles[2\n ] in self._allCollections\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', ''\n\n\n<mask 
token>\n",
"step-3": "<mask token>\n\n\nclass RefTrackCollectionRegistry(object):\n PREBUILT = '__prebuilt__'\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn\n ).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[\n trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection)\n )\n return collStrList\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and (trackFile == self.\n PREBUILT or trackFile in self._trackIndex2CollectionReg or \n trackFile in self._allCollections)\n\n def isTrackCollSpec(self, trackFiles):\n if not all(isinstance(trackFile, basestring) for trackFile in\n trackFiles):\n return False\n return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(\n trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[\n 1] in self._trackIndex2CollectionReg and trackFiles[2\n ] in self._allCollections\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', 
''\n\n\n<mask token>\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nfrom collections import defaultdict\nfrom past.builtins import basestring\nfrom pycolocstats.core.config import REF_COLL_GSUITES_PATH\n__metaclass__ = type\n\n\nclass RefTrackCollectionRegistry(object):\n PREBUILT = '__prebuilt__'\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn\n ).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[\n trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection)\n )\n return collStrList\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and (trackFile == self.\n PREBUILT or trackFile in self._trackIndex2CollectionReg or \n trackFile in self._allCollections)\n\n def isTrackCollSpec(self, trackFiles):\n if not all(isinstance(trackFile, basestring) for trackFile in\n trackFiles):\n return False\n return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(\n trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[\n 1] in 
self._trackIndex2CollectionReg and trackFiles[2\n ] in self._allCollections\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', ''\n\n\nrefTrackCollRegistry = RefTrackCollectionRegistry()\n",
"step-5": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\nfrom collections import defaultdict\nfrom past.builtins import basestring\nfrom pycolocstats.core.config import REF_COLL_GSUITES_PATH\n\n__metaclass__ = type\n\n\nclass RefTrackCollectionRegistry(object):\n PREBUILT = '__prebuilt__'\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection))\n return collStrList\n\n # Temporary solution. 
Should be refactored to not make use of setReferenceTrackFileNames()\n # in Method classes.\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and \\\n (trackFile == self.PREBUILT or\n trackFile in self._trackIndex2CollectionReg or\n trackFile in self._allCollections)\n\n def isTrackCollSpec(self, trackFiles):\n if not all(isinstance(trackFile, basestring) for trackFile in trackFiles):\n return False\n return (len(trackFiles) == 1 and\n trackFiles[0] == self.PREBUILT) or \\\n (len(trackFiles) == 3 and\n trackFiles[0] == self.PREBUILT and\n trackFiles[1] in self._trackIndex2CollectionReg and\n trackFiles[2] in self._allCollections)\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', ''\n\n\nrefTrackCollRegistry = RefTrackCollectionRegistry()\n",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
import datetime
import pendulum
import requests
from prefect import task, Flow, Parameter
from prefect.engine.signals import SKIP
from prefect.tasks.notifications.slack_task import SlackTask
from prefect.tasks.secrets import Secret
# Flow-level inputs: the city is a runtime parameter, the API key a secret
# resolved from the Prefect secret store at run time.
city = Parameter(name="City", default="San Jose")
api_key = Secret("WEATHER_API_KEY")
@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))
def pull_forecast(city, api_key):
    """Fetch the 5-day / 3-hour OpenWeatherMap forecast for *city*.

    Retries up to twice (5 s apart) on failure; raises for non-2xx
    responses. Returns the parsed JSON payload.
    """
    endpoint = ("http://api.openweathermap.org/data/2.5/forecast?"
                + "appid=" + api_key + "&q=" + city)
    response = requests.get(endpoint)
    response.raise_for_status()
    return response.json()
@task
def is_raining_tomorrow(data):
    """
    Inspect the 5-day 3-hour forecast and raise SKIP (halting downstream
    tasks) unless rain appears in tomorrow's forecast.

    data: parsed OpenWeatherMap forecast JSON; expects a "list" of 3-hourly
    entries, each with "weather" items and a "dt_txt" timestamp string.
    """
    # Bug fix: the original computed tomorrow's date but never assigned it,
    # so the later reference to `tomorrow` raised NameError at runtime.
    tomorrow = pendulum.now("utc").add(days=1).strftime("%Y-%m-%d")
    rain = [
        w
        for forecast in data["list"]
        for w in forecast["weather"]
        if w["main"] == "Rain" and forecast["dt_txt"].startswith(tomorrow)
    ]
    if not bool(rain):
        raise SKIP("There is no rain in the forecast for tomorrow.")
# Slack alert; runs only when is_raining_tomorrow does NOT raise SKIP,
# i.e. when rain was found in tomorrow's forecast.
notification = SlackTask(
    message="There is rain in the forecast for tomorrow - better take your umbrella out!",
    webhook_secret="DAVID_SLACK_URL",
)


with Flow("Umbrella Flow") as flow:
    # pull forecast -> check for rain -> (conditionally) notify
    forecast = pull_forecast(city=city, api_key=api_key)
    rain = is_raining_tomorrow(forecast)
    # No data passes to the notification; set_upstream only orders it
    # after the rain check.
    notification.set_upstream(rain)
|
normal
|
{
"blob_id": "7f52354487f85a0bf1783c8aa76f228ef17e6d6b",
"index": 5119,
"step-1": "<mask token>\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\n<mask token>\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-3": "<mask token>\ncity = Parameter(name='City', default='San Jose')\napi_key = Secret('WEATHER_API_KEY')\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\nnotification = SlackTask(message=\n 'There is rain in the forecast for tomorrow - better take your umbrella out!'\n , webhook_secret='DAVID_SLACK_URL')\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-4": "import datetime\nimport pendulum\nimport requests\nfrom prefect import task, Flow, Parameter\nfrom prefect.engine.signals import SKIP\nfrom prefect.tasks.notifications.slack_task import SlackTask\nfrom prefect.tasks.secrets import Secret\ncity = Parameter(name='City', default='San Jose')\napi_key = Secret('WEATHER_API_KEY')\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\nnotification = SlackTask(message=\n 'There is rain in the forecast for tomorrow - better take your umbrella out!'\n , webhook_secret='DAVID_SLACK_URL')\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-5": "import datetime\nimport pendulum\nimport requests\nfrom prefect import task, Flow, Parameter\nfrom prefect.engine.signals import SKIP\nfrom prefect.tasks.notifications.slack_task import SlackTask\nfrom prefect.tasks.secrets import Secret\n\n\ncity = Parameter(name=\"City\", default=\"San Jose\")\napi_key = Secret(\"WEATHER_API_KEY\")\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = \"http://api.openweathermap.org/data/2.5/forecast?\"\n url = base_url + \"appid=\" + api_key + \"&q=\" + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now(\"utc\").add(days=1).strftime(\"%Y-%m-%d\")\n rain = [\n w\n for forecast in data[\"list\"]\n for w in forecast[\"weather\"]\n if w[\"main\"] == \"Rain\" and forecast[\"dt_txt\"].startswith(tomorrow)\n ]\n if not bool(rain):\n raise SKIP(\"There is no rain in the forecast for tomorrow.\")\n\n\nnotification = SlackTask(\n message=\"There is rain in the forecast for tomorrow - better take your umbrella out!\",\n webhook_secret=\"DAVID_SLACK_URL\",\n)\n\n\nwith Flow(\"Umbrella Flow\") as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def matrix_divided(matrix, div):
    """Divide every element of a matrix by ``div``.

    Args:
        matrix: a non-empty list of non-empty lists of ints/floats,
            with all rows the same length.
        div: a non-zero int or float.

    Raises:
        TypeError: if ``matrix`` is malformed, rows differ in size,
            or ``div`` is not a number.
        ZeroDivisionError: if ``div`` is 0.

    Returns:
        A new matrix with each element divided by ``div`` and rounded
        to 2 decimal places.
    """
    # Bug fixes vs. the original: the element check tested `int` twice
    # (rejecting float elements), and empty rows escaped validation.
    if (not isinstance(matrix, list) or matrix == [] or
            not all(isinstance(row, list) and row != [] and
                    all(isinstance(el, (int, float)) for el in row)
                    for row in matrix)):
        raise TypeError(
            'matrix must be a matrix (list of lists) of integers/floats')
    if any(len(row) != len(matrix[0]) for row in matrix):
        raise TypeError('Each row of the matrix must have the same size')
    if not isinstance(div, (int, float)):
        raise TypeError('div must be a number')
    if div == 0:
        raise ZeroDivisionError('division by zero')
    return [[round(el / div, 2) for el in row] for row in matrix]
<|reserved_special_token_1|>
#!/usr/bin/python3
"""1. Divide a matrix """
def matrix_divided(matrix, div):
    """Divides a Matrix

    Args:
        matrix: A list of lists of ints or floats
        div: a non zero int or float

    Exceptions:
        TypeError: if the matrix and/or div is not as stated or the matrix elements
        are not of the same size
        ZeroDivisionError: if div is zero

    Returns: a new matrix holding the results

    """
    workmat = []       # validated copy of the input; divided in place at the end
    WrongType = False  # any element/row/matrix type violation seen
    TooLong = False    # any row whose length differs from the first row
    i = 0              # length of the first row, for the size comparison
    if isinstance(matrix, list):
        if matrix == []:
            WrongType = True
        for x in range(len(matrix)):
            if isinstance(matrix[x], list):
                workmat.append([])
                for y in range(len(matrix[x])):
                    if matrix[x] == []:
                        WrongType = True
                    # Bug fix: the second test was a duplicate `int` check,
                    # which wrongly rejected float elements.
                    if (
                        isinstance(matrix[x][y], int) or
                        isinstance(matrix[x][y], float)
                    ):
                        workmat[x].append(matrix[x][y])
                    else:
                        WrongType = True
                    if x == 0 and y == 0:
                        i = len(matrix[x])
                    else:
                        if not i == len(matrix[x]):
                            TooLong = True
            else:
                WrongType = True
    else:
        WrongType = True
    if WrongType:
        raise TypeError(
            "matrix must be a matrix (list of lists) of integers/floats")
    if TooLong:
        raise TypeError(
            "Each row of the matrix must have the same size")
    if not isinstance(div, float) and not isinstance(div, int):
        raise TypeError(
            "div must be a number")
    if div == 0:
        raise ZeroDivisionError(
            "division by zero")

    for x in range(len(workmat)):
        for y in range(len(workmat[x])):
            workmat[x][y] = round((workmat[x][y] / div), 2)
    return workmat
|
flexible
|
{
"blob_id": "95c5971a102fb2ed84ab0de0471278d0167d8359",
"index": 22,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef matrix_divided(matrix, div):\n \"\"\"Divides a Matrix\n\n Args:\n matrix: A list of lists of ints or floats\n div: a non zero int or float\n\n Exceptions:\n TypeError: if the matrix and/or div is not as stated or the matrix elements\n are not of the same size\n ZeroDivisionError: if div is zero\n\n Returns: a new matrix holding the results\n\n \"\"\"\n workmat = []\n WrongType = False\n TooLong = False\n i = 0\n if isinstance(matrix, list):\n if matrix == []:\n WrongType = True\n for x in range(len(matrix)):\n if isinstance(matrix[x], list):\n workmat.append([])\n for y in range(len(matrix[x])):\n if matrix[x] == []:\n WrongType = True\n if isinstance(matrix[x][y], int) or isinstance(matrix[x\n ][y], int):\n workmat[x].append(matrix[x][y])\n else:\n WrongType = True\n if x == 0 and y == 0:\n i = len(matrix[x])\n elif not i == len(matrix[x]):\n TooLong = True\n else:\n WrongType = True\n else:\n WrongType = True\n if WrongType:\n raise TypeError(\n 'matrix must be a matrix (list of lists) of integers/floats')\n if TooLong:\n raise TypeError('Each row of the matrix must have the same size')\n if not isinstance(div, float) and not isinstance(div, int):\n raise TypeError('div must be a number')\n if div == 0:\n raise ZeroDivisionError('division by zero')\n for x in range(len(workmat)):\n for y in range(len(workmat[x])):\n workmat[x][y] = round(workmat[x][y] / div, 2)\n return workmat\n",
"step-3": "#!/usr/bin/python3\n\"\"\"1. Divide a matrix \"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"Divides a Matrix\n\n Args:\n matrix: A list of lists of ints or floats\n div: a non zero int or float\n\n Exceptions:\n TypeError: if the matrix and/or div is not as stated or the matrix elements\n are not of the same size\n ZeroDivisionError: if div is zero\n\n Returns: a new matrix holding the results\n\n \"\"\"\n workmat = []\n WrongType = False\n TooLong = False\n i = 0\n if isinstance(matrix, list):\n if matrix == []:\n WrongType = True\n for x in range(len(matrix)):\n if isinstance(matrix[x], list):\n workmat.append([])\n for y in range(len(matrix[x])):\n if matrix[x] == []:\n WrongType = True\n if (\n isinstance(matrix[x][y], int) or\n isinstance(matrix[x][y], int)\n ):\n workmat[x].append(matrix[x][y])\n else:\n WrongType = True\n if x == 0 and y == 0:\n i = len(matrix[x])\n else:\n if not i == len(matrix[x]):\n TooLong = True\n else:\n WrongType = True\n else:\n WrongType = True\n if WrongType:\n raise TypeError(\n \"matrix must be a matrix (list of lists) of integers/floats\")\n if TooLong:\n raise TypeError(\n \"Each row of the matrix must have the same size\")\n if not isinstance(div, float) and not isinstance(div, int):\n raise TypeError(\n \"div must be a number\")\n if div == 0:\n raise ZeroDivisionError(\n \"division by zero\")\n\n for x in range(len(workmat)):\n for y in range(len(workmat[x])):\n workmat[x][y] = round((workmat[x][y] / div), 2)\n return workmat\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = []
operations = [migrations.CreateModel(name='Member', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('email', models.EmailField(max_length=75
)), ('total_subscription', models.IntegerField(default=0))],
options={}, bases=(models.Model,)), migrations.CreateModel(name=
'MemberSubscription', fields=[('id', models.AutoField(verbose_name=
'ID', serialize=False, auto_created=True, primary_key=True)), (
'member', models.ForeignKey(to='members.Member'))], options={},
bases=(models.Model,)), migrations.CreateModel(name='Subscription',
fields=[('id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)), ('subreddit', models.
CharField(max_length=200)), ('count', models.IntegerField(default=5
))], options={}, bases=(models.Model,)), migrations.
AlterUniqueTogether(name='subscription', unique_together=set([(
'subreddit', 'count')])), migrations.AddField(model_name=
'membersubscription', name='subscription', field=models.ForeignKey(
to='members.Subscription'), preserve_default=True), migrations.
AddField(model_name='member', name='subscription', field=models.
ManyToManyField(to='members.Subscription', through=
'members.MemberSubscription'), preserve_default=True)]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration for the members app: Member, Subscription, and the
    MemberSubscription through table linking them."""

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('email', models.EmailField(max_length=75)),
                ('total_subscription', models.IntegerField(default=0)),
            ],
            options={},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MemberSubscription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('member', models.ForeignKey(to='members.Member')),
            ],
            options={},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('subreddit', models.CharField(max_length=200)),
                ('count', models.IntegerField(default=5)),
            ],
            options={},
            bases=(models.Model,),
        ),
        # Each (subreddit, count) pair may appear only once.
        migrations.AlterUniqueTogether(
            name='subscription',
            unique_together=set([('subreddit', 'count')]),
        ),
        # FKs/M2M added after all three models exist.
        migrations.AddField(
            model_name='membersubscription',
            name='subscription',
            field=models.ForeignKey(to='members.Subscription'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='member',
            name='subscription',
            field=models.ManyToManyField(to='members.Subscription', through='members.MemberSubscription'),
            preserve_default=True,
        ),
    ]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration for the members app: Member, Subscription, and the
    MemberSubscription through table linking them."""

    # No prior migrations; this is the app's first migration.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('email', models.EmailField(max_length=75)),
                ('total_subscription', models.IntegerField(default=0)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Through table: one row per member/subscription link.
        migrations.CreateModel(
            name='MemberSubscription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('member', models.ForeignKey(to='members.Member')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('subreddit', models.CharField(max_length=200)),
                ('count', models.IntegerField(default=5)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Each (subreddit, count) pair may appear only once.
        migrations.AlterUniqueTogether(
            name='subscription',
            unique_together=set([('subreddit', 'count')]),
        ),
        # FK/M2M fields added after all three models exist.
        migrations.AddField(
            model_name='membersubscription',
            name='subscription',
            field=models.ForeignKey(to='members.Subscription'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='member',
            name='subscription',
            field=models.ManyToManyField(to='members.Subscription', through='members.MemberSubscription'),
            preserve_default=True,
        ),
    ]
|
flexible
|
{
"blob_id": "4e383130b185c6147315517d166ffe66be1be40d",
"index": 4577,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Member', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('email', models.EmailField(max_length=75\n )), ('total_subscription', models.IntegerField(default=0))],\n options={}, bases=(models.Model,)), migrations.CreateModel(name=\n 'MemberSubscription', fields=[('id', models.AutoField(verbose_name=\n 'ID', serialize=False, auto_created=True, primary_key=True)), (\n 'member', models.ForeignKey(to='members.Member'))], options={},\n bases=(models.Model,)), migrations.CreateModel(name='Subscription',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('subreddit', models.\n CharField(max_length=200)), ('count', models.IntegerField(default=5\n ))], options={}, bases=(models.Model,)), migrations.\n AlterUniqueTogether(name='subscription', unique_together=set([(\n 'subreddit', 'count')])), migrations.AddField(model_name=\n 'membersubscription', name='subscription', field=models.ForeignKey(\n to='members.Subscription'), preserve_default=True), migrations.\n AddField(model_name='member', name='subscription', field=models.\n ManyToManyField(to='members.Subscription', through=\n 'members.MemberSubscription'), preserve_default=True)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Member', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('email', models.EmailField(max_length=75\n )), ('total_subscription', models.IntegerField(default=0))],\n options={}, bases=(models.Model,)), migrations.CreateModel(name=\n 'MemberSubscription', fields=[('id', models.AutoField(verbose_name=\n 'ID', serialize=False, auto_created=True, primary_key=True)), (\n 'member', models.ForeignKey(to='members.Member'))], options={},\n bases=(models.Model,)), migrations.CreateModel(name='Subscription',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('subreddit', models.\n CharField(max_length=200)), ('count', models.IntegerField(default=5\n ))], options={}, bases=(models.Model,)), migrations.\n AlterUniqueTogether(name='subscription', unique_together=set([(\n 'subreddit', 'count')])), migrations.AddField(model_name=\n 'membersubscription', name='subscription', field=models.ForeignKey(\n to='members.Subscription'), preserve_default=True), migrations.\n AddField(model_name='member', name='subscription', field=models.\n ManyToManyField(to='members.Subscription', through=\n 'members.MemberSubscription'), preserve_default=True)]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Member',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('email', models.EmailField(max_length=75)),\n ('total_subscription', models.IntegerField(default=0)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='MemberSubscription',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('member', models.ForeignKey(to='members.Member')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('subreddit', models.CharField(max_length=200)),\n ('count', models.IntegerField(default=5)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='subscription',\n unique_together=set([('subreddit', 'count')]),\n ),\n migrations.AddField(\n model_name='membersubscription',\n name='subscription',\n field=models.ForeignKey(to='members.Subscription'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='member',\n name='subscription',\n field=models.ManyToManyField(to='members.Subscription', through='members.MemberSubscription'),\n preserve_default=True,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.conf.urls import url, include
from api.resources import PlayerResource, GameResource
from . import views
player_resource = PlayerResource()
game_resource = GameResource()
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^api/', include(player_resource.urls)),
url(r'^api/', include(game_resource.urls)),
]
|
normal
|
{
"blob_id": "ff959a388438a6d9c6d418e28c676ec3fd196ea0",
"index": 6076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplayer_resource = PlayerResource()\ngame_resource = GameResource()\nurlpatterns = [url('^$', views.index, name='index'), url('^api/', include(\n player_resource.urls)), url('^api/', include(game_resource.urls))]\n",
"step-3": "from django.conf.urls import url, include\nfrom api.resources import PlayerResource, GameResource\nfrom . import views\nplayer_resource = PlayerResource()\ngame_resource = GameResource()\nurlpatterns = [url('^$', views.index, name='index'), url('^api/', include(\n player_resource.urls)), url('^api/', include(game_resource.urls))]\n",
"step-4": "from django.conf.urls import url, include\nfrom api.resources import PlayerResource, GameResource\nfrom . import views\n\nplayer_resource = PlayerResource()\ngame_resource = GameResource()\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^api/', include(player_resource.urls)),\n url(r'^api/', include(game_resource.urls)),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Copyright (c) 2007 by the Pallets team.
Some rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
from sentry_sdk._compat import iteritems
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Dict
from typing import Iterator
from typing import Tuple
#
# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
# https://github.com/pallets/werkzeug/blob/0.14.1/werkzeug/datastructures.py#L1361
#
# We need this function because Django does not give us a "pure" http header
# dict. So we might as well use it for all WSGI integrations.
#
def _get_headers(environ):
# type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith("HTTP_") and key not in (
"HTTP_CONTENT_TYPE",
"HTTP_CONTENT_LENGTH",
):
yield key[5:].replace("_", "-").title(), value
elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
yield key.replace("_", "-").title(), value
#
# `get_host` comes from `werkzeug.wsgi.get_host`
# https://github.com/pallets/werkzeug/blob/1.0.1/src/werkzeug/wsgi.py#L145
#
def get_host(environ, use_x_forwarded_for=False):
# type: (Dict[str, str], bool) -> str
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and "HTTP_X_FORWARDED_HOST" in environ:
rv = environ["HTTP_X_FORWARDED_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv
|
normal
|
{
"blob_id": "53cd9d5a79e97bb1af69446a82c747248c3cc298",
"index": 1367,
"step-1": "<mask token>\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-4": "<mask token>\nfrom sentry_sdk._compat import iteritems\nfrom sentry_sdk._types import TYPE_CHECKING\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-5": "\"\"\"\nCopyright (c) 2007 by the Pallets team.\n\nSome rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND\nCONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,\nBUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\nCOPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\nNOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\nUSE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\nTHIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGE.\n\"\"\"\n\nfrom sentry_sdk._compat import iteritems\n\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\n#\n# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`\n# https://github.com/pallets/werkzeug/blob/0.14.1/werkzeug/datastructures.py#L1361\n#\n# We need this function because Django does not give us a \"pure\" http header\n# dict. So we might as well use it for all WSGI integrations.\n#\ndef _get_headers(environ):\n # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith(\"HTTP_\") and key not in (\n \"HTTP_CONTENT_TYPE\",\n \"HTTP_CONTENT_LENGTH\",\n ):\n yield key[5:].replace(\"_\", \"-\").title(), value\n elif key in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n yield key.replace(\"_\", \"-\").title(), value\n\n\n#\n# `get_host` comes from `werkzeug.wsgi.get_host`\n# https://github.com/pallets/werkzeug/blob/1.0.1/src/werkzeug/wsgi.py#L145\n#\ndef get_host(environ, use_x_forwarded_for=False):\n # type: (Dict[str, str], bool) -> str\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and \"HTTP_X_FORWARDED_HOST\" in environ:\n rv = environ[\"HTTP_X_FORWARDED_HOST\"]\n if environ[\"wsgi.url_scheme\"] == \"http\" and rv.endswith(\":80\"):\n rv = 
rv[:-3]\n elif environ[\"wsgi.url_scheme\"] == \"https\" and rv.endswith(\":443\"):\n rv = rv[:-4]\n elif environ.get(\"HTTP_HOST\"):\n rv = environ[\"HTTP_HOST\"]\n if environ[\"wsgi.url_scheme\"] == \"http\" and rv.endswith(\":80\"):\n rv = rv[:-3]\n elif environ[\"wsgi.url_scheme\"] == \"https\" and rv.endswith(\":443\"):\n rv = rv[:-4]\n elif environ.get(\"SERVER_NAME\"):\n rv = environ[\"SERVER_NAME\"]\n if (environ[\"wsgi.url_scheme\"], environ[\"SERVER_PORT\"]) not in (\n (\"https\", \"443\"),\n (\"http\", \"80\"),\n ):\n rv += \":\" + environ[\"SERVER_PORT\"]\n else:\n # In spite of the WSGI spec, SERVER_NAME might not be present.\n rv = \"unknown\"\n\n return rv\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DatasetFileManager(ABC):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DatasetFileManager(ABC):
@abstractmethod
def read_dataset(self):
pass
<|reserved_special_token_1|>
from abc import ABC, abstractmethod
class DatasetFileManager(ABC):
@abstractmethod
def read_dataset(self):
pass
|
flexible
|
{
"blob_id": "5ef65ace397be17be62625ed27b5753d15565d61",
"index": 555,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass DatasetFileManager(ABC):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DatasetFileManager(ABC):\n\n @abstractmethod\n def read_dataset(self):\n pass\n",
"step-4": "from abc import ABC, abstractmethod\n\n\nclass DatasetFileManager(ABC):\n\n @abstractmethod\n def read_dataset(self):\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from app.routes import home
from .home import bp as home
from .dashboard import bp as dashboard
|
normal
|
{
"blob_id": "358a4948ac1f60e0966328cebf401777042c3d0e",
"index": 5239,
"step-1": "<mask token>\n",
"step-2": "from app.routes import home\nfrom .home import bp as home\nfrom .dashboard import bp as dashboard\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['resolver']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from acres.resolution import resolver
__all__ = ['resolver']
<|reserved_special_token_1|>
"""
Package with a facade to the several expansion strategies.
"""
from acres.resolution import resolver
__all__ = ['resolver']
|
flexible
|
{
"blob_id": "e31267871453d87aee409f1c751c36908f7f151a",
"index": 804,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['resolver']\n",
"step-3": "<mask token>\nfrom acres.resolution import resolver\n__all__ = ['resolver']\n",
"step-4": "\"\"\"\nPackage with a facade to the several expansion strategies.\n\"\"\"\nfrom acres.resolution import resolver\n\n__all__ = ['resolver']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
def squeezed (client_name):
return client_name.replace('Индивидуальный предприниматель', 'ИП')
def get_kkm_filled_fn(max_fill=80):
## возвращает список ККМ с заполнением ФН больше max_fill в %
LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
API_URL = 'https://pk.platformaofd.ru/api/monitoring'
session = requests.Session()
print('-= подключение к серверу =-')
session.get(LOGIN_URL)
login_data = {
'email': 'efimova@21smart.ru',
'password': 'smart620514',
'username': 'efimova@21smart.ru',
'phone':''}
print('-= авторизация =-')
session.post(LOGIN_URL, data=login_data)
# запрос всех ККМ, кроме архивных (headers обязательно !)
headers = {'Content-Type': 'application/json;charset=UTF-8'}
payload = '{"badgeId":17,"type":"terminal","filterValues":[],"withArchive":0}'
print('-= получение данных с сервера =-')
r = session.post (API_URL, data=payload, headers=headers)
data_from_api = r.json()
all_kkm_list = data_from_api['result']['data']
kkm_quanity = len(all_kkm_list)
print('-= обработка данных =-')
kkm_with_filled_fn = []
for kkm in all_kkm_list:
fn_used = int(kkm['fnSpaceUsed'].strip("'%"))
if fn_used >= max_fill:
kkm_with_filled_fn.append(kkm)
return kkm_with_filled_fn
max_fill = 80
x = get_kkm_filled_fn(max_fill)
print(f'ККМ с заполненностью ФН выше {max_fill}%.')
for k in x:
print(f"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}")
|
normal
|
{
"blob_id": "cd2e03666a890d6e9ea0fcb45fe28510d684916d",
"index": 83,
"step-1": "<mask token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': 'efimova@21smart.ru', 'password': 'smart620514',\n 'username': 'efimova@21smart.ru', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': 'efimova@21smart.ru', 'password': 'smart620514',\n 'username': 'efimova@21smart.ru', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\n<mask token>\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n",
"step-3": "<mask token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': 'efimova@21smart.ru', 'password': 'smart620514',\n 'username': 'efimova@21smart.ru', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n",
"step-4": "import requests\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': 'efimova@21smart.ru', 'password': 'smart620514',\n 'username': 'efimova@21smart.ru', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n",
"step-5": "import requests\n\ndef squeezed (client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\ndef get_kkm_filled_fn(max_fill=80):\n## возвращает список ККМ с заполнением ФН больше max_fill в %\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n\n login_data = {\n 'email': 'efimova@21smart.ru',\n 'password': 'smart620514',\n 'username': 'efimova@21smart.ru',\n 'phone':''}\n\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n\n # запрос всех ККМ, кроме архивных (headers обязательно !)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}'\n print('-= получение данных с сервера =-')\n r = session.post (API_URL, data=payload, headers=headers)\n\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\")\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
@app.route('/transactions/isfull', methods=['GET'])
def isFull():
return jsonify(node.isFull()), 200
@app.route('/transactions/new', methods=['POST'])
def newTransaction():
transaction = request.get_json()
if node.isValidTxn(node.isValidChain(), transaction):
return transaction, 200
else:
return jsonify(False), 200
<|reserved_special_token_0|>
@app.route('/chain', methods=['GET'])
def get_chain():
return jsonify(node.chain), 200
@app.route('/pnodes/register', methods=['POST'])
def register_pnodes():
nodes = request.get_json()
print(nodes)
if type(nodes) == list:
if len(nodes) > 10 and nodes != []:
nodes = nodes[:10]
s = []
f = []
for addr in nodes:
if node.addPNode(addr):
s.append(addr)
else:
f.append(addr)
resp = {'Added PNodes': s, 'Not added pnodes': f}
return jsonify(resp), 200
resp = {'Error': 'Input format error'}
return jsonify(resp), 400
@app.route('/pnodes/size', methods=['GET'])
def pnodes_size():
return jsonify(len(node.pnodes)), 200
@app.route('/nodes', methods=['GET'])
def get_nodes():
nodes = list(node.nodes)
return jsonify(nodes), 200
<|reserved_special_token_0|>
@app.route('/chain/resolve', methods=['GET'])
def resolve_chain():
r = node.resolveConflicts()
if r:
return jsonify(r), 200
else:
print('Nothing')
return jsonify(r), 400
@app.route('/mine', methods=['GET'])
def mine():
mb = node.mine()
resp = {'Mined_block': mb}
return jsonify(resp), 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/transactions/isfull', methods=['GET'])
def isFull():
return jsonify(node.isFull()), 200
@app.route('/transactions/new', methods=['POST'])
def newTransaction():
transaction = request.get_json()
if node.isValidTxn(node.isValidChain(), transaction):
return transaction, 200
else:
return jsonify(False), 200
@app.route('/chain/last', methods=['GET'])
def last_block():
return jsonify(node.getLastBlock()), 200
@app.route('/chain', methods=['GET'])
def get_chain():
return jsonify(node.chain), 200
@app.route('/pnodes/register', methods=['POST'])
def register_pnodes():
nodes = request.get_json()
print(nodes)
if type(nodes) == list:
if len(nodes) > 10 and nodes != []:
nodes = nodes[:10]
s = []
f = []
for addr in nodes:
if node.addPNode(addr):
s.append(addr)
else:
f.append(addr)
resp = {'Added PNodes': s, 'Not added pnodes': f}
return jsonify(resp), 200
resp = {'Error': 'Input format error'}
return jsonify(resp), 400
@app.route('/pnodes/size', methods=['GET'])
def pnodes_size():
return jsonify(len(node.pnodes)), 200
@app.route('/nodes', methods=['GET'])
def get_nodes():
nodes = list(node.nodes)
return jsonify(nodes), 200
@app.route('/nodes/resolve', methods=['GET'])
def resolve_nodes():
added_nodes = node.resolveNodes()
if added_nodes:
return jsonify(added_nodes), 200
else:
return '0 nodes added', 400
@app.route('/chain/resolve', methods=['GET'])
def resolve_chain():
r = node.resolveConflicts()
if r:
return jsonify(r), 200
else:
print('Nothing')
return jsonify(r), 400
@app.route('/mine', methods=['GET'])
def mine():
mb = node.mine()
resp = {'Mined_block': mb}
return jsonify(resp), 200
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,
help='port to listen on')
args = parser.parse_args()
port = args.port
node.port = port
app.run(host='', port=port)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fm = filemanager()
node = Node(fm)
app = Flask(__name__)
@app.route('/transactions/isfull', methods=['GET'])
def isFull():
return jsonify(node.isFull()), 200
@app.route('/transactions/new', methods=['POST'])
def newTransaction():
transaction = request.get_json()
if node.isValidTxn(node.isValidChain(), transaction):
return transaction, 200
else:
return jsonify(False), 200
@app.route('/chain/last', methods=['GET'])
def last_block():
return jsonify(node.getLastBlock()), 200
@app.route('/chain', methods=['GET'])
def get_chain():
return jsonify(node.chain), 200
@app.route('/pnodes/register', methods=['POST'])
def register_pnodes():
nodes = request.get_json()
print(nodes)
if type(nodes) == list:
if len(nodes) > 10 and nodes != []:
nodes = nodes[:10]
s = []
f = []
for addr in nodes:
if node.addPNode(addr):
s.append(addr)
else:
f.append(addr)
resp = {'Added PNodes': s, 'Not added pnodes': f}
return jsonify(resp), 200
resp = {'Error': 'Input format error'}
return jsonify(resp), 400
@app.route('/pnodes/size', methods=['GET'])
def pnodes_size():
return jsonify(len(node.pnodes)), 200
@app.route('/nodes', methods=['GET'])
def get_nodes():
nodes = list(node.nodes)
return jsonify(nodes), 200
@app.route('/nodes/resolve', methods=['GET'])
def resolve_nodes():
added_nodes = node.resolveNodes()
if added_nodes:
return jsonify(added_nodes), 200
else:
return '0 nodes added', 400
@app.route('/chain/resolve', methods=['GET'])
def resolve_chain():
r = node.resolveConflicts()
if r:
return jsonify(r), 200
else:
print('Nothing')
return jsonify(r), 400
@app.route('/mine', methods=['GET'])
def mine():
mb = node.mine()
resp = {'Mined_block': mb}
return jsonify(resp), 200
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,
help='port to listen on')
args = parser.parse_args()
port = args.port
node.port = port
app.run(host='', port=port)
<|reserved_special_token_1|>
from flask import Flask, request, jsonify
from app import Node
from dbm2 import filemanager
fm = filemanager()
node = Node(fm)
app = Flask(__name__)
@app.route('/transactions/isfull', methods=['GET'])
def isFull():
return jsonify(node.isFull()), 200
@app.route('/transactions/new', methods=['POST'])
def newTransaction():
transaction = request.get_json()
if node.isValidTxn(node.isValidChain(), transaction):
return transaction, 200
else:
return jsonify(False), 200
@app.route('/chain/last', methods=['GET'])
def last_block():
return jsonify(node.getLastBlock()), 200
@app.route('/chain', methods=['GET'])
def get_chain():
return jsonify(node.chain), 200
@app.route('/pnodes/register', methods=['POST'])
def register_pnodes():
nodes = request.get_json()
print(nodes)
if type(nodes) == list:
if len(nodes) > 10 and nodes != []:
nodes = nodes[:10]
s = []
f = []
for addr in nodes:
if node.addPNode(addr):
s.append(addr)
else:
f.append(addr)
resp = {'Added PNodes': s, 'Not added pnodes': f}
return jsonify(resp), 200
resp = {'Error': 'Input format error'}
return jsonify(resp), 400
@app.route('/pnodes/size', methods=['GET'])
def pnodes_size():
return jsonify(len(node.pnodes)), 200
@app.route('/nodes', methods=['GET'])
def get_nodes():
nodes = list(node.nodes)
return jsonify(nodes), 200
@app.route('/nodes/resolve', methods=['GET'])
def resolve_nodes():
added_nodes = node.resolveNodes()
if added_nodes:
return jsonify(added_nodes), 200
else:
return '0 nodes added', 400
@app.route('/chain/resolve', methods=['GET'])
def resolve_chain():
r = node.resolveConflicts()
if r:
return jsonify(r), 200
else:
print('Nothing')
return jsonify(r), 400
@app.route('/mine', methods=['GET'])
def mine():
mb = node.mine()
resp = {'Mined_block': mb}
return jsonify(resp), 200
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,
help='port to listen on')
args = parser.parse_args()
port = args.port
node.port = port
app.run(host='', port=port)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask, request, jsonify
from app import Node
from dbm2 import filemanager
fm = filemanager()
node = Node(fm)
app = Flask(__name__)
@app.route("/transactions/isfull",methods=['GET'])
def isFull():
return jsonify(node.isFull()), 200
@app.route("/transactions/new",methods=["POST"])
def newTransaction():
transaction = request.get_json()
if node.isValidTxn(node.isValidChain(),transaction):
return transaction, 200
else:
return jsonify(False), 200
@app.route("/chain/last",methods=["GET"])
def last_block():
return jsonify(node.getLastBlock()), 200
@app.route("/chain",methods=["GET"])
def get_chain():
return jsonify(node.chain), 200
@app.route("/pnodes/register", methods=["POST"])
def register_pnodes():
    """Register up to 10 peer-node addresses posted as a JSON list.

    Responds 200 with which addresses were added / rejected, or 400 when
    the payload is not a list.
    """
    payload = request.get_json()
    print(payload)  # debug trace of the raw payload
    if type(payload) != list:
        return jsonify({"Error": "Input format error"}), 400
    # Cap the batch at the first 10 addresses.
    if len(payload) > 10:
        payload = payload[:10]
    added, rejected = [], []
    for addr in payload:
        (added if node.addPNode(addr) else rejected).append(addr)
    return jsonify({"Added PNodes": added, "Not added pnodes": rejected}), 200
@app.route("/pnodes/size",methods=["GET"])
def pnodes_size():
    """Report the number of known peer nodes as JSON."""
    return jsonify(len(node.pnodes)), 200
@app.route("/nodes", methods=["GET"])
def get_nodes():
    """Return the known nodes as a JSON list."""
    return jsonify(list(node.nodes)), 200
@app.route("/nodes/resolve",methods=["GET"])
def resolve_nodes():
    """Discover/merge peer nodes; 200 with the additions or 400 when none."""
    added_nodes = node.resolveNodes()
    if added_nodes:
        return jsonify(added_nodes), 200
    else:
        # Falsy result (empty/None) means no new nodes were learned.
        return "0 nodes added",400
@app.route("/chain/resolve",methods=["GET"])
def resolve_chain():
    """Run conflict resolution (consensus); 200 if the chain changed, else 400."""
    r = node.resolveConflicts()
    if r:
        return jsonify(r), 200
    else:
        print("Nothing")  # NOTE(review): debug trace; prefer logging
        return jsonify(r), 400
@app.route("/mine",methods=["GET"])
def mine():
    """Mine a single block and return it wrapped as {'Mined_block': ...}."""
    mb = node.mine()
    resp = {"Mined_block":mb}
    return jsonify(resp), 200
if __name__=="__main__":
    # Parse the listen port, record it on the node, then start the Flask
    # development server.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-p","--port",default=node.DEFAULT_PORT,type=int,help='port to listen on')
    args = parser.parse_args()
    port = args.port
    node.port=port
    # NOTE(review): host="" binds the dev server on all interfaces —
    # confirm this exposure is intended.
    app.run(host="",port=port)
|
flexible
|
{
"blob_id": "45b46a08d8b304ac12baf34e0916b249b560418f",
"index": 7459,
"step-1": "<mask token>\n\n\n@app.route('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\n@app.route('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\n<mask token>\n\n\n@app.route('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\n@app.route('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\n@app.route('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\n@app.route('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\n<mask token>\n\n\n@app.route('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\n@app.route('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\n@app.route('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\n@app.route('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\n@app.route('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\n@app.route('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\n@app.route('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\n@app.route('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\n@app.route('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,\n help='port to listen on')\n args = parser.parse_args()\n 
port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-3": "<mask token>\nfm = filemanager()\nnode = Node(fm)\napp = Flask(__name__)\n\n\n@app.route('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\n@app.route('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\n@app.route('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\n@app.route('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\n@app.route('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\n@app.route('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\n@app.route('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\n@app.route('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\n@app.route('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, 
type=int,\n help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-4": "from flask import Flask, request, jsonify\nfrom app import Node\nfrom dbm2 import filemanager\nfm = filemanager()\nnode = Node(fm)\napp = Flask(__name__)\n\n\n@app.route('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\n@app.route('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\n@app.route('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\n@app.route('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\n@app.route('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\n@app.route('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\n@app.route('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\n@app.route('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\n@app.route('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == '__main__':\n import argparse\n parser = 
argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,\n help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-5": "#!/usr/bin/env python3\n\n# -*- coding: utf-8 -*-\n\n\nfrom flask import Flask, request, jsonify\nfrom app import Node\nfrom dbm2 import filemanager\n\nfm = filemanager()\nnode = Node(fm)\n\napp = Flask(__name__)\n\n@app.route(\"/transactions/isfull\",methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n@app.route(\"/transactions/new\",methods=[\"POST\"])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(),transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n@app.route(\"/chain/last\",methods=[\"GET\"])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n@app.route(\"/chain\",methods=[\"GET\"])\ndef get_chain():\n return jsonify(node.chain), 200\n@app.route(\"/pnodes/register\",methods=[\"POST\"])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes)==list:\n if len(nodes)>10 and nodes!=[]:\n nodes = nodes[:10]\n s = [] #succeed\n f = [] #failed\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {\"Added PNodes\":s,\n \"Not added pnodes\":f}\n return jsonify(resp), 200\n resp = {\"Error\":\"Input format error\"}\n return jsonify(resp), 400\n@app.route(\"/pnodes/size\",methods=[\"GET\"])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n@app.route(\"/nodes\",methods=[\"GET\"])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n@app.route(\"/nodes/resolve\",methods=[\"GET\"])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return \"0 nodes added\",400\n\n@app.route(\"/chain/resolve\",methods=[\"GET\"])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print(\"Nothing\")\n return jsonify(r), 400\n@app.route(\"/mine\",methods=[\"GET\"])\ndef mine():\n mb = node.mine()\n resp = {\"Mined_block\":mb}\n return jsonify(resp), 200\nif 
__name__==\"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\",\"--port\",default=node.DEFAULT_PORT,type=int,help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port=port\n app.run(host=\"\",port=port)\n \n",
"step-ids": [
8,
11,
12,
13,
14
]
}
|
[
8,
11,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(x & y)
print(x >> y)
print(x ^ y)
print(x | y)
<|reserved_special_token_1|>
# Demonstrate Python's bitwise operators on two sample integers.
x = 25  # 0b011001
y = 43  # 0b101011
# AND, right-shift, XOR, OR — printed in the same order as before.
for value in (x & y, x >> y, x ^ y, x | y):
    print(value)
|
flexible
|
{
"blob_id": "34d011727c93bb4c8ccf64017e7185717ef98667",
"index": 2603,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(x & y)\nprint(x >> y)\nprint(x ^ y)\nprint(x | y)\n",
"step-3": "x = 25\ny = 43\nprint(x & y)\nprint(x >> y)\nprint(x ^ y)\nprint(x | y)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def hashfile(path, blocksize=65536):
afile = open(path, 'rb')
hasher = hashlib.md5()
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
afile.close()
return hasher.hexdigest()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def hashfile(path, blocksize=65536):
afile = open(path, 'rb')
hasher = hashlib.md5()
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
afile.close()
return hasher.hexdigest()
def make_duplicate_list(filepath):
unique_hashes = {}
duplicate_files = {}
for dir_name, subdir_list, file_list in os.walk(filepath):
for filename in file_list:
path = os.path.join(dir_name, filename)
file_hash = hashfile(path)
if file_hash in unique_hashes:
if file_hash not in duplicate_files:
duplicate_files[file_hash] = []
duplicate_files[file_hash].append(unique_hashes[file_hash])
duplicate_files[file_hash].append(path)
else:
unique_hashes[file_hash] = path
return duplicate_files
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def hashfile(path, blocksize=65536):
afile = open(path, 'rb')
hasher = hashlib.md5()
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
afile.close()
return hasher.hexdigest()
def make_duplicate_list(filepath):
unique_hashes = {}
duplicate_files = {}
for dir_name, subdir_list, file_list in os.walk(filepath):
for filename in file_list:
path = os.path.join(dir_name, filename)
file_hash = hashfile(path)
if file_hash in unique_hashes:
if file_hash not in duplicate_files:
duplicate_files[file_hash] = []
duplicate_files[file_hash].append(unique_hashes[file_hash])
duplicate_files[file_hash].append(path)
else:
unique_hashes[file_hash] = path
return duplicate_files
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='duplicates detector')
parser.add_argument('path_to_folder', help=
'path to folder containig duplicates')
args = parser.parse_args()
path = args.path_to_folder
duplicates = make_duplicate_list(path)
for idx, (key, value) in enumerate(duplicates.items(), 1):
print('{}) {} files with {} MD5 hash were ' + 'found:'.format(idx,
len(value), key))
for idx, folder in enumerate(value, 1):
print(' {}. {}'.format(idx, folder))
<|reserved_special_token_1|>
import os
import hashlib
import argparse
def hashfile(path, blocksize=65536):
afile = open(path, 'rb')
hasher = hashlib.md5()
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
afile.close()
return hasher.hexdigest()
def make_duplicate_list(filepath):
unique_hashes = {}
duplicate_files = {}
for dir_name, subdir_list, file_list in os.walk(filepath):
for filename in file_list:
path = os.path.join(dir_name, filename)
file_hash = hashfile(path)
if file_hash in unique_hashes:
if file_hash not in duplicate_files:
duplicate_files[file_hash] = []
duplicate_files[file_hash].append(unique_hashes[file_hash])
duplicate_files[file_hash].append(path)
else:
unique_hashes[file_hash] = path
return duplicate_files
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='duplicates detector')
parser.add_argument('path_to_folder', help=
'path to folder containig duplicates')
args = parser.parse_args()
path = args.path_to_folder
duplicates = make_duplicate_list(path)
for idx, (key, value) in enumerate(duplicates.items(), 1):
print('{}) {} files with {} MD5 hash were ' + 'found:'.format(idx,
len(value), key))
for idx, folder in enumerate(value, 1):
print(' {}. {}'.format(idx, folder))
<|reserved_special_token_1|>
import os
import hashlib
import argparse
def hashfile(path, blocksize=65536):
    """Return the hex MD5 digest of the file at *path*.

    The file is read in *blocksize*-byte chunks so arbitrarily large files
    can be hashed without loading them into memory.
    """
    hasher = hashlib.md5()
    # 'with' guarantees the handle is closed even if read() raises
    # (the original open()/close() pair leaked on error).
    with open(path, 'rb') as afile:
        buf = afile.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(blocksize)
    return hasher.hexdigest()
def make_duplicate_list(filepath):
    """Walk *filepath* recursively and group duplicate files by content.

    Returns a dict mapping an MD5 hex digest to the list of paths whose
    contents share that digest; only digests seen more than once appear.
    """
    unique_hashes = {}
    duplicate_files = {}
    for dir_name, subdir_list, file_list in os.walk(filepath):
        for filename in file_list:
            path = os.path.join(dir_name, filename)
            file_hash = hashfile(path)
            if file_hash in unique_hashes:
                if file_hash not in duplicate_files:
                    # First duplicate for this digest: seed the group with
                    # the path seen earlier. (Bugfix: the original
                    # re-appended the first-seen path for EVERY later
                    # duplicate, so 3+ copies produced repeated entries.)
                    duplicate_files[file_hash] = [unique_hashes[file_hash]]
                duplicate_files[file_hash].append(path)
            else:
                unique_hashes[file_hash] = path
    return duplicate_files
if __name__ == '__main__':
    # CLI: scan the given folder tree and report groups of duplicate files.
    parser = argparse.ArgumentParser(description="duplicates detector")
    parser.add_argument("path_to_folder",
                        help="path to folder containing duplicates")
    args = parser.parse_args()
    duplicates = make_duplicate_list(args.path_to_folder)
    for group_no, (digest, paths) in enumerate(duplicates.items(), 1):
        # Bugfix: .format() previously applied only to the "found:" piece
        # (attribute access binds tighter than '+'), so the {} placeholders
        # were printed literally. Also stopped shadowing the outer loop index.
        print("{}) {} files with {} MD5 hash were found:".format(
            group_no, len(paths), digest))
        for file_no, folder in enumerate(paths, 1):
            print(" {}. {}".format(file_no, folder))
|
flexible
|
{
"blob_id": "e99c158e54fd86b00e4e045e7fb28d961089800d",
"index": 3289,
"step-1": "<mask token>\n\n\ndef hashfile(path, blocksize=65536):\n afile = open(path, 'rb')\n hasher = hashlib.md5()\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n afile.close()\n return hasher.hexdigest()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef hashfile(path, blocksize=65536):\n afile = open(path, 'rb')\n hasher = hashlib.md5()\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n afile.close()\n return hasher.hexdigest()\n\n\ndef make_duplicate_list(filepath):\n unique_hashes = {}\n duplicate_files = {}\n for dir_name, subdir_list, file_list in os.walk(filepath):\n for filename in file_list:\n path = os.path.join(dir_name, filename)\n file_hash = hashfile(path)\n if file_hash in unique_hashes:\n if file_hash not in duplicate_files:\n duplicate_files[file_hash] = []\n duplicate_files[file_hash].append(unique_hashes[file_hash])\n duplicate_files[file_hash].append(path)\n else:\n unique_hashes[file_hash] = path\n return duplicate_files\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef hashfile(path, blocksize=65536):\n afile = open(path, 'rb')\n hasher = hashlib.md5()\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n afile.close()\n return hasher.hexdigest()\n\n\ndef make_duplicate_list(filepath):\n unique_hashes = {}\n duplicate_files = {}\n for dir_name, subdir_list, file_list in os.walk(filepath):\n for filename in file_list:\n path = os.path.join(dir_name, filename)\n file_hash = hashfile(path)\n if file_hash in unique_hashes:\n if file_hash not in duplicate_files:\n duplicate_files[file_hash] = []\n duplicate_files[file_hash].append(unique_hashes[file_hash])\n duplicate_files[file_hash].append(path)\n else:\n unique_hashes[file_hash] = path\n return duplicate_files\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='duplicates detector')\n parser.add_argument('path_to_folder', help=\n 'path to folder containig duplicates')\n args = parser.parse_args()\n path = args.path_to_folder\n duplicates = make_duplicate_list(path)\n for idx, (key, value) in enumerate(duplicates.items(), 1):\n print('{}) {} files with {} MD5 hash were ' + 'found:'.format(idx,\n len(value), key))\n for idx, folder in enumerate(value, 1):\n print(' {}. {}'.format(idx, folder))\n",
"step-4": "import os\nimport hashlib\nimport argparse\n\n\ndef hashfile(path, blocksize=65536):\n afile = open(path, 'rb')\n hasher = hashlib.md5()\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n afile.close()\n return hasher.hexdigest()\n\n\ndef make_duplicate_list(filepath):\n unique_hashes = {}\n duplicate_files = {}\n for dir_name, subdir_list, file_list in os.walk(filepath):\n for filename in file_list:\n path = os.path.join(dir_name, filename)\n file_hash = hashfile(path)\n if file_hash in unique_hashes:\n if file_hash not in duplicate_files:\n duplicate_files[file_hash] = []\n duplicate_files[file_hash].append(unique_hashes[file_hash])\n duplicate_files[file_hash].append(path)\n else:\n unique_hashes[file_hash] = path\n return duplicate_files\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='duplicates detector')\n parser.add_argument('path_to_folder', help=\n 'path to folder containig duplicates')\n args = parser.parse_args()\n path = args.path_to_folder\n duplicates = make_duplicate_list(path)\n for idx, (key, value) in enumerate(duplicates.items(), 1):\n print('{}) {} files with {} MD5 hash were ' + 'found:'.format(idx,\n len(value), key))\n for idx, folder in enumerate(value, 1):\n print(' {}. {}'.format(idx, folder))\n",
"step-5": "import os\nimport hashlib\nimport argparse\n\n\ndef hashfile(path, blocksize=65536):\n afile = open(path, 'rb')\n hasher = hashlib.md5()\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n afile.close()\n return hasher.hexdigest()\n\n\ndef make_duplicate_list(filepath):\n unique_hashes = {}\n duplicate_files = {}\n for dir_name, subdir_list, file_list in os.walk(filepath):\n for filename in file_list:\n path = os.path.join(dir_name, filename)\n file_hash = hashfile(path)\n if file_hash in unique_hashes:\n if file_hash not in duplicate_files:\n # More than 2 duplicate files with same hash can exist,\n # so list of filepaths is created.\n duplicate_files[file_hash] = []\n duplicate_files[file_hash].append(unique_hashes[file_hash])\n duplicate_files[file_hash].append(path)\n else:\n unique_hashes[file_hash] = path\n return duplicate_files\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"duplicates detector\")\n parser.add_argument(\"path_to_folder\",\n help=\"path to folder containig duplicates\")\n args = parser.parse_args()\n path = args.path_to_folder\n duplicates = make_duplicate_list(path)\n for idx, (key, value) in enumerate(duplicates.items(), 1):\n print(\"{}) {} files with {} MD5 hash were \" +\n \"found:\".format(idx, len(value), key))\n for idx, folder in enumerate(value, 1):\n print(\" {}. {}\".format(idx, folder))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def erato(n):
m = int(n ** 0.5)
sieve = [(True) for _ in range(n + 1)]
sieve[1] = False
for i in range(2, m + 1):
if sieve[i]:
for j in range(i + i, n + 1, i):
sieve[j] = False
return sieve
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def erato(n):
m = int(n ** 0.5)
sieve = [(True) for _ in range(n + 1)]
sieve[1] = False
for i in range(2, m + 1):
if sieve[i]:
for j in range(i + i, n + 1, i):
sieve[j] = False
return sieve
input()
<|reserved_special_token_0|>
for i in l:
if prime_l[i]:
ans += 1
print(ans)
<|reserved_special_token_1|>
def erato(n):
m = int(n ** 0.5)
sieve = [(True) for _ in range(n + 1)]
sieve[1] = False
for i in range(2, m + 1):
if sieve[i]:
for j in range(i + i, n + 1, i):
sieve[j] = False
return sieve
input()
l = list(map(int, input().split()))
max_n = max(l)
prime_l = erato(max_n)
ans = 0
for i in l:
if prime_l[i]:
ans += 1
print(ans)
<|reserved_special_token_1|>
def erato(n):
    """Sieve of Eratosthenes.

    Returns a list of n + 1 booleans where index i tells whether i is
    prime for i >= 2; index 1 is False and index 0 is left True (unused
    by the caller, which only looks up values taken from the input).
    """
    limit = int(n ** 0.5)
    is_prime = [True] * (n + 1)
    is_prime[1] = False
    for p in range(2, limit + 1):
        if not is_prime[p]:
            continue
        # Strike out every multiple of p starting at 2*p.
        for multiple in range(2 * p, n + 1, p):
            is_prime[multiple] = False
    return is_prime
# First input line (the count) is read and discarded; the second line holds
# the space-separated integers to test.
input()
l = list(map(int, input().split()))
max_n = max(l)
prime_l = erato(max_n)  # sieve once up to the largest value, then look up
ans = 0
for i in l:
    if prime_l[i]:
        ans += 1
# Number of primes among the input values.
print(ans)
|
flexible
|
{
"blob_id": "28eb1d7a698480028fb64827746b3deec0f66a9a",
"index": 6224,
"step-1": "<mask token>\n",
"step-2": "def erato(n):\n m = int(n ** 0.5)\n sieve = [(True) for _ in range(n + 1)]\n sieve[1] = False\n for i in range(2, m + 1):\n if sieve[i]:\n for j in range(i + i, n + 1, i):\n sieve[j] = False\n return sieve\n\n\n<mask token>\n",
"step-3": "def erato(n):\n m = int(n ** 0.5)\n sieve = [(True) for _ in range(n + 1)]\n sieve[1] = False\n for i in range(2, m + 1):\n if sieve[i]:\n for j in range(i + i, n + 1, i):\n sieve[j] = False\n return sieve\n\n\ninput()\n<mask token>\nfor i in l:\n if prime_l[i]:\n ans += 1\nprint(ans)\n",
"step-4": "def erato(n):\n m = int(n ** 0.5)\n sieve = [(True) for _ in range(n + 1)]\n sieve[1] = False\n for i in range(2, m + 1):\n if sieve[i]:\n for j in range(i + i, n + 1, i):\n sieve[j] = False\n return sieve\n\n\ninput()\nl = list(map(int, input().split()))\nmax_n = max(l)\nprime_l = erato(max_n)\nans = 0\nfor i in l:\n if prime_l[i]:\n ans += 1\nprint(ans)\n",
"step-5": "def erato(n):\n m = int(n ** 0.5)\n sieve = [True for _ in range(n+1)]\n sieve[1] = False\n\n for i in range(2, m+1):\n if sieve[i]:\n for j in range(i+i, n+1, i):\n sieve[j] = False\n return sieve\n\ninput()\nl = list(map(int, input().split()))\nmax_n = max(l)\nprime_l = erato(max_n)\nans = 0\nfor i in l:\n if prime_l[i]:\n ans += 1\nprint(ans)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render
from django.views.generic import DetailView
from .models import Course
# Create your views here.
def courses_list_view(request):
    """Render the course list page with every course and a localized title."""
    context = {
        "object_list": Course.objects.all(),
        "title": "دوره ها",
    }
    return render(request, "courses/courses_list.html", context)
class CoursesDetailView(DetailView):
    """Detail page for a single Course, rendered with courses/course.html."""
    queryset = Course.objects.all()
    template_name = "courses/course.html"
    def get_context_data(self, *args, object_list=None, **kwargs):
        """Return the default DetailView context (printed for debugging)."""
        context = super(CoursesDetailView, self).get_context_data(*args, **kwargs)
        print(context)  # NOTE(review): debug leftover — remove or use logging
        return context
|
normal
|
{
"blob_id": "aaa9665ac6d639e681fddd032058f490ce36d12a",
"index": 7684,
"step-1": "<mask token>\n\n\nclass CoursesDetailView(DetailView):\n <mask token>\n <mask token>\n\n def get_context_data(self, *args, object_list=None, **kwargs):\n context = super(CoursesDetailView, self).get_context_data(*args, **\n kwargs)\n print(context)\n return context\n",
"step-2": "<mask token>\n\n\nclass CoursesDetailView(DetailView):\n queryset = Course.objects.all()\n template_name = 'courses/course.html'\n\n def get_context_data(self, *args, object_list=None, **kwargs):\n context = super(CoursesDetailView, self).get_context_data(*args, **\n kwargs)\n print(context)\n return context\n",
"step-3": "<mask token>\n\n\ndef courses_list_view(request):\n products = Course.objects.all()\n title = 'دوره ها'\n context = {'object_list': products, 'title': title}\n return render(request, 'courses/courses_list.html', context)\n\n\nclass CoursesDetailView(DetailView):\n queryset = Course.objects.all()\n template_name = 'courses/course.html'\n\n def get_context_data(self, *args, object_list=None, **kwargs):\n context = super(CoursesDetailView, self).get_context_data(*args, **\n kwargs)\n print(context)\n return context\n",
"step-4": "from django.shortcuts import render\nfrom django.views.generic import DetailView\nfrom .models import Course\n\n\ndef courses_list_view(request):\n products = Course.objects.all()\n title = 'دوره ها'\n context = {'object_list': products, 'title': title}\n return render(request, 'courses/courses_list.html', context)\n\n\nclass CoursesDetailView(DetailView):\n queryset = Course.objects.all()\n template_name = 'courses/course.html'\n\n def get_context_data(self, *args, object_list=None, **kwargs):\n context = super(CoursesDetailView, self).get_context_data(*args, **\n kwargs)\n print(context)\n return context\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic import DetailView\nfrom .models import Course\n\n\n# Create your views here.\n\ndef courses_list_view(request):\n products = Course.objects.all()\n title = \"دوره ها\"\n context = {\n \"object_list\": products,\n \"title\": title,\n }\n\n\n return render(request, \"courses/courses_list.html\", context)\n\n\nclass CoursesDetailView(DetailView):\n queryset = Course.objects.all()\n template_name = \"courses/course.html\"\n def get_context_data(self, *args, object_list=None, **kwargs):\n context = super(CoursesDetailView, self).get_context_data(*args, **kwargs)\n print(context)\n return context\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Queue:
    """Minimal FIFO queue: items are inserted at the front of a list and
    popped from the back, so the oldest item comes out first."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """True when the queue holds no items."""
        return len(self.items) == 0

    def enqueue(self, item):
        """Add *item* to the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the oldest item."""
        return self.items.pop()

    def size(self):
        """Number of queued items."""
        return len(self.items)
def buildQueue():
    """Collect frontier cells into a fresh Queue.

    A conquered cell is a frontier cell while at least one of its four
    neighbours is not yet conquered; cells fully surrounded by conquered
    neighbours are marked in ``neverQueue`` so later calls skip them.
    Relies on module globals boardWidth/boardHeight/neverQueue/conqueredAt.
    """
    floodQueue = Queue()
    for x in range(boardWidth):
        for y in range(boardHeight):
            if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):
                # Check whether every in-bounds neighbour is already conquered.
                noFrontier = True
                if x > 0:
                    noFrontier = noFrontier & conqueredAt[x - 1][y]
                if x < boardWidth - 1:
                    noFrontier = noFrontier & conqueredAt[x + 1][y]
                if y > 0:
                    noFrontier = noFrontier & conqueredAt[x][y - 1]
                if y < boardHeight - 1:
                    noFrontier = noFrontier & conqueredAt[x][y + 1]
                if noFrontier:
                    # Interior cell: never needs queueing again.
                    neverQueue[x][y] = True
                else:
                    floodQueue.enqueue([x, y])
    return floodQueue
<|reserved_special_token_0|>
def main():
    """Event loop for the Flood-It board: draw, read keys 1-9/0, flood-fill."""
    global FPSCLOCK, DISPLAYSURF
    pygame.init()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.display.set_caption('Flood it')
    generateRandomBoard(boardWidth, boardHeight)
    lastPaletteClicked = None
    while True:
        paletteClicked = None
        DISPLAYSURF.fill(bgColor)
        drawBoard()
        drawPalettes()
        pygame.display.update()
        # Keys 1..9 select palettes 0..8; key 0 selects palette 9.
        for event in pygame.event.get(KEYUP):
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
            elif event.key == K_0:
                paletteClicked = 9
            elif event.key == K_1:
                paletteClicked = 0
            elif event.key == K_2:
                paletteClicked = 1
            elif event.key == K_3:
                paletteClicked = 2
            elif event.key == K_4:
                paletteClicked = 3
            elif event.key == K_5:
                paletteClicked = 4
            elif event.key == K_6:
                paletteClicked = 5
            elif event.key == K_7:
                paletteClicked = 6
            elif event.key == K_8:
                paletteClicked = 7
            elif event.key == K_9:
                paletteClicked = 8
        # NOTE(review): this unconditionally overrides any key press with a
        # random palette every frame — looks like an auto-play/demo hack;
        # confirm it is intended before shipping.
        paletteClicked = random.randint(0, 9)
        pygame.time.wait(50)
        if paletteClicked != None and paletteClicked != lastPaletteClicked:
            # Only flood when the chosen colour actually changes.
            lastPaletteClicked = paletteClicked
            floodFill(board[0][0], paletteClicked, buildQueue())
            drawBoard()
            pygame.display.update()
<|reserved_special_token_0|>
def drawBoard():
    """Paint every board cell with its palette colour onto DISPLAYSURF."""
    for x in range(boardWidth):
        for y in range(boardHeight):
            left, top = leftTopPixelCoordOfBox(x, y)
            pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,
                top, boxSize, boxSize))
    # NOTE(review): blitting the surface onto itself at (0, 0) is a no-op —
    # likely leftover from an off-screen-buffer version.
    DISPLAYSURF.blit(DISPLAYSURF, (0, 0))
def drawPalettes():
    """Draw the numbered colour-palette swatches down the left edge."""
    numColors = len(paletteColors)
    textSize = 30
    font = pygame.font.Font(None, textSize)
    for i in range(numColors):
        # Stack swatches vertically with a fixed gap.
        top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE
        left = 10
        pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,
            PALETTESIZE, PALETTESIZE))
        # Label each swatch with its hotkey (1..9, then 0 for the tenth).
        textImg = font.render(str((i + 1) % 10), 1, bgColor)
        # NOTE(review): the '0 * (...)' terms cancel to zero — dead centering
        # math; confirm whether centering was meant to be enabled.
        DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 -
            textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))
<|reserved_special_token_0|>
def leftTopPixelCoordOfBox(boxx, boxy):
    """Convert board cell coordinates to the pixel position of the cell's
    top-left corner, centering the board in the window.

    The +23 is a magic horizontal offset — presumably to clear the palette
    strip on the left; confirm against the layout.
    """
    xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)
    ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)
    return boxx * boxSize + xmargin, boxy * boxSize + ymargin
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def enqueue(self, item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def size(self):
return len(self.items)
def buildQueue():
floodQueue = Queue()
for x in range(boardWidth):
for y in range(boardHeight):
if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):
noFrontier = True
if x > 0:
noFrontier = noFrontier & conqueredAt[x - 1][y]
if x < boardWidth - 1:
noFrontier = noFrontier & conqueredAt[x + 1][y]
if y > 0:
noFrontier = noFrontier & conqueredAt[x][y - 1]
if y < boardHeight - 1:
noFrontier = noFrontier & conqueredAt[x][y + 1]
if noFrontier:
neverQueue[x][y] = True
else:
floodQueue.enqueue([x, y])
return floodQueue
<|reserved_special_token_0|>
def main():
global FPSCLOCK, DISPLAYSURF
pygame.init()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption('Flood it')
generateRandomBoard(boardWidth, boardHeight)
lastPaletteClicked = None
while True:
paletteClicked = None
DISPLAYSURF.fill(bgColor)
drawBoard()
drawPalettes()
pygame.display.update()
for event in pygame.event.get(KEYUP):
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
elif event.key == K_0:
paletteClicked = 9
elif event.key == K_1:
paletteClicked = 0
elif event.key == K_2:
paletteClicked = 1
elif event.key == K_3:
paletteClicked = 2
elif event.key == K_4:
paletteClicked = 3
elif event.key == K_5:
paletteClicked = 4
elif event.key == K_6:
paletteClicked = 5
elif event.key == K_7:
paletteClicked = 6
elif event.key == K_8:
paletteClicked = 7
elif event.key == K_9:
paletteClicked = 8
paletteClicked = random.randint(0, 9)
pygame.time.wait(50)
if paletteClicked != None and paletteClicked != lastPaletteClicked:
lastPaletteClicked = paletteClicked
floodFill(board[0][0], paletteClicked, buildQueue())
drawBoard()
pygame.display.update()
def generateRandomBoard(width, height):
for x in range(width):
column = []
for y in range(height):
column.append(random.randint(0, len(paletteColors) - 1))
board.append(column)
def drawBoard():
for x in range(boardWidth):
for y in range(boardHeight):
left, top = leftTopPixelCoordOfBox(x, y)
pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,
top, boxSize, boxSize))
DISPLAYSURF.blit(DISPLAYSURF, (0, 0))
def drawPalettes():
numColors = len(paletteColors)
textSize = 30
font = pygame.font.Font(None, textSize)
for i in range(numColors):
top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE
left = 10
pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,
PALETTESIZE, PALETTESIZE))
textImg = font.render(str((i + 1) % 10), 1, bgColor)
DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 -
textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))
<|reserved_special_token_0|>
def leftTopPixelCoordOfBox(boxx, boxy):
xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)
ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)
return boxx * boxSize + xmargin, boxy * boxSize + ymargin
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def enqueue(self, item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def size(self):
return len(self.items)
def buildQueue():
floodQueue = Queue()
for x in range(boardWidth):
for y in range(boardHeight):
if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):
noFrontier = True
if x > 0:
noFrontier = noFrontier & conqueredAt[x - 1][y]
if x < boardWidth - 1:
noFrontier = noFrontier & conqueredAt[x + 1][y]
if y > 0:
noFrontier = noFrontier & conqueredAt[x][y - 1]
if y < boardHeight - 1:
noFrontier = noFrontier & conqueredAt[x][y + 1]
if noFrontier:
neverQueue[x][y] = True
else:
floodQueue.enqueue([x, y])
return floodQueue
<|reserved_special_token_0|>
def main():
global FPSCLOCK, DISPLAYSURF
pygame.init()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption('Flood it')
generateRandomBoard(boardWidth, boardHeight)
lastPaletteClicked = None
while True:
paletteClicked = None
DISPLAYSURF.fill(bgColor)
drawBoard()
drawPalettes()
pygame.display.update()
for event in pygame.event.get(KEYUP):
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
elif event.key == K_0:
paletteClicked = 9
elif event.key == K_1:
paletteClicked = 0
elif event.key == K_2:
paletteClicked = 1
elif event.key == K_3:
paletteClicked = 2
elif event.key == K_4:
paletteClicked = 3
elif event.key == K_5:
paletteClicked = 4
elif event.key == K_6:
paletteClicked = 5
elif event.key == K_7:
paletteClicked = 6
elif event.key == K_8:
paletteClicked = 7
elif event.key == K_9:
paletteClicked = 8
paletteClicked = random.randint(0, 9)
pygame.time.wait(50)
if paletteClicked != None and paletteClicked != lastPaletteClicked:
lastPaletteClicked = paletteClicked
floodFill(board[0][0], paletteClicked, buildQueue())
drawBoard()
pygame.display.update()
def generateRandomBoard(width, height):
for x in range(width):
column = []
for y in range(height):
column.append(random.randint(0, len(paletteColors) - 1))
board.append(column)
def drawBoard():
for x in range(boardWidth):
for y in range(boardHeight):
left, top = leftTopPixelCoordOfBox(x, y)
pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,
top, boxSize, boxSize))
DISPLAYSURF.blit(DISPLAYSURF, (0, 0))
def drawPalettes():
numColors = len(paletteColors)
textSize = 30
font = pygame.font.Font(None, textSize)
for i in range(numColors):
top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE
left = 10
pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,
PALETTESIZE, PALETTESIZE))
textImg = font.render(str((i + 1) % 10), 1, bgColor)
DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 -
textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))
def floodFill(teamColor, newColor, queue):
while queue.isEmpty() == False:
checkHere = queue.dequeue()
x, y = checkHere[0], checkHere[1]
board[x][y] = newColor
conqueredAt[x][y] = True
if x > 0:
X, Y = x - 1, y
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y])
if x < boardWidth - 1:
X, Y = x + 1, y
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y])
if y > 0:
X, Y = x, y - 1
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y])
if y < boardHeight - 1:
X, Y = x, y + 1
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y])
for x in range(boardWidth):
for y in range(boardHeight):
if conqueredAt[x][y] == True:
board[x][y] = newColor
def leftTopPixelCoordOfBox(boxx, boxy):
xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)
ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)
return boxx * boxSize + xmargin, boxy * boxSize + ymargin
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import random, sys, webbrowser, copy, pygame
from pygame.locals import *
WINDOWWIDTH = 1920
WINDOWHEIGHT = 1000
boxSize = 20
PALETTEGAPSIZE = 5
PALETTESIZE = 30
boardWidth = 93
boardHeight = 49
board = []
conqueredAt = [[(False) for y in range(boardHeight)] for x in range(boardWidth)
]
neverQueue = [[(False) for y in range(boardHeight)] for x in range(boardWidth)]
conqueredAt[0][0] = True
class Queue:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def enqueue(self, item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def size(self):
return len(self.items)
def buildQueue():
floodQueue = Queue()
for x in range(boardWidth):
for y in range(boardHeight):
if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):
noFrontier = True
if x > 0:
noFrontier = noFrontier & conqueredAt[x - 1][y]
if x < boardWidth - 1:
noFrontier = noFrontier & conqueredAt[x + 1][y]
if y > 0:
noFrontier = noFrontier & conqueredAt[x][y - 1]
if y < boardHeight - 1:
noFrontier = noFrontier & conqueredAt[x][y + 1]
if noFrontier:
neverQueue[x][y] = True
else:
floodQueue.enqueue([x, y])
return floodQueue
WHITE = 255, 255, 255
DARKGRAY = 70, 70, 70
BLACK = 0, 0, 0
RED = 255, 0, 0
GREEN = 0, 255, 0
BLUE = 0, 0, 255
YELLOW = 255, 255, 0
ORANGE = 255, 128, 0
PURPLE = 255, 0, 255
COLORSCHEMES = (150, 200, 255), (97, 215, 164), (0, 125, 50), (23, 149, 195), (
81, 85, 141), (147, 3, 167), (241, 109, 149), (255, 180, 115), (166, 147, 0
), (183, 182, 208), (68, 0, 0)
bgColor = COLORSCHEMES[0]
paletteColors = COLORSCHEMES[1:]
def main():
global FPSCLOCK, DISPLAYSURF
pygame.init()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption('Flood it')
generateRandomBoard(boardWidth, boardHeight)
lastPaletteClicked = None
while True:
paletteClicked = None
DISPLAYSURF.fill(bgColor)
drawBoard()
drawPalettes()
pygame.display.update()
for event in pygame.event.get(KEYUP):
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
elif event.key == K_0:
paletteClicked = 9
elif event.key == K_1:
paletteClicked = 0
elif event.key == K_2:
paletteClicked = 1
elif event.key == K_3:
paletteClicked = 2
elif event.key == K_4:
paletteClicked = 3
elif event.key == K_5:
paletteClicked = 4
elif event.key == K_6:
paletteClicked = 5
elif event.key == K_7:
paletteClicked = 6
elif event.key == K_8:
paletteClicked = 7
elif event.key == K_9:
paletteClicked = 8
paletteClicked = random.randint(0, 9)
pygame.time.wait(50)
if paletteClicked != None and paletteClicked != lastPaletteClicked:
lastPaletteClicked = paletteClicked
floodFill(board[0][0], paletteClicked, buildQueue())
drawBoard()
pygame.display.update()
def generateRandomBoard(width, height):
for x in range(width):
column = []
for y in range(height):
column.append(random.randint(0, len(paletteColors) - 1))
board.append(column)
def drawBoard():
for x in range(boardWidth):
for y in range(boardHeight):
left, top = leftTopPixelCoordOfBox(x, y)
pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,
top, boxSize, boxSize))
DISPLAYSURF.blit(DISPLAYSURF, (0, 0))
def drawPalettes():
numColors = len(paletteColors)
textSize = 30
font = pygame.font.Font(None, textSize)
for i in range(numColors):
top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE
left = 10
pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,
PALETTESIZE, PALETTESIZE))
textImg = font.render(str((i + 1) % 10), 1, bgColor)
DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 -
textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))
def floodFill(teamColor, newColor, queue):
while queue.isEmpty() == False:
checkHere = queue.dequeue()
x, y = checkHere[0], checkHere[1]
board[x][y] = newColor
conqueredAt[x][y] = True
if x > 0:
X, Y = x - 1, y
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y])
if x < boardWidth - 1:
X, Y = x + 1, y
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y])
if y > 0:
X, Y = x, y - 1
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y])
if y < boardHeight - 1:
X, Y = x, y + 1
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y])
for x in range(boardWidth):
for y in range(boardHeight):
if conqueredAt[x][y] == True:
board[x][y] = newColor
def leftTopPixelCoordOfBox(boxx, boxy):
xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)
ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)
return boxx * boxSize + xmargin, boxy * boxSize + ymargin
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#
# In development by Jihye Sofia Seo https://www.linkedin.com/in/jihyeseo
# forked from the code of Al Sweigart
# http://inventwithpython.com/pygame/chapter10.html
# whose books are very helpful for learning Python and PyGame. Many thanks!
# Main change is that his version uses flood fill algorithm, which could not run for large boards.
# This file modified the algorithm.
#
# Flood-It is an NP hard problem http://arxiv.org/abs/1001.4420 for 3 colors or more.
# The goal of this project is to find an efficient algorithm for autoplay.
#
# Any comments are welcome at jihyeseo@post.harvard.edu
# upload: May 7 2016 Berlin Germany
#
import random, sys, webbrowser, copy, pygame
from pygame.locals import *
#sys.setrecursionlimit(1000000)
#FPS = 30
WINDOWWIDTH = 1920
WINDOWHEIGHT = 1000
boxSize = 20
PALETTEGAPSIZE = 5
PALETTESIZE = 30
boardWidth = 93
boardHeight = 49
# Creates a board data structure with random colors for each box.
board = []
conqueredAt = [[False for y in range(boardHeight)] for x in range(boardWidth)]
neverQueue = [[False for y in range(boardHeight)] for x in range(boardWidth)]
conqueredAt[0][0] = True
class Queue:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def enqueue(self, item):
self.items.insert(0,item)
def dequeue(self):
return self.items.pop()
def size(self):
return len(self.items)
def buildQueue(): # add only boundaries
floodQueue = Queue()
for x in range(boardWidth):
for y in range(boardHeight):
if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):
noFrontier = True
if (x > 0) :
noFrontier = noFrontier & (conqueredAt[x-1][y])
if (x < boardWidth - 1):
noFrontier = noFrontier & (conqueredAt[x+1][y])
if (y > 0):
noFrontier = noFrontier & (conqueredAt[x][y-1])
if (y < boardHeight - 1):
noFrontier = noFrontier & (conqueredAt[x][y+1])
if noFrontier :
neverQueue[x][y] = True
else:
floodQueue.enqueue([x, y])
return floodQueue
# R G B
WHITE = (255, 255, 255)
DARKGRAY = ( 70, 70, 70)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
PURPLE = (255, 0, 255)
# The first color in each scheme is the background color, the next six are the palette colors.
COLORSCHEMES = ((150, 200, 255),
(97, 215, 164) , #lightGr
(0, 125, 50) ,#darkGr
(23, 149, 195) , # light ocean
(81, 85 , 141), # lightPur
(147, 3, 167) , # purple
(241, 109, 149), # jindalle
(255, 180, 115), # tangerine
(166, 147, 0), # tangerine?
(183, 182, 208), # gray
(68, 0, 0) # drak grey
)
bgColor = COLORSCHEMES[0]
paletteColors = COLORSCHEMES[1:]
def main():
global FPSCLOCK, DISPLAYSURF
pygame.init()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption('Flood it')
generateRandomBoard(boardWidth, boardHeight)
lastPaletteClicked = None
while True: # main game loop
paletteClicked = None
# Draw the screen.
DISPLAYSURF.fill(bgColor)
drawBoard()
drawPalettes()
pygame.display.update()
for event in pygame.event.get(KEYUP): # get all the KEYUP events
if event.key == K_ESCAPE:
pygame.quit() # terminate if the KEYUP event was for the Esc key
sys.exit()
elif event.key == K_0:
paletteClicked = 9
elif event.key == K_1:
paletteClicked = 0
elif event.key == K_2:
paletteClicked = 1
elif event.key == K_3:
paletteClicked = 2
elif event.key == K_4:
paletteClicked = 3
elif event.key == K_5:
paletteClicked = 4
elif event.key == K_6:
paletteClicked = 5
elif event.key == K_7:
paletteClicked = 6
elif event.key == K_8:
paletteClicked = 7
elif event.key == K_9:
paletteClicked = 8
# pygame.event.post(event) # put the other KEYUP event objects back
paletteClicked = random.randint(0,9)
pygame.time.wait(50)
if paletteClicked != None and paletteClicked != lastPaletteClicked:
# a palette button was clicked that is different from the
# last palette button clicked (this check prevents the player
# from accidentally clicking the same palette twice)
lastPaletteClicked = paletteClicked
#if board[0][0] != paletteClicked :
floodFill(board[0][0], paletteClicked, buildQueue())
drawBoard()
pygame.display.update()
# FPSCLOCK.tick(FPS)
# pygame.display.update()
#FPSCLOCK.tick(FPS)
def generateRandomBoard(width, height):
for x in range(width):
column = []
for y in range(height):
column.append(random.randint(0, len(paletteColors) - 1))
board.append(column)
def drawBoard():
for x in range(boardWidth):
for y in range(boardHeight):
left, top = leftTopPixelCoordOfBox(x, y)
pygame.draw.rect(DISPLAYSURF, (paletteColors[board[x][y]]), (left, top, boxSize, boxSize))
DISPLAYSURF.blit(DISPLAYSURF, (0, 0))
def drawPalettes():
# Draws the six color palettes at the left of the screen.
numColors = len(paletteColors)
textSize = 30
font = pygame.font.Font(None, textSize)
for i in range(numColors):
top = 10 + (i * PALETTESIZE) + (i * PALETTEGAPSIZE)
left = 10
pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top, PALETTESIZE, PALETTESIZE))
textImg = font.render( str((i+1) % 10), 1, bgColor)
DISPLAYSURF.blit( textImg, (left+10 +0*(PALETTESIZE/2-textSize/2),top+7 +0*(PALETTESIZE/2-textSize/2)))
def floodFill(teamColor, newColor, queue):
while(queue.isEmpty() == False):
checkHere = queue.dequeue()
(x,y) = (checkHere[0],checkHere[1])
board[x][y] = newColor
conqueredAt[x][y] = True
if x > 0 :
(X,Y) = (x-1,y)
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y]) # on box to the left
if x < boardWidth - 1:
(X,Y) = (x+1,y)
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y]) # on box to the right
if y > 0:
(X,Y) = (x,y-1)
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y]) # on box to up
if y < boardHeight - 1:
(X,Y) = (x,y+1)
if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
queue.enqueue([X, Y]) # on box to down
for x in range(boardWidth):
for y in range(boardHeight):
if conqueredAt[x][y] == True :
board[x][y] = newColor
def leftTopPixelCoordOfBox(boxx, boxy):
# Returns the x and y of the left-topmost pixel of the xth & yth box.
xmargin = int((WINDOWWIDTH - (boardWidth * boxSize)) / 2 + 23)
ymargin = int((WINDOWHEIGHT - (boardHeight * boxSize)) / 2 )
return (boxx * boxSize + xmargin, boxy * boxSize + ymargin)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "ec200ee66e3c4a93bbd8e75f0e8b715f54b5479d",
"index": 6781,
"step-1": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\ndef buildQueue():\n floodQueue = Queue()\n for x in range(boardWidth):\n for y in range(boardHeight):\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):\n noFrontier = True\n if x > 0:\n noFrontier = noFrontier & conqueredAt[x - 1][y]\n if x < boardWidth - 1:\n noFrontier = noFrontier & conqueredAt[x + 1][y]\n if y > 0:\n noFrontier = noFrontier & conqueredAt[x][y - 1]\n if y < boardHeight - 1:\n noFrontier = noFrontier & conqueredAt[x][y + 1]\n if noFrontier:\n neverQueue[x][y] = True\n else:\n floodQueue.enqueue([x, y])\n return floodQueue\n\n\n<mask token>\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Flood it')\n generateRandomBoard(boardWidth, boardHeight)\n lastPaletteClicked = None\n while True:\n paletteClicked = None\n DISPLAYSURF.fill(bgColor)\n drawBoard()\n drawPalettes()\n pygame.display.update()\n for event in pygame.event.get(KEYUP):\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == K_0:\n paletteClicked = 9\n elif event.key == K_1:\n paletteClicked = 0\n elif event.key == K_2:\n paletteClicked = 1\n elif event.key == K_3:\n paletteClicked = 2\n elif event.key == K_4:\n paletteClicked = 3\n elif event.key == K_5:\n paletteClicked = 4\n elif event.key == K_6:\n paletteClicked = 5\n elif event.key == K_7:\n paletteClicked = 6\n elif event.key == K_8:\n paletteClicked = 7\n elif event.key == K_9:\n paletteClicked = 8\n paletteClicked = random.randint(0, 9)\n pygame.time.wait(50)\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\n lastPaletteClicked = paletteClicked\n floodFill(board[0][0], 
paletteClicked, buildQueue())\n drawBoard()\n pygame.display.update()\n\n\n<mask token>\n\n\ndef drawBoard():\n for x in range(boardWidth):\n for y in range(boardHeight):\n left, top = leftTopPixelCoordOfBox(x, y)\n pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,\n top, boxSize, boxSize))\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\n\n\ndef drawPalettes():\n numColors = len(paletteColors)\n textSize = 30\n font = pygame.font.Font(None, textSize)\n for i in range(numColors):\n top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE\n left = 10\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,\n PALETTESIZE, PALETTESIZE))\n textImg = font.render(str((i + 1) % 10), 1, bgColor)\n DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 - \n textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))\n\n\n<mask token>\n\n\ndef leftTopPixelCoordOfBox(boxx, boxy):\n xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)\n ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)\n return boxx * boxSize + xmargin, boxy * boxSize + ymargin\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\ndef buildQueue():\n floodQueue = Queue()\n for x in range(boardWidth):\n for y in range(boardHeight):\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):\n noFrontier = True\n if x > 0:\n noFrontier = noFrontier & conqueredAt[x - 1][y]\n if x < boardWidth - 1:\n noFrontier = noFrontier & conqueredAt[x + 1][y]\n if y > 0:\n noFrontier = noFrontier & conqueredAt[x][y - 1]\n if y < boardHeight - 1:\n noFrontier = noFrontier & conqueredAt[x][y + 1]\n if noFrontier:\n neverQueue[x][y] = True\n else:\n floodQueue.enqueue([x, y])\n return floodQueue\n\n\n<mask token>\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Flood it')\n generateRandomBoard(boardWidth, boardHeight)\n lastPaletteClicked = None\n while True:\n paletteClicked = None\n DISPLAYSURF.fill(bgColor)\n drawBoard()\n drawPalettes()\n pygame.display.update()\n for event in pygame.event.get(KEYUP):\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == K_0:\n paletteClicked = 9\n elif event.key == K_1:\n paletteClicked = 0\n elif event.key == K_2:\n paletteClicked = 1\n elif event.key == K_3:\n paletteClicked = 2\n elif event.key == K_4:\n paletteClicked = 3\n elif event.key == K_5:\n paletteClicked = 4\n elif event.key == K_6:\n paletteClicked = 5\n elif event.key == K_7:\n paletteClicked = 6\n elif event.key == K_8:\n paletteClicked = 7\n elif event.key == K_9:\n paletteClicked = 8\n paletteClicked = random.randint(0, 9)\n pygame.time.wait(50)\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\n lastPaletteClicked = paletteClicked\n floodFill(board[0][0], 
paletteClicked, buildQueue())\n drawBoard()\n pygame.display.update()\n\n\ndef generateRandomBoard(width, height):\n for x in range(width):\n column = []\n for y in range(height):\n column.append(random.randint(0, len(paletteColors) - 1))\n board.append(column)\n\n\ndef drawBoard():\n for x in range(boardWidth):\n for y in range(boardHeight):\n left, top = leftTopPixelCoordOfBox(x, y)\n pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,\n top, boxSize, boxSize))\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\n\n\ndef drawPalettes():\n numColors = len(paletteColors)\n textSize = 30\n font = pygame.font.Font(None, textSize)\n for i in range(numColors):\n top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE\n left = 10\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,\n PALETTESIZE, PALETTESIZE))\n textImg = font.render(str((i + 1) % 10), 1, bgColor)\n DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 - \n textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))\n\n\n<mask token>\n\n\ndef leftTopPixelCoordOfBox(boxx, boxy):\n xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)\n ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)\n return boxx * boxSize + xmargin, boxy * boxSize + ymargin\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\ndef buildQueue():\n floodQueue = Queue()\n for x in range(boardWidth):\n for y in range(boardHeight):\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):\n noFrontier = True\n if x > 0:\n noFrontier = noFrontier & conqueredAt[x - 1][y]\n if x < boardWidth - 1:\n noFrontier = noFrontier & conqueredAt[x + 1][y]\n if y > 0:\n noFrontier = noFrontier & conqueredAt[x][y - 1]\n if y < boardHeight - 1:\n noFrontier = noFrontier & conqueredAt[x][y + 1]\n if noFrontier:\n neverQueue[x][y] = True\n else:\n floodQueue.enqueue([x, y])\n return floodQueue\n\n\n<mask token>\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Flood it')\n generateRandomBoard(boardWidth, boardHeight)\n lastPaletteClicked = None\n while True:\n paletteClicked = None\n DISPLAYSURF.fill(bgColor)\n drawBoard()\n drawPalettes()\n pygame.display.update()\n for event in pygame.event.get(KEYUP):\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == K_0:\n paletteClicked = 9\n elif event.key == K_1:\n paletteClicked = 0\n elif event.key == K_2:\n paletteClicked = 1\n elif event.key == K_3:\n paletteClicked = 2\n elif event.key == K_4:\n paletteClicked = 3\n elif event.key == K_5:\n paletteClicked = 4\n elif event.key == K_6:\n paletteClicked = 5\n elif event.key == K_7:\n paletteClicked = 6\n elif event.key == K_8:\n paletteClicked = 7\n elif event.key == K_9:\n paletteClicked = 8\n paletteClicked = random.randint(0, 9)\n pygame.time.wait(50)\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\n lastPaletteClicked = paletteClicked\n floodFill(board[0][0], 
paletteClicked, buildQueue())\n drawBoard()\n pygame.display.update()\n\n\ndef generateRandomBoard(width, height):\n for x in range(width):\n column = []\n for y in range(height):\n column.append(random.randint(0, len(paletteColors) - 1))\n board.append(column)\n\n\ndef drawBoard():\n for x in range(boardWidth):\n for y in range(boardHeight):\n left, top = leftTopPixelCoordOfBox(x, y)\n pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,\n top, boxSize, boxSize))\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\n\n\ndef drawPalettes():\n numColors = len(paletteColors)\n textSize = 30\n font = pygame.font.Font(None, textSize)\n for i in range(numColors):\n top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE\n left = 10\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,\n PALETTESIZE, PALETTESIZE))\n textImg = font.render(str((i + 1) % 10), 1, bgColor)\n DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 - \n textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))\n\n\ndef floodFill(teamColor, newColor, queue):\n while queue.isEmpty() == False:\n checkHere = queue.dequeue()\n x, y = checkHere[0], checkHere[1]\n board[x][y] = newColor\n conqueredAt[x][y] = True\n if x > 0:\n X, Y = x - 1, y\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if x < boardWidth - 1:\n X, Y = x + 1, y\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if y > 0:\n X, Y = x, y - 1\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if y < boardHeight - 1:\n X, Y = x, y + 1\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n for x in range(boardWidth):\n for y in range(boardHeight):\n if conqueredAt[x][y] == True:\n board[x][y] = newColor\n\n\ndef leftTopPixelCoordOfBox(boxx, boxy):\n xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)\n ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)\n return 
boxx * boxSize + xmargin, boxy * boxSize + ymargin\n\n\n<mask token>\n",
"step-4": "import random, sys, webbrowser, copy, pygame\nfrom pygame.locals import *\nWINDOWWIDTH = 1920\nWINDOWHEIGHT = 1000\nboxSize = 20\nPALETTEGAPSIZE = 5\nPALETTESIZE = 30\nboardWidth = 93\nboardHeight = 49\nboard = []\nconqueredAt = [[(False) for y in range(boardHeight)] for x in range(boardWidth)\n ]\nneverQueue = [[(False) for y in range(boardHeight)] for x in range(boardWidth)]\nconqueredAt[0][0] = True\n\n\nclass Queue:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\ndef buildQueue():\n floodQueue = Queue()\n for x in range(boardWidth):\n for y in range(boardHeight):\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):\n noFrontier = True\n if x > 0:\n noFrontier = noFrontier & conqueredAt[x - 1][y]\n if x < boardWidth - 1:\n noFrontier = noFrontier & conqueredAt[x + 1][y]\n if y > 0:\n noFrontier = noFrontier & conqueredAt[x][y - 1]\n if y < boardHeight - 1:\n noFrontier = noFrontier & conqueredAt[x][y + 1]\n if noFrontier:\n neverQueue[x][y] = True\n else:\n floodQueue.enqueue([x, y])\n return floodQueue\n\n\nWHITE = 255, 255, 255\nDARKGRAY = 70, 70, 70\nBLACK = 0, 0, 0\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nYELLOW = 255, 255, 0\nORANGE = 255, 128, 0\nPURPLE = 255, 0, 255\nCOLORSCHEMES = (150, 200, 255), (97, 215, 164), (0, 125, 50), (23, 149, 195), (\n 81, 85, 141), (147, 3, 167), (241, 109, 149), (255, 180, 115), (166, 147, 0\n ), (183, 182, 208), (68, 0, 0)\nbgColor = COLORSCHEMES[0]\npaletteColors = COLORSCHEMES[1:]\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Flood it')\n generateRandomBoard(boardWidth, boardHeight)\n lastPaletteClicked = None\n while True:\n paletteClicked = None\n 
DISPLAYSURF.fill(bgColor)\n drawBoard()\n drawPalettes()\n pygame.display.update()\n for event in pygame.event.get(KEYUP):\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == K_0:\n paletteClicked = 9\n elif event.key == K_1:\n paletteClicked = 0\n elif event.key == K_2:\n paletteClicked = 1\n elif event.key == K_3:\n paletteClicked = 2\n elif event.key == K_4:\n paletteClicked = 3\n elif event.key == K_5:\n paletteClicked = 4\n elif event.key == K_6:\n paletteClicked = 5\n elif event.key == K_7:\n paletteClicked = 6\n elif event.key == K_8:\n paletteClicked = 7\n elif event.key == K_9:\n paletteClicked = 8\n paletteClicked = random.randint(0, 9)\n pygame.time.wait(50)\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\n lastPaletteClicked = paletteClicked\n floodFill(board[0][0], paletteClicked, buildQueue())\n drawBoard()\n pygame.display.update()\n\n\ndef generateRandomBoard(width, height):\n for x in range(width):\n column = []\n for y in range(height):\n column.append(random.randint(0, len(paletteColors) - 1))\n board.append(column)\n\n\ndef drawBoard():\n for x in range(boardWidth):\n for y in range(boardHeight):\n left, top = leftTopPixelCoordOfBox(x, y)\n pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,\n top, boxSize, boxSize))\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\n\n\ndef drawPalettes():\n numColors = len(paletteColors)\n textSize = 30\n font = pygame.font.Font(None, textSize)\n for i in range(numColors):\n top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE\n left = 10\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,\n PALETTESIZE, PALETTESIZE))\n textImg = font.render(str((i + 1) % 10), 1, bgColor)\n DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 - \n textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))\n\n\ndef floodFill(teamColor, newColor, queue):\n while queue.isEmpty() == False:\n checkHere = queue.dequeue()\n x, y = checkHere[0], checkHere[1]\n 
board[x][y] = newColor\n conqueredAt[x][y] = True\n if x > 0:\n X, Y = x - 1, y\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if x < boardWidth - 1:\n X, Y = x + 1, y\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if y > 0:\n X, Y = x, y - 1\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if y < boardHeight - 1:\n X, Y = x, y + 1\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n for x in range(boardWidth):\n for y in range(boardHeight):\n if conqueredAt[x][y] == True:\n board[x][y] = newColor\n\n\ndef leftTopPixelCoordOfBox(boxx, boxy):\n xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)\n ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)\n return boxx * boxSize + xmargin, boxy * boxSize + ymargin\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#\r\n# In development by Jihye Sofia Seo https://www.linkedin.com/in/jihyeseo\r\n# forked from the code of Al Sweigart \r\n# http://inventwithpython.com/pygame/chapter10.html \r\n# whose books are very helpful for learning Python and PyGame. Many thanks!\r\n# Main change is that his version uses flood fill algorithm, which could not run for large boards.\r\n# This file modified the algorithm. \r\n#\r\n# Flood-It is an NP hard problem http://arxiv.org/abs/1001.4420 for 3 colors or more. \r\n# The goal of this project is to find an efficient algorithm for autoplay.\r\n#\r\n# Any comments are welcome at jihyeseo@post.harvard.edu \r\n# upload: May 7 2016 Berlin Germany\r\n#\r\n\r\nimport random, sys, webbrowser, copy, pygame\r\nfrom pygame.locals import *\r\n \r\n#sys.setrecursionlimit(1000000)\r\n \r\n#FPS = 30\r\nWINDOWWIDTH = 1920\r\nWINDOWHEIGHT = 1000\r\nboxSize = 20\r\nPALETTEGAPSIZE = 5\r\nPALETTESIZE = 30\r\n \r\nboardWidth = 93\r\nboardHeight = 49 \r\n\r\n# Creates a board data structure with random colors for each box.\r\nboard = []\r\n\r\n\r\nconqueredAt = [[False for y in range(boardHeight)] for x in range(boardWidth)] \r\nneverQueue = [[False for y in range(boardHeight)] for x in range(boardWidth)] \r\n \r\nconqueredAt[0][0] = True \r\n\r\n\r\nclass Queue:\r\n def __init__(self):\r\n self.items = []\r\n\r\n def isEmpty(self):\r\n return self.items == []\r\n\r\n def enqueue(self, item):\r\n self.items.insert(0,item)\r\n\r\n def dequeue(self):\r\n return self.items.pop()\r\n\r\n def size(self):\r\n return len(self.items)\r\n\r\n\r\ndef buildQueue(): # add only boundaries\r\n floodQueue = Queue() \r\n for x in range(boardWidth):\r\n for y in range(boardHeight):\r\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True): \r\n noFrontier = True \r\n if (x > 0) :\r\n noFrontier = noFrontier & (conqueredAt[x-1][y]) \r\n if (x < boardWidth - 1):\r\n noFrontier = noFrontier & (conqueredAt[x+1][y])\r\n if (y > 0):\r\n noFrontier = noFrontier & 
(conqueredAt[x][y-1])\r\n if (y < boardHeight - 1):\r\n noFrontier = noFrontier & (conqueredAt[x][y+1]) \r\n if noFrontier :\r\n neverQueue[x][y] = True\r\n else: \r\n floodQueue.enqueue([x, y]) \r\n \r\n return floodQueue\r\n \r\n# R G B\r\nWHITE = (255, 255, 255)\r\nDARKGRAY = ( 70, 70, 70)\r\nBLACK = ( 0, 0, 0)\r\nRED = (255, 0, 0)\r\nGREEN = ( 0, 255, 0)\r\nBLUE = ( 0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\nORANGE = (255, 128, 0)\r\nPURPLE = (255, 0, 255)\r\n# The first color in each scheme is the background color, the next six are the palette colors.\r\nCOLORSCHEMES = ((150, 200, 255), \r\n (97, 215, 164) , #lightGr \r\n (0, 125, 50) ,#darkGr\r\n (23, 149, 195) , # light ocean\r\n (81, 85 , 141), # lightPur\r\n (147, 3, 167) , # purple\r\n (241, 109, 149), # jindalle \r\n (255, 180, 115), # tangerine\r\n (166, 147, 0), # tangerine? \r\n (183, 182, 208), # gray\r\n (68, 0, 0) # drak grey\r\n )\r\nbgColor = COLORSCHEMES[0]\r\npaletteColors = COLORSCHEMES[1:]\r\n\r\ndef main():\r\n global FPSCLOCK, DISPLAYSURF\r\n\r\n pygame.init() \r\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\r\n\r\n pygame.display.set_caption('Flood it')\r\n generateRandomBoard(boardWidth, boardHeight)\r\n lastPaletteClicked = None\r\n\r\n while True: # main game loop\r\n paletteClicked = None\r\n\r\n # Draw the screen.\r\n DISPLAYSURF.fill(bgColor) \r\n drawBoard() \r\n drawPalettes()\r\n pygame.display.update()\r\n \r\n for event in pygame.event.get(KEYUP): # get all the KEYUP events\r\n if event.key == K_ESCAPE:\r\n pygame.quit() # terminate if the KEYUP event was for the Esc key\r\n sys.exit()\r\n elif event.key == K_0:\r\n paletteClicked = 9 \r\n elif event.key == K_1:\r\n paletteClicked = 0 \r\n elif event.key == K_2:\r\n paletteClicked = 1 \r\n elif event.key == K_3:\r\n paletteClicked = 2 \r\n elif event.key == K_4:\r\n paletteClicked = 3 \r\n elif event.key == K_5:\r\n paletteClicked = 4 \r\n elif event.key == K_6:\r\n paletteClicked = 5 \r\n elif event.key == 
K_7:\r\n paletteClicked = 6 \r\n elif event.key == K_8:\r\n paletteClicked = 7 \r\n elif event.key == K_9:\r\n paletteClicked = 8 \r\n # pygame.event.post(event) # put the other KEYUP event objects back\r\n \r\n paletteClicked = random.randint(0,9)\r\n pygame.time.wait(50)\r\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\r\n # a palette button was clicked that is different from the\r\n # last palette button clicked (this check prevents the player\r\n # from accidentally clicking the same palette twice)\r\n lastPaletteClicked = paletteClicked\r\n #if board[0][0] != paletteClicked : \r\n floodFill(board[0][0], paletteClicked, buildQueue())\r\n drawBoard()\r\n pygame.display.update()\r\n # FPSCLOCK.tick(FPS) \r\n # pygame.display.update()\r\n #FPSCLOCK.tick(FPS)\r\n\r\ndef generateRandomBoard(width, height): \r\n for x in range(width):\r\n column = []\r\n for y in range(height): \r\n column.append(random.randint(0, len(paletteColors) - 1))\r\n board.append(column) \r\n\r\ndef drawBoard():\r\n for x in range(boardWidth):\r\n for y in range(boardHeight):\r\n left, top = leftTopPixelCoordOfBox(x, y) \r\n pygame.draw.rect(DISPLAYSURF, (paletteColors[board[x][y]]), (left, top, boxSize, boxSize))\r\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\r\n\r\ndef drawPalettes():\r\n # Draws the six color palettes at the left of the screen.\r\n numColors = len(paletteColors)\r\n textSize = 30\r\n font = pygame.font.Font(None, textSize)\r\n for i in range(numColors):\r\n top = 10 + (i * PALETTESIZE) + (i * PALETTEGAPSIZE)\r\n left = 10\r\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top, PALETTESIZE, PALETTESIZE))\r\n textImg = font.render( str((i+1) % 10), 1, bgColor)\r\n DISPLAYSURF.blit( textImg, (left+10 +0*(PALETTESIZE/2-textSize/2),top+7 +0*(PALETTESIZE/2-textSize/2)))\r\n \r\ndef floodFill(teamColor, newColor, queue): \r\n while(queue.isEmpty() == False):\r\n checkHere = queue.dequeue() \r\n (x,y) = (checkHere[0],checkHere[1])\r\n \r\n board[x][y] = 
newColor \r\n conqueredAt[x][y] = True \r\n \r\n if x > 0 :\r\n (X,Y) = (x-1,y) \r\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): \r\n queue.enqueue([X, Y]) # on box to the left\r\n if x < boardWidth - 1:\r\n (X,Y) = (x+1,y) \r\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): \r\n queue.enqueue([X, Y]) # on box to the right\r\n if y > 0:\r\n (X,Y) = (x,y-1) \r\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): \r\n queue.enqueue([X, Y]) # on box to up\r\n if y < boardHeight - 1:\r\n (X,Y) = (x,y+1) \r\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): \r\n queue.enqueue([X, Y]) # on box to down\r\n for x in range(boardWidth):\r\n for y in range(boardHeight):\r\n if conqueredAt[x][y] == True :\r\n board[x][y] = newColor \r\n\r\ndef leftTopPixelCoordOfBox(boxx, boxy):\r\n # Returns the x and y of the left-topmost pixel of the xth & yth box.\r\n xmargin = int((WINDOWWIDTH - (boardWidth * boxSize)) / 2 + 23)\r\n ymargin = int((WINDOWHEIGHT - (boardHeight * boxSize)) / 2 )\r\n return (boxx * boxSize + xmargin, boxy * boxSize + ymargin)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n",
"step-ids": [
11,
12,
13,
16,
17
]
}
|
[
11,
12,
13,
16,
17
] |
from IPython import display
display.Image("./image.png")
|
normal
|
{
"blob_id": "3f5096ef5677373a1e436f454109c7b7577c0205",
"index": 6169,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndisplay.Image('./image.png')\n",
"step-3": "from IPython import display\ndisplay.Image('./image.png')\n",
"step-4": "from IPython import display\ndisplay.Image(\"./image.png\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@bp.route('/login', methods=('POST',))
def login() ->Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(request.json, partial=('id',
'qualifications') + PERMISSIONS)
username = user_dict['username']
password = user_dict['password']
if is_password_correct(username, password):
user = fetch_user(username)
session['user_id'] = user['id']
response = make_response(user)
response.set_cookie('is_authenticated', '1')
return response
raise APIError(reason='invalid_user_or_password', status_code=403)
@bp.route('/logout', methods=('POST',))
def logout() ->Any:
"""Flask view to log a user out."""
if 'user_id' in session:
del session['user_id']
response = make_response({'success': True})
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) ->bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
<|reserved_special_token_0|>
def authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[
..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) ->Any:
user_id = session.get('user_id')
if user_id is None or User.query.get(user_id) is None:
if 'user_id' in session:
del session['user_id']
response = make_response({'reason': 'authentication_required'}, 403
)
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
<|reserved_special_token_1|>
<|reserved_special_token_0|>
check_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],
bool], _check_password_hash)
@bp.route('/login', methods=('POST',))
def login() ->Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(request.json, partial=('id',
'qualifications') + PERMISSIONS)
username = user_dict['username']
password = user_dict['password']
if is_password_correct(username, password):
user = fetch_user(username)
session['user_id'] = user['id']
response = make_response(user)
response.set_cookie('is_authenticated', '1')
return response
raise APIError(reason='invalid_user_or_password', status_code=403)
@bp.route('/logout', methods=('POST',))
def logout() ->Any:
"""Flask view to log a user out."""
if 'user_id' in session:
del session['user_id']
response = make_response({'success': True})
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) ->bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) ->Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[
..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) ->Any:
user_id = session.get('user_id')
if user_id is None or User.query.get(user_id) is None:
if 'user_id' in session:
del session['user_id']
response = make_response({'reason': 'authentication_required'}, 403
)
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bp = Blueprint('auth', __name__, url_prefix='/api/v1/auth')
_CHECK_HASH_ANYWAY = (
'pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050'
)
check_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],
bool], _check_password_hash)
@bp.route('/login', methods=('POST',))
def login() ->Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(request.json, partial=('id',
'qualifications') + PERMISSIONS)
username = user_dict['username']
password = user_dict['password']
if is_password_correct(username, password):
user = fetch_user(username)
session['user_id'] = user['id']
response = make_response(user)
response.set_cookie('is_authenticated', '1')
return response
raise APIError(reason='invalid_user_or_password', status_code=403)
@bp.route('/logout', methods=('POST',))
def logout() ->Any:
"""Flask view to log a user out."""
if 'user_id' in session:
del session['user_id']
response = make_response({'success': True})
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) ->bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) ->Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[
..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) ->Any:
user_id = session.get('user_id')
if user_id is None or User.query.get(user_id) is None:
if 'user_id' in session:
del session['user_id']
response = make_response({'reason': 'authentication_required'}, 403
)
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import functools
from typing import Any, Callable, cast, Dict
from flask import Blueprint, make_response, request, session
from werkzeug.security import check_password_hash as _check_password_hash
from .accesscontrol import PERMISSIONS
from .api import APIError, UserSchema
from .db.models import User
bp = Blueprint('auth', __name__, url_prefix='/api/v1/auth')
_CHECK_HASH_ANYWAY = (
'pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050'
)
check_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],
bool], _check_password_hash)
@bp.route('/login', methods=('POST',))
def login() ->Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(request.json, partial=('id',
'qualifications') + PERMISSIONS)
username = user_dict['username']
password = user_dict['password']
if is_password_correct(username, password):
user = fetch_user(username)
session['user_id'] = user['id']
response = make_response(user)
response.set_cookie('is_authenticated', '1')
return response
raise APIError(reason='invalid_user_or_password', status_code=403)
@bp.route('/logout', methods=('POST',))
def logout() ->Any:
"""Flask view to log a user out."""
if 'user_id' in session:
del session['user_id']
response = make_response({'success': True})
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) ->bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) ->Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[
..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) ->Any:
user_id = session.get('user_id')
if user_id is None or User.query.get(user_id) is None:
if 'user_id' in session:
del session['user_id']
response = make_response({'reason': 'authentication_required'}, 403
)
response.set_cookie('is_authenticated', max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
<|reserved_special_token_1|>
"""
Authentication views.
login()
Flask view to log a user in.
"""
import functools
from typing import Any, Callable, cast, Dict
from flask import Blueprint, make_response, request, session
from werkzeug.security import check_password_hash as _check_password_hash
from .accesscontrol import PERMISSIONS
from .api import APIError, UserSchema
from .db.models import User
bp = Blueprint("auth", __name__, url_prefix="/api/v1/auth")
_CHECK_HASH_ANYWAY = "pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050" # pylint: disable=line-too-long
check_password_hash: Callable[[str, str], bool] = cast(
Callable[[str, str], bool], _check_password_hash
)
@bp.route("/login", methods=("POST",))
def login() -> Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(
request.json, partial=("id", "qualifications") + PERMISSIONS
)
username = user_dict["username"]
password = user_dict["password"]
if is_password_correct(username, password):
user = fetch_user(username)
session["user_id"] = user["id"]
response = make_response(user)
response.set_cookie("is_authenticated", "1")
return response
raise APIError(reason="invalid_user_or_password", status_code=403)
@bp.route("/logout", methods=("POST",))
def logout() -> Any:
"""Flask view to log a user out."""
if "user_id" in session:
del session["user_id"]
response = make_response({"success": True})
response.set_cookie("is_authenticated", max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) -> bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
# We need to prevent timing-based side-channel attacks
# that could be exploited for user enumeration
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) -> Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) -> Callable[..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) -> Any:
user_id = session.get("user_id")
if user_id is None or User.query.get(user_id) is None:
if "user_id" in session:
del session["user_id"]
response = make_response({"reason": "authentication_required"}, 403)
response.set_cookie("is_authenticated", max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
|
flexible
|
{
"blob_id": "2d36ae916ad257615016ed6c0bc67e506ee313c9",
"index": 1528,
"step-1": "<mask token>\n\n\n@bp.route('/login', methods=('POST',))\ndef login() ->Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(request.json, partial=('id',\n 'qualifications') + PERMISSIONS)\n username = user_dict['username']\n password = user_dict['password']\n if is_password_correct(username, password):\n user = fetch_user(username)\n session['user_id'] = user['id']\n response = make_response(user)\n response.set_cookie('is_authenticated', '1')\n return response\n raise APIError(reason='invalid_user_or_password', status_code=403)\n\n\n@bp.route('/logout', methods=('POST',))\ndef logout() ->Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'success': True})\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) ->bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n return check_password_hash(password_hash, password) and user is not None\n\n\n<mask token>\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[\n ..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) ->Any:\n user_id = session.get('user_id')\n if user_id is None or User.query.get(user_id) is None:\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'reason': 'authentication_required'}, 403\n )\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n return wrapper\n",
"step-2": "<mask token>\ncheck_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],\n bool], _check_password_hash)\n\n\n@bp.route('/login', methods=('POST',))\ndef login() ->Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(request.json, partial=('id',\n 'qualifications') + PERMISSIONS)\n username = user_dict['username']\n password = user_dict['password']\n if is_password_correct(username, password):\n user = fetch_user(username)\n session['user_id'] = user['id']\n response = make_response(user)\n response.set_cookie('is_authenticated', '1')\n return response\n raise APIError(reason='invalid_user_or_password', status_code=403)\n\n\n@bp.route('/logout', methods=('POST',))\ndef logout() ->Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'success': True})\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) ->bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n return check_password_hash(password_hash, password) and user is not None\n\n\ndef fetch_user(username: str) ->Dict[str, Any]:\n \"\"\"Look up a user as a dictionary from the DB.\"\"\"\n user = User.query.filter_by(username=username).first()\n return cast(Dict[str, Any], UserSchema().dump(user))\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[\n ..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) ->Any:\n user_id = session.get('user_id')\n if user_id is None or User.query.get(user_id) is None:\n if 'user_id' in session:\n del session['user_id']\n response = 
make_response({'reason': 'authentication_required'}, 403\n )\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n return wrapper\n",
"step-3": "<mask token>\nbp = Blueprint('auth', __name__, url_prefix='/api/v1/auth')\n_CHECK_HASH_ANYWAY = (\n 'pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050'\n )\ncheck_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],\n bool], _check_password_hash)\n\n\n@bp.route('/login', methods=('POST',))\ndef login() ->Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(request.json, partial=('id',\n 'qualifications') + PERMISSIONS)\n username = user_dict['username']\n password = user_dict['password']\n if is_password_correct(username, password):\n user = fetch_user(username)\n session['user_id'] = user['id']\n response = make_response(user)\n response.set_cookie('is_authenticated', '1')\n return response\n raise APIError(reason='invalid_user_or_password', status_code=403)\n\n\n@bp.route('/logout', methods=('POST',))\ndef logout() ->Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'success': True})\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) ->bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n return check_password_hash(password_hash, password) and user is not None\n\n\ndef fetch_user(username: str) ->Dict[str, Any]:\n \"\"\"Look up a user as a dictionary from the DB.\"\"\"\n user = User.query.filter_by(username=username).first()\n return cast(Dict[str, Any], UserSchema().dump(user))\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[\n ..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: 
Any, **kwargs: Any) ->Any:\n user_id = session.get('user_id')\n if user_id is None or User.query.get(user_id) is None:\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'reason': 'authentication_required'}, 403\n )\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n return wrapper\n",
"step-4": "<mask token>\nimport functools\nfrom typing import Any, Callable, cast, Dict\nfrom flask import Blueprint, make_response, request, session\nfrom werkzeug.security import check_password_hash as _check_password_hash\nfrom .accesscontrol import PERMISSIONS\nfrom .api import APIError, UserSchema\nfrom .db.models import User\nbp = Blueprint('auth', __name__, url_prefix='/api/v1/auth')\n_CHECK_HASH_ANYWAY = (\n 'pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050'\n )\ncheck_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],\n bool], _check_password_hash)\n\n\n@bp.route('/login', methods=('POST',))\ndef login() ->Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(request.json, partial=('id',\n 'qualifications') + PERMISSIONS)\n username = user_dict['username']\n password = user_dict['password']\n if is_password_correct(username, password):\n user = fetch_user(username)\n session['user_id'] = user['id']\n response = make_response(user)\n response.set_cookie('is_authenticated', '1')\n return response\n raise APIError(reason='invalid_user_or_password', status_code=403)\n\n\n@bp.route('/logout', methods=('POST',))\ndef logout() ->Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'success': True})\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) ->bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n return check_password_hash(password_hash, password) and user is not None\n\n\ndef fetch_user(username: str) ->Dict[str, Any]:\n \"\"\"Look up a user as a dictionary from the DB.\"\"\"\n user = 
User.query.filter_by(username=username).first()\n return cast(Dict[str, Any], UserSchema().dump(user))\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[\n ..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) ->Any:\n user_id = session.get('user_id')\n if user_id is None or User.query.get(user_id) is None:\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'reason': 'authentication_required'}, 403\n )\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n return wrapper\n",
"step-5": "\"\"\"\nAuthentication views.\n\nlogin()\n Flask view to log a user in.\n\"\"\"\n\nimport functools\nfrom typing import Any, Callable, cast, Dict\n\nfrom flask import Blueprint, make_response, request, session\nfrom werkzeug.security import check_password_hash as _check_password_hash\n\nfrom .accesscontrol import PERMISSIONS\nfrom .api import APIError, UserSchema\nfrom .db.models import User\n\n\nbp = Blueprint(\"auth\", __name__, url_prefix=\"/api/v1/auth\")\n\n_CHECK_HASH_ANYWAY = \"pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050\" # pylint: disable=line-too-long\n\ncheck_password_hash: Callable[[str, str], bool] = cast(\n Callable[[str, str], bool], _check_password_hash\n)\n\n\n@bp.route(\"/login\", methods=(\"POST\",))\ndef login() -> Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(\n request.json, partial=(\"id\", \"qualifications\") + PERMISSIONS\n )\n username = user_dict[\"username\"]\n password = user_dict[\"password\"]\n\n if is_password_correct(username, password):\n user = fetch_user(username)\n session[\"user_id\"] = user[\"id\"]\n response = make_response(user)\n response.set_cookie(\"is_authenticated\", \"1\")\n return response\n\n raise APIError(reason=\"invalid_user_or_password\", status_code=403)\n\n\n@bp.route(\"/logout\", methods=(\"POST\",))\ndef logout() -> Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if \"user_id\" in session:\n del session[\"user_id\"]\n response = make_response({\"success\": True})\n response.set_cookie(\"is_authenticated\", max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) -> bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n # We need to prevent timing-based side-channel attacks\n # that could be exploited for user enumeration\n password_hash = 
_CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n\n return check_password_hash(password_hash, password) and user is not None\n\n\ndef fetch_user(username: str) -> Dict[str, Any]:\n \"\"\"Look up a user as a dictionary from the DB.\"\"\"\n user = User.query.filter_by(username=username).first()\n return cast(Dict[str, Any], UserSchema().dump(user))\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n user_id = session.get(\"user_id\")\n if user_id is None or User.query.get(user_id) is None:\n if \"user_id\" in session:\n del session[\"user_id\"]\n response = make_response({\"reason\": \"authentication_required\"}, 403)\n response.set_cookie(\"is_authenticated\", max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n\n return wrapper\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
#####################将政策文件中的内容抽取出来:标准、伦理、 3部分内容##########################
###########step 1:把3部分内容找到近义词,组成一个词表######
###########step 2:把文件与词表相匹配,判断文件到底在讲啥######
from nltk.corpus import wordnet as wn
import os
import codecs
# goods = wn.synsets('beautiful')
# beautifuls = wn.synsets('pretty')
# bads = wn.synsets('standard')
# print('good和bad的语义相似度为: ', max([0 if good.path_similarity(bad) == None else good.path_similarity(bad) for good in goods for bad in bads]))
def readOnePolicy(path2):
    """Score one policy document against six topic categories via WordNet.

    For every non-stopword, non-numeric token in the file, the maximum
    WordNet path similarity against each category's synsets is computed,
    and the per-category maximum over the whole document is printed as:

        <path>   <ethic> <standard> <privacy> <education> <investment> <application>

    Parameters
    ----------
    path2 : str
        Path of the policy text file to score.
    """
    # Category synsets, in the fixed order used by the final print().
    categories = [
        ('ethic', wn.synsets('ethic')),
        ('standard', wn.synsets('standard')),
        ('privacy', wn.synsets('privacy')),
        ('education', wn.synsets('education')),
        ('investment', wn.synsets('investment')),
        ('application', wn.synsets('application')),
    ]
    # codecs + errors='ignore' guards against badly encoded policy files.
    with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:
        words = fr.read().split()
    # Set membership is O(1); the original list scan was O(n) per token.
    with open('stopWords.txt', 'r') as f2:
        stop_words = set(f2.read().split())
    best = {name: 0 for name, _ in categories}
    for word in words:
        if word in stop_words or word.isnumeric():
            continue
        word_syns = wn.synsets(word)  # empty for out-of-vocabulary tokens
        if not word_syns:
            continue
        for name, cat_syns in categories:
            score = _maxPathSimilarity(cat_syns, word_syns)
            if score > best[name]:
                best[name] = score
    print(path2, ' ', best['ethic'], ' ', best['standard'], ' ',
          best['privacy'], ' ', best['education'], ' ',
          best['investment'], ' ', best['application'])


def _maxPathSimilarity(synsets_a, synsets_b):
    """Return the largest WordNet path similarity over the cross product.

    ``path_similarity`` returns None for unrelated synset pairs; those
    pairs are treated as 0 so max() is always well defined for
    non-empty inputs.
    """
    return max(0 if a.path_similarity(b) is None else a.path_similarity(b)
               for a in synsets_a for b in synsets_b)
# Entry point: score every policy file found under the ``txt`` directory.
file_dir = r"txt"
for root, dirs, files in os.walk(file_dir):
    for fname in files:
        # Join against *root*, not file_dir: os.walk yields file names
        # relative to the directory currently being visited, so joining
        # with the top-level dir produced wrong paths for any file
        # inside a subdirectory of txt/.
        path1 = os.path.join(root, fname)
        readOnePolicy(path1)
|
normal
|
{
"blob_id": "caca4309034f08874e1e32828a601e7e3d4d3efd",
"index": 2058,
"step-1": "<mask token>\n\n\ndef readOnePolicy(path2):\n ethic_set = wn.synsets('ethic')\n standard_set = wn.synsets('standard')\n privacy_set = wn.synsets('privacy')\n education_set = wn.synsets('education')\n investment_set = wn.synsets('investment')\n application_set = wn.synsets('application')\n content = ''\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:\n content = fr.read()\n content = content.split()\n stop_words = ''\n with open('stopWords.txt', 'r') as f2:\n stop_words = f2.read()\n stop_words = stop_words.split()\n ethic_max_prob = 0\n standard_max_prob = 0\n privacy_max_prob = 0\n education_max_prob = 0\n investment_max_prob = 0\n application_max_prob = 0\n for i in range(len(content)):\n contentSyns = []\n if content[i] not in stop_words:\n if not content[i].isnumeric():\n contentSyns = wn.synsets(content[i])\n if len(contentSyns) > 0:\n ethic_prob = max([(0 if e.path_similarity(c) == None else\n e.path_similarity(c)) for e in ethic_set for c in\n contentSyns])\n standard_prob = max([(0 if s.path_similarity(c) == None\n else s.path_similarity(c)) for s in standard_set for\n c in contentSyns])\n privacy_prob = max([(0 if p.path_similarity(c) == None else\n p.path_similarity(c)) for p in privacy_set for c in\n contentSyns])\n education_prob = max([(0 if edu.path_similarity(c) ==\n None else edu.path_similarity(c)) for edu in\n education_set for c in contentSyns])\n investment_prob = max([(0 if i.path_similarity(c) ==\n None else i.path_similarity(c)) for i in\n investment_set for c in contentSyns])\n application_prob = max([(0 if a.path_similarity(c) ==\n None else a.path_similarity(c)) for a in\n application_set for c in contentSyns])\n if ethic_prob > ethic_max_prob:\n ethic_max_prob = ethic_prob\n if standard_prob > standard_max_prob:\n standard_max_prob = standard_prob\n if privacy_prob > privacy_max_prob:\n privacy_max_prob = privacy_prob\n if education_prob > education_max_prob:\n education_max_prob = education_prob\n if 
investment_prob > investment_max_prob:\n investment_max_prob = investment_prob\n if application_prob > application_max_prob:\n application_max_prob = application_prob\n print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',\n privacy_max_prob, ' ', education_max_prob, ' ',\n investment_max_prob, ' ', application_max_prob)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef readOnePolicy(path2):\n ethic_set = wn.synsets('ethic')\n standard_set = wn.synsets('standard')\n privacy_set = wn.synsets('privacy')\n education_set = wn.synsets('education')\n investment_set = wn.synsets('investment')\n application_set = wn.synsets('application')\n content = ''\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:\n content = fr.read()\n content = content.split()\n stop_words = ''\n with open('stopWords.txt', 'r') as f2:\n stop_words = f2.read()\n stop_words = stop_words.split()\n ethic_max_prob = 0\n standard_max_prob = 0\n privacy_max_prob = 0\n education_max_prob = 0\n investment_max_prob = 0\n application_max_prob = 0\n for i in range(len(content)):\n contentSyns = []\n if content[i] not in stop_words:\n if not content[i].isnumeric():\n contentSyns = wn.synsets(content[i])\n if len(contentSyns) > 0:\n ethic_prob = max([(0 if e.path_similarity(c) == None else\n e.path_similarity(c)) for e in ethic_set for c in\n contentSyns])\n standard_prob = max([(0 if s.path_similarity(c) == None\n else s.path_similarity(c)) for s in standard_set for\n c in contentSyns])\n privacy_prob = max([(0 if p.path_similarity(c) == None else\n p.path_similarity(c)) for p in privacy_set for c in\n contentSyns])\n education_prob = max([(0 if edu.path_similarity(c) ==\n None else edu.path_similarity(c)) for edu in\n education_set for c in contentSyns])\n investment_prob = max([(0 if i.path_similarity(c) ==\n None else i.path_similarity(c)) for i in\n investment_set for c in contentSyns])\n application_prob = max([(0 if a.path_similarity(c) ==\n None else a.path_similarity(c)) for a in\n application_set for c in contentSyns])\n if ethic_prob > ethic_max_prob:\n ethic_max_prob = ethic_prob\n if standard_prob > standard_max_prob:\n standard_max_prob = standard_prob\n if privacy_prob > privacy_max_prob:\n privacy_max_prob = privacy_prob\n if education_prob > education_max_prob:\n education_max_prob = education_prob\n if 
investment_prob > investment_max_prob:\n investment_max_prob = investment_prob\n if application_prob > application_max_prob:\n application_max_prob = application_prob\n print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',\n privacy_max_prob, ' ', education_max_prob, ' ',\n investment_max_prob, ' ', application_max_prob)\n\n\n<mask token>\nfor root, dirs, files in os.walk(file_dir):\n for f in range(len(files)):\n path1 = os.path.join(file_dir, files[f])\n readOnePolicy(path1)\n",
"step-3": "<mask token>\n\n\ndef readOnePolicy(path2):\n ethic_set = wn.synsets('ethic')\n standard_set = wn.synsets('standard')\n privacy_set = wn.synsets('privacy')\n education_set = wn.synsets('education')\n investment_set = wn.synsets('investment')\n application_set = wn.synsets('application')\n content = ''\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:\n content = fr.read()\n content = content.split()\n stop_words = ''\n with open('stopWords.txt', 'r') as f2:\n stop_words = f2.read()\n stop_words = stop_words.split()\n ethic_max_prob = 0\n standard_max_prob = 0\n privacy_max_prob = 0\n education_max_prob = 0\n investment_max_prob = 0\n application_max_prob = 0\n for i in range(len(content)):\n contentSyns = []\n if content[i] not in stop_words:\n if not content[i].isnumeric():\n contentSyns = wn.synsets(content[i])\n if len(contentSyns) > 0:\n ethic_prob = max([(0 if e.path_similarity(c) == None else\n e.path_similarity(c)) for e in ethic_set for c in\n contentSyns])\n standard_prob = max([(0 if s.path_similarity(c) == None\n else s.path_similarity(c)) for s in standard_set for\n c in contentSyns])\n privacy_prob = max([(0 if p.path_similarity(c) == None else\n p.path_similarity(c)) for p in privacy_set for c in\n contentSyns])\n education_prob = max([(0 if edu.path_similarity(c) ==\n None else edu.path_similarity(c)) for edu in\n education_set for c in contentSyns])\n investment_prob = max([(0 if i.path_similarity(c) ==\n None else i.path_similarity(c)) for i in\n investment_set for c in contentSyns])\n application_prob = max([(0 if a.path_similarity(c) ==\n None else a.path_similarity(c)) for a in\n application_set for c in contentSyns])\n if ethic_prob > ethic_max_prob:\n ethic_max_prob = ethic_prob\n if standard_prob > standard_max_prob:\n standard_max_prob = standard_prob\n if privacy_prob > privacy_max_prob:\n privacy_max_prob = privacy_prob\n if education_prob > education_max_prob:\n education_max_prob = education_prob\n if 
investment_prob > investment_max_prob:\n investment_max_prob = investment_prob\n if application_prob > application_max_prob:\n application_max_prob = application_prob\n print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',\n privacy_max_prob, ' ', education_max_prob, ' ',\n investment_max_prob, ' ', application_max_prob)\n\n\nfile_dir = 'txt'\nfor root, dirs, files in os.walk(file_dir):\n for f in range(len(files)):\n path1 = os.path.join(file_dir, files[f])\n readOnePolicy(path1)\n",
"step-4": "from nltk.corpus import wordnet as wn\nimport os\nimport codecs\n\n\ndef readOnePolicy(path2):\n ethic_set = wn.synsets('ethic')\n standard_set = wn.synsets('standard')\n privacy_set = wn.synsets('privacy')\n education_set = wn.synsets('education')\n investment_set = wn.synsets('investment')\n application_set = wn.synsets('application')\n content = ''\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:\n content = fr.read()\n content = content.split()\n stop_words = ''\n with open('stopWords.txt', 'r') as f2:\n stop_words = f2.read()\n stop_words = stop_words.split()\n ethic_max_prob = 0\n standard_max_prob = 0\n privacy_max_prob = 0\n education_max_prob = 0\n investment_max_prob = 0\n application_max_prob = 0\n for i in range(len(content)):\n contentSyns = []\n if content[i] not in stop_words:\n if not content[i].isnumeric():\n contentSyns = wn.synsets(content[i])\n if len(contentSyns) > 0:\n ethic_prob = max([(0 if e.path_similarity(c) == None else\n e.path_similarity(c)) for e in ethic_set for c in\n contentSyns])\n standard_prob = max([(0 if s.path_similarity(c) == None\n else s.path_similarity(c)) for s in standard_set for\n c in contentSyns])\n privacy_prob = max([(0 if p.path_similarity(c) == None else\n p.path_similarity(c)) for p in privacy_set for c in\n contentSyns])\n education_prob = max([(0 if edu.path_similarity(c) ==\n None else edu.path_similarity(c)) for edu in\n education_set for c in contentSyns])\n investment_prob = max([(0 if i.path_similarity(c) ==\n None else i.path_similarity(c)) for i in\n investment_set for c in contentSyns])\n application_prob = max([(0 if a.path_similarity(c) ==\n None else a.path_similarity(c)) for a in\n application_set for c in contentSyns])\n if ethic_prob > ethic_max_prob:\n ethic_max_prob = ethic_prob\n if standard_prob > standard_max_prob:\n standard_max_prob = standard_prob\n if privacy_prob > privacy_max_prob:\n privacy_max_prob = privacy_prob\n if education_prob > 
education_max_prob:\n education_max_prob = education_prob\n if investment_prob > investment_max_prob:\n investment_max_prob = investment_prob\n if application_prob > application_max_prob:\n application_max_prob = application_prob\n print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',\n privacy_max_prob, ' ', education_max_prob, ' ',\n investment_max_prob, ' ', application_max_prob)\n\n\nfile_dir = 'txt'\nfor root, dirs, files in os.walk(file_dir):\n for f in range(len(files)):\n path1 = os.path.join(file_dir, files[f])\n readOnePolicy(path1)\n",
"step-5": "#####################将政策文件中的内容抽取出来:标准、伦理、 3部分内容##########################\r\n###########step 1:把3部分内容找到近义词,组成一个词表######\r\n###########step 2:把文件与词表相匹配,判断文件到底在讲啥######\r\nfrom nltk.corpus import wordnet as wn\r\nimport os\r\nimport codecs\r\n# goods = wn.synsets('beautiful')\r\n# beautifuls = wn.synsets('pretty')\r\n# bads = wn.synsets('standard')\r\n\r\n# print('good和bad的语义相似度为: ', max([0 if good.path_similarity(bad) == None else good.path_similarity(bad) for good in goods for bad in bads]))\r\ndef readOnePolicy(path2):\r\n ethic_set = wn.synsets('ethic')\r\n # print('ethic的同义词集为:', ethic_set)\r\n # print('ethic的各同义词集包含的单词有:', [ethic.lemma_names() for ethic in ethic_set])\r\n # print('ethic的各同义词集的具体定义是:',[dog.definition() for dog in ethic_set])\r\n # print('ethic的各同义词集的例子是:',[dog.examples() for dog in ethic_set])\r\n standard_set = wn.synsets('standard')\r\n privacy_set = wn.synsets('privacy')\r\n education_set = wn.synsets('education')\r\n investment_set = wn.synsets('investment')\r\n application_set = wn.synsets('application')\r\n content=''\r\n # with open(path2,'r',encoding='UTF-8') as f1:\r\n # with open(path2, 'r', encoding='UTF-8') as f1:\r\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:###这里用codecs防止编码出错\r\n content=fr.read()\r\n content=content.split()\r\n # print(type(content))\r\n\r\n # content = wn.synsets('standard')\r\n\r\n # print('good和beautiful的语义相似度为: ', max([0 if one_ethic.path_similarity(one_word) == None else one_ethic.path_similarity(one_word) for one_ethic in ethic_set for one_word in content]))\r\n #\r\n # for ethic in ethic_set:\r\n # # print(type(ethic.lemma_names()))##list\r\n # for one_word in range(len(ethic.lemma_names())):\r\n # print(ethic.lemma_names()[one_word])\r\n # print('content和ethic的语义相似度为: ', max([0 if good.path_similarity(beautiful) == None else good.path_similarity(beautiful) for good in goods for beautiful in beautifuls]))\r\n stop_words=''\r\n with open('stopWords.txt','r') as f2:\r\n 
stop_words=f2.read()\r\n stop_words=stop_words.split()\r\n\r\n ethic_max_prob = 0\r\n standard_max_prob = 0\r\n privacy_max_prob = 0\r\n education_max_prob = 0\r\n investment_max_prob = 0\r\n application_max_prob = 0\r\n for i in range(len(content)):\r\n contentSyns=[]\r\n if content[i] not in stop_words:\r\n if not content[i].isnumeric():\r\n # print(content[i],' content[i]')\r\n contentSyns=wn.synsets(content[i])\r\n # print(contentSyns,' contentsyns')###contentSyns有些是空的[],下面max()会报错\r\n if len(contentSyns)>0:\r\n ethic_prob=max([0 if e.path_similarity(c) == None else e.path_similarity(c) for e in ethic_set for c in contentSyns])\r\n standard_prob = max([0 if s.path_similarity(c) == None else s.path_similarity(c) for s in standard_set for c in contentSyns])\r\n privacy_prob = max([0 if p.path_similarity(c) == None else p.path_similarity(c) for p in privacy_set for c in contentSyns])\r\n education_prob = max([0 if edu.path_similarity(c) == None else edu.path_similarity(c) for edu in education_set for c in contentSyns])\r\n investment_prob = max([0 if i.path_similarity(c) == None else i.path_similarity(c) for i in investment_set for c in contentSyns])\r\n application_prob = max([0 if a.path_similarity(c) == None else a.path_similarity(c) for a in application_set for c in contentSyns])\r\n\r\n if ethic_prob>ethic_max_prob:\r\n ethic_max_prob=ethic_prob\r\n if standard_prob>standard_max_prob:\r\n standard_max_prob=standard_prob\r\n if privacy_prob>privacy_max_prob:\r\n privacy_max_prob=privacy_prob\r\n if education_prob > education_max_prob:\r\n education_max_prob = education_prob\r\n if investment_prob > investment_max_prob:\r\n investment_max_prob = investment_prob\r\n if application_prob > application_max_prob:\r\n application_max_prob = application_prob\r\n\r\n\r\n # print(max_prob,' 概率')\r\n\r\n # print(ethic_max_prob,' ethic_max_prob')\r\n # print(standard_max_prob,' standard_max_prob')\r\n # print(privacy_max_prob,' privacy_max_prob')\r\n print(path2,' 
',ethic_max_prob,' ',standard_max_prob,' ',privacy_max_prob,' ',education_max_prob,' ',investment_max_prob,' ',application_max_prob)\r\n\r\nfile_dir = r\"txt\"\r\nfor root, dirs, files in os.walk(file_dir):\r\n for f in range(len(files)):\r\n path1=os.path.join(file_dir,files[f])\r\n # print(path1,' doc_name')\r\n readOnePolicy(path1)\r\n # with open(path1, 'r') as f1:\r\n # content = f1.read()\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def getDates():
    """Extend the stored date axis to cover every data column.

    Builds one np.datetime64 per data column, counting forward from
    2020-01-13 (the first date in Apple's mobility CSV), appends them to
    the dates already in the store, and saves the result back.

    Returns False immediately if earlier steps recorded exceptions;
    otherwise returns True (even when date construction itself failed —
    the error is appended to the shared exception list instead).
    """
    dates = store.mapStore('dates')
    data = store.mapStore('data')
    exceptions = store.mapStore('exceptions')
    if len(exceptions) > 0:
        return False
    try:
        start = date(2020, 1, 13)
        # One date per data column after the metadata columns.
        column_count = data[0, FIRST:].shape[0]
        fresh = [np.datetime64(start + timedelta(days=offset))
                 for offset in range(column_count)]
        store.updateStore(dates=np.concatenate((dates, fresh)))
    except Exception as e:
        exceptions = store.mapStore('exceptions')
        exceptions.append(e)
        print('Problems with handling data numpy array')
        print(e)
    return True
def addDataToDB(conn, filterData):
    """Insert the (optionally date-filtered) mobility matrix into Postgres.

    Parameters
    ----------
    conn : psycopg2 connection
        Open database connection; one INSERT is committed per cell.
    filterData : array-like or None
        Boolean/index mask over the date axis, or None to load all dates.

    Returns False without inserting if earlier steps recorded exceptions.
    """
    data = store.mapStore('data')
    dates = store.mapStore('dates')
    exceptions = store.mapStore('exceptions')
    if len(exceptions) > 0:
        return False
    dataValues = data[1:, FIRST:]
    datesValues = dates
    if filterData is not None:
        datesValues = datesValues[filterData]
        dataValues = dataValues[:, filterData]
    sql = (
        'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'
        )
    for ix, iy in np.ndindex(dataValues.shape):
        try:
            # ``row_date`` — the original local was named ``date`` and
            # shadowed the imported datetime.date class.
            row_date = datesValues[iy].astype(datetime)
            values = tuple(data[ix + 1, :FIRST].tolist())
            item = dataValues[ix, iy].item()
            try:
                item = float(item)
            except (TypeError, ValueError):
                # Non-numeric cells (missing data) become SQL NULL.
                item = None
            values = values + (row_date, item)
            cursor = conn.cursor()
            cursor.execute(sql, values)
            conn.commit()
            cursor.close()
        except (Exception, psycopg2.DatabaseError) as error:
            print(error)
            # Roll back the failed statement; without this, every later
            # execute on this connection raises InFailedSqlTransaction.
            conn.rollback()
            exceptions = store.mapStore('exceptions')
            exceptions.append(error)
def addPercentileMessageToDB():
    """Print percentile summaries of state-level mobility by transport mode.

    For walking, driving and transit, computes the 25th/50th/75th
    percentiles across states and prints which states fall below Q1,
    above Q3, or above 100 (i.e. above the 2020-01-13 baseline).

    NOTE(review): despite the name, this function only prints —
    nothing is written to the database here. Confirm intent with caller.
    """
    data = store.mapStore('data')
    # Per-mode state rows; filterStates defaults to walking when no
    # mode argument is given (presumably — verify in filters module).
    states_walking = filters.filterStates(data[1:, :])
    states_driving = filters.filterStates(data[1:, :], 'driving')
    states_transit = filters.filterStates(data[1:, :], 'transit')
    # Each call returns (indices below, indices above, percentile value).
    underq1, overq1, percentile_walking_25 = filters.filterPercentiles(
        states_walking, 25)
    undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(
        states_walking, 50)
    underq3, overq3, percentile_walking_75 = filters.filterPercentiles(
        states_walking, 75)
    underq1_driving, overq1_driving, percentile_driving_25 = (filters.
        filterPercentiles(states_driving, 25))
    undermedian_driving, overmedian_driving, percentile_driving_50 = (filters
        .filterPercentiles(states_driving, 50))
    underq3_driving, overq3_driving, percentile_driving_75 = (filters.
        filterPercentiles(states_driving, 75))
    underq1_transit, overq1_transit, percentile_transit_25 = (filters.
        filterPercentiles(states_transit, 25))
    undermedian_transit, overmedian_transit, percentile_transit_50 = (filters
        .filterPercentiles(states_transit, 50))
    underq3_transit, overq3_transit, percentile_transit_75 = (filters.
        filterPercentiles(states_transit, 75))
    # Column 1 of each state row is the region name (see addDataToDB's
    # column layout); these pull the names of the flagged states.
    over100_waling = filters.filerOver100(states_walking)
    underq1_states = states_walking[underq1, 1]
    overq3_states = states_walking[overq3, 1]
    over100_states = states_walking[over100_waling, 1]
    over100_driving = filters.filerOver100(states_driving)
    underq1_states_driving = states_driving[underq1_driving, 1]
    overq3_states_driving = states_driving[overq3_driving, 1]
    over100_states_driving = states_driving[over100_driving, 1]
    over100_transit = filters.filerOver100(states_transit)
    underq1_states_transit = states_transit[underq1_transit, 1]
    overq3_states_transit = states_transit[overq3_transit, 1]
    over100_states_transit = states_transit[over100_transit, 1]
    # Walking summary.
    print('walking under 25 percentile (far to normal) ' +
        percentile_walking_25.astype(np.str))
    print(underq1_states)
    print('walking over 75 percentile (over normal trnasportation) ' +
        percentile_walking_75.astype(np.str))
    print(overq3_states)
    print('walking over 100 in comparison to 13.1.2020')
    print(over100_states)
    print('Median value is ' + percentile_walking_50.astype(np.str))
    print(' ')
    # Driving summary.
    print('Driving under 25 percentile (far to normal) ' +
        percentile_driving_25.astype(np.str))
    print(underq1_states_driving)
    print('Driving over 75 percentile (over normal trnasportation) ',
        percentile_driving_75.astype(np.str))
    print(overq3_states_driving)
    print('Driving over 100% in comparison to 13.1.2020')
    print(over100_states_driving)
    print('Median value is ' + percentile_driving_50.astype(np.str))
    print(' ')
    # Transit summary.
    print('Transit under 25 percentile (far to normal) ' +
        percentile_transit_25.astype(np.str))
    print(underq1_states_transit)
    print('Transit over 75 percentile (over normal trnasportation) ',
        percentile_transit_75.astype(np.str))
    print(overq3_states_transit.astype(np.str))
    print('Transit over 100 in comparison to 13.1.2020')
    print(over100_states_transit)
    print('Median value is ' + percentile_transit_50.astype(np.str))
    print(' ')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getData():
    """Download Apple's mobility-trends CSV and store it as a numpy array.

    Uses the stored 'today' date minus two days to build the file name
    (Apple publishes the CSV with a lag), writes the download to
    temp/temp.csv, parses it with genfromtxt, and saves the array under
    the 'data' store key. Failures are appended to the shared exception
    list instead of being raised.
    """
    today = store.mapStore('today')
    filedate = np.datetime64(today - timedelta(days=2))
    try:
        url = (
            'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'
            .format(filedate))
        download = requests.get(url)
        download.encoding = 'utf-8'
        # ``with`` guarantees the temp file is closed even if the write
        # fails; the original leaked the handle on exception.
        with open('temp/temp.csv', 'w', encoding='utf8') as temp_file:
            temp_file.writelines(download.text)
        # dtype=str replaces the np.str alias, which NumPy removed in 1.24.
        npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=str,
            encoding='utf8', invalid_raise=False, missing_values=np.nan,
            filling_values=np.nan)
        store.updateStore(data=npcsv)
    except Exception as e:
        exceptions = store.mapStore('exceptions')
        exceptions.append(e)
        print('Not possible to read csv file .')
        print(e)
def getDates():
dates = store.mapStore('dates')
data = store.mapStore('data')
exceptions = store.mapStore('exceptions')
if len(exceptions) > 0:
return False
try:
d0 = date(2020, 1, 13)
d1 = data[0, FIRST:]
i = 0
newdates = []
while i <= d1.shape[0] - 1:
diffday = np.datetime64(d0 + timedelta(days=i))
newdates.append(diffday)
i += 1
newdates = np.concatenate((dates, newdates))
store.updateStore(dates=newdates)
except Exception as e:
exceptions = store.mapStore('exceptions')
exceptions.append(e)
print('Problems with handling data numpy array')
print(e)
return True
def addDataToDB(conn, filterData):
data = store.mapStore('data')
dates = store.mapStore('dates')
exceptions = store.mapStore('exceptions')
if len(exceptions) > 0:
return False
dataValues = data[1:, FIRST:]
datesValues = dates
if filterData is not None:
datesValues = datesValues[filterData]
dataValues = dataValues[:, filterData]
sql = (
'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'
)
for ix, iy in np.ndindex(dataValues.shape):
try:
date = datesValues[iy].astype(datetime)
values = data[ix + 1, :FIRST]
values = tuple(values.tolist())
item = dataValues[ix, iy].item()
try:
item = float(item)
except:
item = None
values = values + tuple([date, item])
cursor = conn.cursor()
cursor.execute(sql, values)
conn.commit()
cursor.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
exceptions = store.mapStore('exceptions')
exceptions.append(error)
def addPercentileMessageToDB():
data = store.mapStore('data')
states_walking = filters.filterStates(data[1:, :])
states_driving = filters.filterStates(data[1:, :], 'driving')
states_transit = filters.filterStates(data[1:, :], 'transit')
underq1, overq1, percentile_walking_25 = filters.filterPercentiles(
states_walking, 25)
undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(
states_walking, 50)
underq3, overq3, percentile_walking_75 = filters.filterPercentiles(
states_walking, 75)
underq1_driving, overq1_driving, percentile_driving_25 = (filters.
filterPercentiles(states_driving, 25))
undermedian_driving, overmedian_driving, percentile_driving_50 = (filters
.filterPercentiles(states_driving, 50))
underq3_driving, overq3_driving, percentile_driving_75 = (filters.
filterPercentiles(states_driving, 75))
underq1_transit, overq1_transit, percentile_transit_25 = (filters.
filterPercentiles(states_transit, 25))
undermedian_transit, overmedian_transit, percentile_transit_50 = (filters
.filterPercentiles(states_transit, 50))
underq3_transit, overq3_transit, percentile_transit_75 = (filters.
filterPercentiles(states_transit, 75))
over100_waling = filters.filerOver100(states_walking)
underq1_states = states_walking[underq1, 1]
overq3_states = states_walking[overq3, 1]
over100_states = states_walking[over100_waling, 1]
over100_driving = filters.filerOver100(states_driving)
underq1_states_driving = states_driving[underq1_driving, 1]
overq3_states_driving = states_driving[overq3_driving, 1]
over100_states_driving = states_driving[over100_driving, 1]
over100_transit = filters.filerOver100(states_transit)
underq1_states_transit = states_transit[underq1_transit, 1]
overq3_states_transit = states_transit[overq3_transit, 1]
over100_states_transit = states_transit[over100_transit, 1]
print('walking under 25 percentile (far to normal) ' +
percentile_walking_25.astype(np.str))
print(underq1_states)
print('walking over 75 percentile (over normal trnasportation) ' +
percentile_walking_75.astype(np.str))
print(overq3_states)
print('walking over 100 in comparison to 13.1.2020')
print(over100_states)
print('Median value is ' + percentile_walking_50.astype(np.str))
print(' ')
print('Driving under 25 percentile (far to normal) ' +
percentile_driving_25.astype(np.str))
print(underq1_states_driving)
print('Driving over 75 percentile (over normal trnasportation) ',
percentile_driving_75.astype(np.str))
print(overq3_states_driving)
print('Driving over 100% in comparison to 13.1.2020')
print(over100_states_driving)
print('Median value is ' + percentile_driving_50.astype(np.str))
print(' ')
print('Transit under 25 percentile (far to normal) ' +
percentile_transit_25.astype(np.str))
print(underq1_states_transit)
print('Transit over 75 percentile (over normal trnasportation) ',
percentile_transit_75.astype(np.str))
print(overq3_states_transit.astype(np.str))
print('Transit over 100 in comparison to 13.1.2020')
print(over100_states_transit)
print('Median value is ' + percentile_transit_50.astype(np.str))
print(' ')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
FIRST = 4  # index of the first date column in the mobility CSV matrix


def prepareDate():
    """Save today's date (US/Pacific) under the store's 'today' key.

    Pacific time is used because Apple publishes the mobility file on a
    US schedule; the stored date drives the download URL in getData().
    """
    pacific_now = datetime.now(timezone('US/Pacific'))
    store.updateStore(today=pacific_now.date())
def getData():
today = store.mapStore('today')
npdata = store.mapStore('data')
filedate = np.datetime64(today - timedelta(days=2))
try:
url = (
'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'
.format(filedate))
download = requests.get(url)
download.encoding = 'utf-8'
temp_file = open('temp/temp.csv', 'w', encoding='utf8')
temp_file.writelines(download.text)
npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=np.str,
encoding='utf8', invalid_raise=False, missing_values=np.nan,
filling_values=np.nan)
temp_file.close()
store.updateStore(data=npcsv)
except Exception as e:
exceptions = store.mapStore('exceptions')
exceptions.append(e)
print('Not possible to read csv file .')
print(e)
def getDates():
dates = store.mapStore('dates')
data = store.mapStore('data')
exceptions = store.mapStore('exceptions')
if len(exceptions) > 0:
return False
try:
d0 = date(2020, 1, 13)
d1 = data[0, FIRST:]
i = 0
newdates = []
while i <= d1.shape[0] - 1:
diffday = np.datetime64(d0 + timedelta(days=i))
newdates.append(diffday)
i += 1
newdates = np.concatenate((dates, newdates))
store.updateStore(dates=newdates)
except Exception as e:
exceptions = store.mapStore('exceptions')
exceptions.append(e)
print('Problems with handling data numpy array')
print(e)
return True
def addDataToDB(conn, filterData):
data = store.mapStore('data')
dates = store.mapStore('dates')
exceptions = store.mapStore('exceptions')
if len(exceptions) > 0:
return False
dataValues = data[1:, FIRST:]
datesValues = dates
if filterData is not None:
datesValues = datesValues[filterData]
dataValues = dataValues[:, filterData]
sql = (
'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'
)
for ix, iy in np.ndindex(dataValues.shape):
try:
date = datesValues[iy].astype(datetime)
values = data[ix + 1, :FIRST]
values = tuple(values.tolist())
item = dataValues[ix, iy].item()
try:
item = float(item)
except:
item = None
values = values + tuple([date, item])
cursor = conn.cursor()
cursor.execute(sql, values)
conn.commit()
cursor.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
exceptions = store.mapStore('exceptions')
exceptions.append(error)
def addPercentileMessageToDB():
    """Print 25/50/75-percentile summaries per transport mode for US states.

    For walking, driving and transit, lists the states below the 25th
    percentile, above the 75th, and above 100% of the 2020-01-13 baseline.

    NOTE(review): despite the name, nothing is written to the database
    here — output goes to stdout only.

    Fix: ``np.str`` (a deprecated alias for ``str``) was removed in
    NumPy 1.24, so every ``.astype(np.str)`` call crashed on modern
    NumPy; replaced with the supported ``.astype(str)`` spelling.
    """
    data = store.mapStore('data')
    # Rows filtered to US states, one array per transportation mode.
    states_walking = filters.filterStates(data[1:, :])
    states_driving = filters.filterStates(data[1:, :], 'driving')
    states_transit = filters.filterStates(data[1:, :], 'transit')
    # filterPercentiles appears to return (below mask, above mask,
    # percentile value) — assumed from usage; TODO confirm in filters module.
    underq1, overq1, percentile_walking_25 = filters.filterPercentiles(
        states_walking, 25)
    undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(
        states_walking, 50)
    underq3, overq3, percentile_walking_75 = filters.filterPercentiles(
        states_walking, 75)
    underq1_driving, overq1_driving, percentile_driving_25 = (filters.
        filterPercentiles(states_driving, 25))
    undermedian_driving, overmedian_driving, percentile_driving_50 = (filters
        .filterPercentiles(states_driving, 50))
    underq3_driving, overq3_driving, percentile_driving_75 = (filters.
        filterPercentiles(states_driving, 75))
    underq1_transit, overq1_transit, percentile_transit_25 = (filters.
        filterPercentiles(states_transit, 25))
    undermedian_transit, overmedian_transit, percentile_transit_50 = (filters
        .filterPercentiles(states_transit, 50))
    underq3_transit, overq3_transit, percentile_transit_75 = (filters.
        filterPercentiles(states_transit, 75))
    # 'filerOver100' (sic) is the name exposed by the filters module.
    over100_waling = filters.filerOver100(states_walking)
    underq1_states = states_walking[underq1, 1]
    overq3_states = states_walking[overq3, 1]
    over100_states = states_walking[over100_waling, 1]
    over100_driving = filters.filerOver100(states_driving)
    underq1_states_driving = states_driving[underq1_driving, 1]
    overq3_states_driving = states_driving[overq3_driving, 1]
    over100_states_driving = states_driving[over100_driving, 1]
    over100_transit = filters.filerOver100(states_transit)
    underq1_states_transit = states_transit[underq1_transit, 1]
    overq3_states_transit = states_transit[overq3_transit, 1]
    over100_states_transit = states_transit[over100_transit, 1]
    print('walking under 25 percentile (far to normal) ' +
        percentile_walking_25.astype(str))
    print(underq1_states)
    print('walking over 75 percentile (over normal trnasportation) ' +
        percentile_walking_75.astype(str))
    print(overq3_states)
    print('walking over 100 in comparison to 13.1.2020')
    print(over100_states)
    print('Median value is ' + percentile_walking_50.astype(str))
    print(' ')
    print('Driving under 25 percentile (far to normal) ' +
        percentile_driving_25.astype(str))
    print(underq1_states_driving)
    print('Driving over 75 percentile (over normal trnasportation) ',
        percentile_driving_75.astype(str))
    print(overq3_states_driving)
    print('Driving over 100% in comparison to 13.1.2020')
    print(over100_states_driving)
    print('Median value is ' + percentile_driving_50.astype(str))
    print(' ')
    print('Transit under 25 percentile (far to normal) ' +
        percentile_transit_25.astype(str))
    print(underq1_states_transit)
    print('Transit over 75 percentile (over normal trnasportation) ',
        percentile_transit_75.astype(str))
    print(overq3_states_transit.astype(str))
    print('Transit over 100 in comparison to 13.1.2020')
    print(over100_states_transit)
    print('Median value is ' + percentile_transit_50.astype(str))
    print(' ')
<|reserved_special_token_1|>
import numpy as np
from datetime import date, timedelta, datetime
from pytz import timezone
import store
import psycopg2
import requests
import os
import filters
FIRST = 4
def prepareDate():
    """Store today's calendar date, evaluated in the US/Pacific timezone."""
    now_pacific = datetime.now(timezone('US/Pacific'))
    store.updateStore(today=now_pacific.date())
def getData():
    """Download Apple's mobility-trends CSV (dated two days before the
    stored 'today', Pacific time) and load it into the store as a numpy
    string matrix.

    Failures are recorded in the store's exception list rather than
    raised.  Fixes: ``dtype=np.str`` (alias removed in NumPy 1.24)
    replaced with ``str``; temp file now closed via context manager even
    when the write fails; unused local removed.
    """
    today = store.mapStore('today')
    filedate = np.datetime64(today - timedelta(days=2))
    try:
        url = (
            'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'
            .format(filedate))
        download = requests.get(url)
        download.encoding = 'utf-8'
        # Context manager guarantees the file is closed even on error.
        with open('temp/temp.csv', 'w', encoding='utf8') as temp_file:
            temp_file.writelines(download.text)
        npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=str,
            encoding='utf8', invalid_raise=False, missing_values=np.nan,
            filling_values=np.nan)
        store.updateStore(data=npcsv)
    except Exception as e:
        store.mapStore('exceptions').append(e)
        print('Not possible to read csv file .')
        print(e)
def getDates():
    """Extend the stored date axis with one entry per data column.

    Starting from 2020-01-13 (the first date column in Apple's mobility
    CSV), generates one ``np.datetime64`` per column of the stored data
    header row and appends them to the dates already in the store.

    Returns:
        False when earlier pipeline steps recorded exceptions; True
        otherwise.  NOTE(review): also returns True when this step itself
        fails — the error is only appended to the store's exception list.
    """
    existing_dates = store.mapStore('dates')
    data = store.mapStore('data')
    exceptions = store.mapStore('exceptions')
    if len(exceptions) > 0:
        return False
    try:
        start_day = date(2020, 1, 13)
        header = data[0, FIRST:]
        # One consecutive calendar day per data column, starting at start_day.
        generated = [np.datetime64(start_day + timedelta(days=offset))
                     for offset in range(header.shape[0])]
        store.updateStore(dates=np.concatenate((existing_dates, generated)))
    except Exception as e:
        store.mapStore('exceptions').append(e)
        print('Problems with handling data numpy array')
        print(e)
    return True
def addDataToDB(conn, filterData):
    """Insert every (region row, date column) value of the stored data
    matrix into the ``apple_transport`` Postgres table.

    Args:
        conn: open psycopg2 connection.
        filterData: optional boolean/index array selecting a subset of the
            date columns; None inserts every column.

    Returns:
        False when earlier pipeline steps recorded exceptions; otherwise
        None.  Per-row DB errors are printed and appended to the store's
        exception list instead of aborting the whole load.
    """
    data = store.mapStore('data')
    dates = store.mapStore('dates')
    exceptions = store.mapStore('exceptions')
    if len(exceptions) > 0:
        return False
    dataValues = data[1:, FIRST:]
    datesValues = dates
    if filterData is not None:
        datesValues = datesValues[filterData]
        dataValues = dataValues[:, filterData]
    sql = (
        'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'
        )
    for ix, iy in np.ndindex(dataValues.shape):
        try:
            # Renamed from `date` to avoid shadowing the imported
            # datetime.date class inside the loop.
            row_date = datesValues[iy].astype(datetime)
            meta = tuple(data[ix + 1, :FIRST].tolist())
            raw = dataValues[ix, iy].item()
            try:
                value = float(raw)
            except (TypeError, ValueError):
                # Non-numeric / missing cell -> SQL NULL.  Narrowed from a
                # bare except, which also swallowed KeyboardInterrupt etc.
                value = None
            params = meta + (row_date, value)
            cursor = conn.cursor()
            cursor.execute(sql, params)
            # Commit per row so one bad row does not roll back the rest.
            conn.commit()
            cursor.close()
        except (Exception, psycopg2.DatabaseError) as error:
            print(error)
            store.mapStore('exceptions').append(error)
def addPercentileMessageToDB():
    """Print 25/50/75-percentile summaries per transport mode for US states.

    For walking, driving and transit, lists the states below the 25th
    percentile, above the 75th, and above 100% of the 2020-01-13 baseline.

    NOTE(review): despite the name, nothing is written to the database
    here — output goes to stdout only.

    Fix: ``np.str`` (a deprecated alias for ``str``) was removed in
    NumPy 1.24, so every ``.astype(np.str)`` call crashed on modern
    NumPy; replaced with the supported ``.astype(str)`` spelling.
    """
    data = store.mapStore('data')
    # Rows filtered to US states, one array per transportation mode.
    states_walking = filters.filterStates(data[1:, :])
    states_driving = filters.filterStates(data[1:, :], 'driving')
    states_transit = filters.filterStates(data[1:, :], 'transit')
    # filterPercentiles appears to return (below mask, above mask,
    # percentile value) — assumed from usage; TODO confirm in filters module.
    underq1, overq1, percentile_walking_25 = filters.filterPercentiles(
        states_walking, 25)
    undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(
        states_walking, 50)
    underq3, overq3, percentile_walking_75 = filters.filterPercentiles(
        states_walking, 75)
    underq1_driving, overq1_driving, percentile_driving_25 = (filters.
        filterPercentiles(states_driving, 25))
    undermedian_driving, overmedian_driving, percentile_driving_50 = (filters
        .filterPercentiles(states_driving, 50))
    underq3_driving, overq3_driving, percentile_driving_75 = (filters.
        filterPercentiles(states_driving, 75))
    underq1_transit, overq1_transit, percentile_transit_25 = (filters.
        filterPercentiles(states_transit, 25))
    undermedian_transit, overmedian_transit, percentile_transit_50 = (filters
        .filterPercentiles(states_transit, 50))
    underq3_transit, overq3_transit, percentile_transit_75 = (filters.
        filterPercentiles(states_transit, 75))
    # 'filerOver100' (sic) is the name exposed by the filters module.
    over100_waling = filters.filerOver100(states_walking)
    underq1_states = states_walking[underq1, 1]
    overq3_states = states_walking[overq3, 1]
    over100_states = states_walking[over100_waling, 1]
    over100_driving = filters.filerOver100(states_driving)
    underq1_states_driving = states_driving[underq1_driving, 1]
    overq3_states_driving = states_driving[overq3_driving, 1]
    over100_states_driving = states_driving[over100_driving, 1]
    over100_transit = filters.filerOver100(states_transit)
    underq1_states_transit = states_transit[underq1_transit, 1]
    overq3_states_transit = states_transit[overq3_transit, 1]
    over100_states_transit = states_transit[over100_transit, 1]
    print('walking under 25 percentile (far to normal) ' +
        percentile_walking_25.astype(str))
    print(underq1_states)
    print('walking over 75 percentile (over normal trnasportation) ' +
        percentile_walking_75.astype(str))
    print(overq3_states)
    print('walking over 100 in comparison to 13.1.2020')
    print(over100_states)
    print('Median value is ' + percentile_walking_50.astype(str))
    print(' ')
    print('Driving under 25 percentile (far to normal) ' +
        percentile_driving_25.astype(str))
    print(underq1_states_driving)
    print('Driving over 75 percentile (over normal trnasportation) ',
        percentile_driving_75.astype(str))
    print(overq3_states_driving)
    print('Driving over 100% in comparison to 13.1.2020')
    print(over100_states_driving)
    print('Median value is ' + percentile_driving_50.astype(str))
    print(' ')
    print('Transit under 25 percentile (far to normal) ' +
        percentile_transit_25.astype(str))
    print(underq1_states_transit)
    print('Transit over 75 percentile (over normal trnasportation) ',
        percentile_transit_75.astype(str))
    print(overq3_states_transit.astype(str))
    print('Transit over 100 in comparison to 13.1.2020')
    print(over100_states_transit)
    print('Median value is ' + percentile_transit_50.astype(str))
    print(' ')
<|reserved_special_token_1|>
import numpy as np
from datetime import date, timedelta, datetime
from pytz import timezone
import store
import psycopg2
import requests
import os
import filters
FIRST = 4
def prepareDate():
    """Store today's calendar date, evaluated in the US/Pacific timezone."""
    now_pacific = datetime.now(timezone('US/Pacific'))
    store.updateStore(today=now_pacific.date())
def getData():
    """Download Apple's mobility-trends CSV (dated two days before the
    stored 'today', Pacific time) and load it into the store as a numpy
    string matrix.

    Failures are recorded in the store's exception list rather than
    raised.  Fixes: ``dtype=np.str`` (alias removed in NumPy 1.24)
    replaced with ``str``; temp file now closed via context manager even
    when the write fails; unused local removed.
    """
    today = store.mapStore('today')
    filedate = np.datetime64(today - timedelta(days=2))
    try:
        url = (
            'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'
            .format(filedate))
        download = requests.get(url)
        download.encoding = 'utf-8'
        # Context manager guarantees the file is closed even on error.
        with open('temp/temp.csv', 'w', encoding='utf8') as temp_file:
            temp_file.writelines(download.text)
        npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=str,
            encoding='utf8', invalid_raise=False, missing_values=np.nan,
            filling_values=np.nan)
        store.updateStore(data=npcsv)
    except Exception as e:
        store.mapStore('exceptions').append(e)
        print('Not possible to read csv file .')
        print(e)
def getDates():
    """Extend the stored date axis with one entry per data column.

    Starting from 2020-01-13 (the first date column in Apple's mobility
    CSV), generates one ``np.datetime64`` per column of the stored data
    header row and appends them to the dates already in the store.

    Returns:
        False when earlier pipeline steps recorded exceptions; True
        otherwise.  NOTE(review): also returns True when this step itself
        fails — the error is only appended to the store's exception list.
    """
    existing_dates = store.mapStore('dates')
    data = store.mapStore('data')
    exceptions = store.mapStore('exceptions')
    if len(exceptions) > 0:
        return False
    try:
        start_day = date(2020, 1, 13)
        header = data[0, FIRST:]
        # One consecutive calendar day per data column, starting at start_day.
        generated = [np.datetime64(start_day + timedelta(days=offset))
                     for offset in range(header.shape[0])]
        store.updateStore(dates=np.concatenate((existing_dates, generated)))
    except Exception as e:
        store.mapStore('exceptions').append(e)
        print('Problems with handling data numpy array')
        print(e)
    return True
def addDataToDB(conn, filterData):
    """Insert every (region row, date column) value of the stored data
    matrix into the ``apple_transport`` Postgres table.

    Args:
        conn: open psycopg2 connection.
        filterData: optional boolean/index array selecting a subset of the
            date columns; None inserts every column.

    Returns:
        False when earlier pipeline steps recorded exceptions; otherwise
        None.  Per-row DB errors are printed and appended to the store's
        exception list instead of aborting the whole load.
    """
    data = store.mapStore('data')
    dates = store.mapStore('dates')
    exceptions = store.mapStore('exceptions')
    if len(exceptions) > 0:
        return False
    dataValues = data[1:, FIRST:]
    datesValues = dates
    if filterData is not None:
        datesValues = datesValues[filterData]
        dataValues = dataValues[:, filterData]
    sql = (
        'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'
        )
    for ix, iy in np.ndindex(dataValues.shape):
        try:
            # Renamed from `date` to avoid shadowing the imported
            # datetime.date class inside the loop.
            row_date = datesValues[iy].astype(datetime)
            meta = tuple(data[ix + 1, :FIRST].tolist())
            raw = dataValues[ix, iy].item()
            try:
                value = float(raw)
            except (TypeError, ValueError):
                # Non-numeric / missing cell -> SQL NULL.  Narrowed from a
                # bare except, which also swallowed KeyboardInterrupt etc.
                value = None
            params = meta + (row_date, value)
            cursor = conn.cursor()
            cursor.execute(sql, params)
            # Commit per row so one bad row does not roll back the rest.
            conn.commit()
            cursor.close()
        except (Exception, psycopg2.DatabaseError) as error:
            print(error)
            store.mapStore('exceptions').append(error)
def addPercentileMessageToDB():
    """Print 25/50/75-percentile summaries per transport mode for US states.

    For walking, driving and transit, lists the states below the 25th
    percentile, above the 75th, and above 100% of the 2020-01-13 baseline.

    NOTE(review): despite the name, nothing is written to the database
    here — output goes to stdout only.

    Fix: ``np.str`` (a deprecated alias for ``str``) was removed in
    NumPy 1.24, so every ``.astype(np.str)`` call crashed on modern
    NumPy; replaced with the supported ``.astype(str)`` spelling.
    """
    data = store.mapStore('data')
    # Rows filtered to US states, one array per transportation mode.
    states_walking = filters.filterStates(data[1:, :])
    states_driving = filters.filterStates(data[1:, :], 'driving')
    states_transit = filters.filterStates(data[1:, :], 'transit')
    # filterPercentiles appears to return (below mask, above mask,
    # percentile value) — assumed from usage; TODO confirm in filters module.
    underq1, overq1, percentile_walking_25 = filters.filterPercentiles(
        states_walking, 25)
    undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(
        states_walking, 50)
    underq3, overq3, percentile_walking_75 = filters.filterPercentiles(
        states_walking, 75)
    underq1_driving, overq1_driving, percentile_driving_25 = (filters.
        filterPercentiles(states_driving, 25))
    undermedian_driving, overmedian_driving, percentile_driving_50 = (filters
        .filterPercentiles(states_driving, 50))
    underq3_driving, overq3_driving, percentile_driving_75 = (filters.
        filterPercentiles(states_driving, 75))
    underq1_transit, overq1_transit, percentile_transit_25 = (filters.
        filterPercentiles(states_transit, 25))
    undermedian_transit, overmedian_transit, percentile_transit_50 = (filters
        .filterPercentiles(states_transit, 50))
    underq3_transit, overq3_transit, percentile_transit_75 = (filters.
        filterPercentiles(states_transit, 75))
    # 'filerOver100' (sic) is the name exposed by the filters module.
    over100_waling = filters.filerOver100(states_walking)
    underq1_states = states_walking[underq1, 1]
    overq3_states = states_walking[overq3, 1]
    over100_states = states_walking[over100_waling, 1]
    over100_driving = filters.filerOver100(states_driving)
    underq1_states_driving = states_driving[underq1_driving, 1]
    overq3_states_driving = states_driving[overq3_driving, 1]
    over100_states_driving = states_driving[over100_driving, 1]
    over100_transit = filters.filerOver100(states_transit)
    underq1_states_transit = states_transit[underq1_transit, 1]
    overq3_states_transit = states_transit[overq3_transit, 1]
    over100_states_transit = states_transit[over100_transit, 1]
    print('walking under 25 percentile (far to normal) ' +
        percentile_walking_25.astype(str))
    print(underq1_states)
    print('walking over 75 percentile (over normal trnasportation) ' +
        percentile_walking_75.astype(str))
    print(overq3_states)
    print('walking over 100 in comparison to 13.1.2020')
    print(over100_states)
    print('Median value is ' + percentile_walking_50.astype(str))
    print(' ')
    print('Driving under 25 percentile (far to normal) ' +
        percentile_driving_25.astype(str))
    print(underq1_states_driving)
    print('Driving over 75 percentile (over normal trnasportation) ',
        percentile_driving_75.astype(str))
    print(overq3_states_driving)
    print('Driving over 100% in comparison to 13.1.2020')
    print(over100_states_driving)
    print('Median value is ' + percentile_driving_50.astype(str))
    print(' ')
    print('Transit under 25 percentile (far to normal) ' +
        percentile_transit_25.astype(str))
    print(underq1_states_transit)
    print('Transit over 75 percentile (over normal trnasportation) ',
        percentile_transit_75.astype(str))
    print(overq3_states_transit.astype(str))
    print('Transit over 100 in comparison to 13.1.2020')
    print(over100_states_transit)
    print('Median value is ' + percentile_transit_50.astype(str))
    print(' ')
|
flexible
|
{
"blob_id": "5b4651f37cdcbb13f8ddd03327ef65af0f9cf61d",
"index": 1944,
"step-1": "<mask token>\n\n\ndef getDates():\n dates = store.mapStore('dates')\n data = store.mapStore('data')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0, FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Problems with handling data numpy array')\n print(e)\n return True\n\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore('data')\n dates = store.mapStore('dates')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n dataValues = data[1:, FIRST:]\n datesValues = dates\n if filterData is not None:\n datesValues = datesValues[filterData]\n dataValues = dataValues[:, filterData]\n sql = (\n 'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'\n )\n for ix, iy in np.ndindex(dataValues.shape):\n try:\n date = datesValues[iy].astype(datetime)\n values = data[ix + 1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore('exceptions')\n exceptions.append(error)\n\n\ndef addPercentileMessageToDB():\n data = store.mapStore('data')\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], 'driving')\n states_transit = filters.filterStates(data[1:, :], 'transit')\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(\n states_walking, 
25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(\n states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(\n states_walking, 75)\n underq1_driving, overq1_driving, percentile_driving_25 = (filters.\n filterPercentiles(states_driving, 25))\n undermedian_driving, overmedian_driving, percentile_driving_50 = (filters\n .filterPercentiles(states_driving, 50))\n underq3_driving, overq3_driving, percentile_driving_75 = (filters.\n filterPercentiles(states_driving, 75))\n underq1_transit, overq1_transit, percentile_transit_25 = (filters.\n filterPercentiles(states_transit, 25))\n undermedian_transit, overmedian_transit, percentile_transit_50 = (filters\n .filterPercentiles(states_transit, 50))\n underq3_transit, overq3_transit, percentile_transit_75 = (filters.\n filterPercentiles(states_transit, 75))\n over100_waling = filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1, 1]\n overq3_states = states_walking[overq3, 1]\n over100_states = states_walking[over100_waling, 1]\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving, 1]\n overq3_states_driving = states_driving[overq3_driving, 1]\n over100_states_driving = states_driving[over100_driving, 1]\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit, 1]\n overq3_states_transit = states_transit[overq3_transit, 1]\n over100_states_transit = states_transit[over100_transit, 1]\n print('walking under 25 percentile (far to normal) ' +\n percentile_walking_25.astype(np.str))\n print(underq1_states)\n print('walking over 75 percentile (over normal trnasportation) ' +\n percentile_walking_75.astype(np.str))\n print(overq3_states)\n print('walking over 100 in comparison to 13.1.2020')\n print(over100_states)\n print('Median value is ' + percentile_walking_50.astype(np.str))\n print(' ')\n print('Driving under 25 
percentile (far to normal) ' +\n percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print('Driving over 75 percentile (over normal trnasportation) ',\n percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print('Driving over 100% in comparison to 13.1.2020')\n print(over100_states_driving)\n print('Median value is ' + percentile_driving_50.astype(np.str))\n print(' ')\n print('Transit under 25 percentile (far to normal) ' +\n percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print('Transit over 75 percentile (over normal trnasportation) ',\n percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print('Transit over 100 in comparison to 13.1.2020')\n print(over100_states_transit)\n print('Median value is ' + percentile_transit_50.astype(np.str))\n print(' ')\n",
"step-2": "<mask token>\n\n\ndef getData():\n today = store.mapStore('today')\n npdata = store.mapStore('data')\n filedate = np.datetime64(today - timedelta(days=2))\n try:\n url = (\n 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'\n .format(filedate))\n download = requests.get(url)\n download.encoding = 'utf-8'\n temp_file = open('temp/temp.csv', 'w', encoding='utf8')\n temp_file.writelines(download.text)\n npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=np.str,\n encoding='utf8', invalid_raise=False, missing_values=np.nan,\n filling_values=np.nan)\n temp_file.close()\n store.updateStore(data=npcsv)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Not possible to read csv file .')\n print(e)\n\n\ndef getDates():\n dates = store.mapStore('dates')\n data = store.mapStore('data')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0, FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Problems with handling data numpy array')\n print(e)\n return True\n\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore('data')\n dates = store.mapStore('dates')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n dataValues = data[1:, FIRST:]\n datesValues = dates\n if filterData is not None:\n datesValues = datesValues[filterData]\n dataValues = dataValues[:, filterData]\n sql = (\n 'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'\n )\n for ix, iy in np.ndindex(dataValues.shape):\n 
try:\n date = datesValues[iy].astype(datetime)\n values = data[ix + 1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore('exceptions')\n exceptions.append(error)\n\n\ndef addPercentileMessageToDB():\n data = store.mapStore('data')\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], 'driving')\n states_transit = filters.filterStates(data[1:, :], 'transit')\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(\n states_walking, 25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(\n states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(\n states_walking, 75)\n underq1_driving, overq1_driving, percentile_driving_25 = (filters.\n filterPercentiles(states_driving, 25))\n undermedian_driving, overmedian_driving, percentile_driving_50 = (filters\n .filterPercentiles(states_driving, 50))\n underq3_driving, overq3_driving, percentile_driving_75 = (filters.\n filterPercentiles(states_driving, 75))\n underq1_transit, overq1_transit, percentile_transit_25 = (filters.\n filterPercentiles(states_transit, 25))\n undermedian_transit, overmedian_transit, percentile_transit_50 = (filters\n .filterPercentiles(states_transit, 50))\n underq3_transit, overq3_transit, percentile_transit_75 = (filters.\n filterPercentiles(states_transit, 75))\n over100_waling = filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1, 1]\n overq3_states = states_walking[overq3, 1]\n over100_states = states_walking[over100_waling, 1]\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving, 1]\n 
overq3_states_driving = states_driving[overq3_driving, 1]\n over100_states_driving = states_driving[over100_driving, 1]\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit, 1]\n overq3_states_transit = states_transit[overq3_transit, 1]\n over100_states_transit = states_transit[over100_transit, 1]\n print('walking under 25 percentile (far to normal) ' +\n percentile_walking_25.astype(np.str))\n print(underq1_states)\n print('walking over 75 percentile (over normal trnasportation) ' +\n percentile_walking_75.astype(np.str))\n print(overq3_states)\n print('walking over 100 in comparison to 13.1.2020')\n print(over100_states)\n print('Median value is ' + percentile_walking_50.astype(np.str))\n print(' ')\n print('Driving under 25 percentile (far to normal) ' +\n percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print('Driving over 75 percentile (over normal trnasportation) ',\n percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print('Driving over 100% in comparison to 13.1.2020')\n print(over100_states_driving)\n print('Median value is ' + percentile_driving_50.astype(np.str))\n print(' ')\n print('Transit under 25 percentile (far to normal) ' +\n percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print('Transit over 75 percentile (over normal trnasportation) ',\n percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print('Transit over 100 in comparison to 13.1.2020')\n print(over100_states_transit)\n print('Median value is ' + percentile_transit_50.astype(np.str))\n print(' ')\n",
"step-3": "<mask token>\nFIRST = 4\n\n\ndef prepareDate():\n pc_tz = timezone('US/Pacific')\n n = datetime.now(pc_tz)\n nd = n.date()\n store.updateStore(today=nd)\n\n\ndef getData():\n today = store.mapStore('today')\n npdata = store.mapStore('data')\n filedate = np.datetime64(today - timedelta(days=2))\n try:\n url = (\n 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'\n .format(filedate))\n download = requests.get(url)\n download.encoding = 'utf-8'\n temp_file = open('temp/temp.csv', 'w', encoding='utf8')\n temp_file.writelines(download.text)\n npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=np.str,\n encoding='utf8', invalid_raise=False, missing_values=np.nan,\n filling_values=np.nan)\n temp_file.close()\n store.updateStore(data=npcsv)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Not possible to read csv file .')\n print(e)\n\n\ndef getDates():\n dates = store.mapStore('dates')\n data = store.mapStore('data')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0, FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Problems with handling data numpy array')\n print(e)\n return True\n\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore('data')\n dates = store.mapStore('dates')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n dataValues = data[1:, FIRST:]\n datesValues = dates\n if filterData is not None:\n datesValues = datesValues[filterData]\n dataValues = dataValues[:, filterData]\n sql = (\n 'INSERT INTO apple_transport(geo_type, 
region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'\n )\n for ix, iy in np.ndindex(dataValues.shape):\n try:\n date = datesValues[iy].astype(datetime)\n values = data[ix + 1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore('exceptions')\n exceptions.append(error)\n\n\ndef addPercentileMessageToDB():\n data = store.mapStore('data')\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], 'driving')\n states_transit = filters.filterStates(data[1:, :], 'transit')\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(\n states_walking, 25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(\n states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(\n states_walking, 75)\n underq1_driving, overq1_driving, percentile_driving_25 = (filters.\n filterPercentiles(states_driving, 25))\n undermedian_driving, overmedian_driving, percentile_driving_50 = (filters\n .filterPercentiles(states_driving, 50))\n underq3_driving, overq3_driving, percentile_driving_75 = (filters.\n filterPercentiles(states_driving, 75))\n underq1_transit, overq1_transit, percentile_transit_25 = (filters.\n filterPercentiles(states_transit, 25))\n undermedian_transit, overmedian_transit, percentile_transit_50 = (filters\n .filterPercentiles(states_transit, 50))\n underq3_transit, overq3_transit, percentile_transit_75 = (filters.\n filterPercentiles(states_transit, 75))\n over100_waling = filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1, 1]\n overq3_states = states_walking[overq3, 1]\n over100_states = 
states_walking[over100_waling, 1]\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving, 1]\n overq3_states_driving = states_driving[overq3_driving, 1]\n over100_states_driving = states_driving[over100_driving, 1]\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit, 1]\n overq3_states_transit = states_transit[overq3_transit, 1]\n over100_states_transit = states_transit[over100_transit, 1]\n print('walking under 25 percentile (far to normal) ' +\n percentile_walking_25.astype(np.str))\n print(underq1_states)\n print('walking over 75 percentile (over normal trnasportation) ' +\n percentile_walking_75.astype(np.str))\n print(overq3_states)\n print('walking over 100 in comparison to 13.1.2020')\n print(over100_states)\n print('Median value is ' + percentile_walking_50.astype(np.str))\n print(' ')\n print('Driving under 25 percentile (far to normal) ' +\n percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print('Driving over 75 percentile (over normal trnasportation) ',\n percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print('Driving over 100% in comparison to 13.1.2020')\n print(over100_states_driving)\n print('Median value is ' + percentile_driving_50.astype(np.str))\n print(' ')\n print('Transit under 25 percentile (far to normal) ' +\n percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print('Transit over 75 percentile (over normal trnasportation) ',\n percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print('Transit over 100 in comparison to 13.1.2020')\n print(over100_states_transit)\n print('Median value is ' + percentile_transit_50.astype(np.str))\n print(' ')\n",
"step-4": "import numpy as np\nfrom datetime import date, timedelta, datetime\nfrom pytz import timezone\nimport store\nimport psycopg2\nimport requests\nimport os\nimport filters\nFIRST = 4\n\n\ndef prepareDate():\n pc_tz = timezone('US/Pacific')\n n = datetime.now(pc_tz)\n nd = n.date()\n store.updateStore(today=nd)\n\n\ndef getData():\n today = store.mapStore('today')\n npdata = store.mapStore('data')\n filedate = np.datetime64(today - timedelta(days=2))\n try:\n url = (\n 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'\n .format(filedate))\n download = requests.get(url)\n download.encoding = 'utf-8'\n temp_file = open('temp/temp.csv', 'w', encoding='utf8')\n temp_file.writelines(download.text)\n npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=np.str,\n encoding='utf8', invalid_raise=False, missing_values=np.nan,\n filling_values=np.nan)\n temp_file.close()\n store.updateStore(data=npcsv)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Not possible to read csv file .')\n print(e)\n\n\ndef getDates():\n dates = store.mapStore('dates')\n data = store.mapStore('data')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0, FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Problems with handling data numpy array')\n print(e)\n return True\n\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore('data')\n dates = store.mapStore('dates')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n dataValues = data[1:, FIRST:]\n datesValues = dates\n if 
filterData is not None:\n datesValues = datesValues[filterData]\n dataValues = dataValues[:, filterData]\n sql = (\n 'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'\n )\n for ix, iy in np.ndindex(dataValues.shape):\n try:\n date = datesValues[iy].astype(datetime)\n values = data[ix + 1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore('exceptions')\n exceptions.append(error)\n\n\ndef addPercentileMessageToDB():\n data = store.mapStore('data')\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], 'driving')\n states_transit = filters.filterStates(data[1:, :], 'transit')\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(\n states_walking, 25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(\n states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(\n states_walking, 75)\n underq1_driving, overq1_driving, percentile_driving_25 = (filters.\n filterPercentiles(states_driving, 25))\n undermedian_driving, overmedian_driving, percentile_driving_50 = (filters\n .filterPercentiles(states_driving, 50))\n underq3_driving, overq3_driving, percentile_driving_75 = (filters.\n filterPercentiles(states_driving, 75))\n underq1_transit, overq1_transit, percentile_transit_25 = (filters.\n filterPercentiles(states_transit, 25))\n undermedian_transit, overmedian_transit, percentile_transit_50 = (filters\n .filterPercentiles(states_transit, 50))\n underq3_transit, overq3_transit, percentile_transit_75 = (filters.\n filterPercentiles(states_transit, 75))\n over100_waling = 
filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1, 1]\n overq3_states = states_walking[overq3, 1]\n over100_states = states_walking[over100_waling, 1]\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving, 1]\n overq3_states_driving = states_driving[overq3_driving, 1]\n over100_states_driving = states_driving[over100_driving, 1]\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit, 1]\n overq3_states_transit = states_transit[overq3_transit, 1]\n over100_states_transit = states_transit[over100_transit, 1]\n print('walking under 25 percentile (far to normal) ' +\n percentile_walking_25.astype(np.str))\n print(underq1_states)\n print('walking over 75 percentile (over normal trnasportation) ' +\n percentile_walking_75.astype(np.str))\n print(overq3_states)\n print('walking over 100 in comparison to 13.1.2020')\n print(over100_states)\n print('Median value is ' + percentile_walking_50.astype(np.str))\n print(' ')\n print('Driving under 25 percentile (far to normal) ' +\n percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print('Driving over 75 percentile (over normal trnasportation) ',\n percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print('Driving over 100% in comparison to 13.1.2020')\n print(over100_states_driving)\n print('Median value is ' + percentile_driving_50.astype(np.str))\n print(' ')\n print('Transit under 25 percentile (far to normal) ' +\n percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print('Transit over 75 percentile (over normal trnasportation) ',\n percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print('Transit over 100 in comparison to 13.1.2020')\n print(over100_states_transit)\n print('Median value is ' + percentile_transit_50.astype(np.str))\n print(' ')\n",
"step-5": "import numpy as np\nfrom datetime import date, timedelta, datetime\nfrom pytz import timezone\nimport store\nimport psycopg2\nimport requests\nimport os\nimport filters\n\nFIRST = 4\n\ndef prepareDate():\n pc_tz = timezone('US/Pacific')\n n = datetime.now(pc_tz)\n nd = n.date()\n store.updateStore(today=nd)\n\ndef getData():\n today = store.mapStore(\"today\")\n npdata = store.mapStore(\"data\")\n filedate = np.datetime64(today - timedelta(days=2))\n try:\n url = 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'.format(filedate)\n download = requests.get(url)\n download.encoding = \"utf-8\"\n temp_file = open(\"temp/temp.csv\", 'w', encoding='utf8')\n temp_file.writelines(download.text)\n npcsv = np.genfromtxt(\"temp/temp.csv\", delimiter=',', dtype=np.str, encoding='utf8', invalid_raise=False, missing_values = np.nan, filling_values=np.nan)\n temp_file.close()\n store.updateStore(data=npcsv)\n except Exception as e:\n exceptions = store.mapStore(\"exceptions\")\n exceptions.append(e)\n print(\"Not possible to read csv file .\")\n print(e)\n\ndef getDates():\n dates = store.mapStore(\"dates\")\n data = store.mapStore(\"data\")\n exceptions = store.mapStore(\"exceptions\")\n if(len(exceptions) > 0):\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0,FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore(\"exceptions\")\n exceptions.append(e)\n print(\"Problems with handling data numpy array\")\n print(e)\n return True\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore(\"data\")\n dates = store.mapStore(\"dates\")\n exceptions = store.mapStore(\"exceptions\")\n if(len(exceptions) > 0):\n return False\n dataValues = data[1:,FIRST:]\n datesValues = 
dates\n if(filterData is not None):\n datesValues = datesValues[filterData]\n dataValues = dataValues[:,filterData]\n sql = \"INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)\"\n for ix,iy in np.ndindex(dataValues.shape):\n try:\n date = datesValues[iy].astype(datetime)\n values = data[ix+1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore(\"exceptions\")\n exceptions.append(error)\n\ndef addPercentileMessageToDB():\n data = store.mapStore(\"data\")\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], \"driving\")\n states_transit = filters.filterStates(data[1:, :], \"transit\")\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(states_walking, 25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(states_walking, 75)\n\n underq1_driving, overq1_driving, percentile_driving_25 = filters.filterPercentiles(states_driving, 25)\n undermedian_driving, overmedian_driving, percentile_driving_50 = filters.filterPercentiles(states_driving, 50)\n underq3_driving, overq3_driving, percentile_driving_75 = filters.filterPercentiles(states_driving, 75)\n\n underq1_transit, overq1_transit, percentile_transit_25 = filters.filterPercentiles(states_transit, 25)\n undermedian_transit, overmedian_transit, percentile_transit_50 = filters.filterPercentiles(states_transit, 50)\n underq3_transit, overq3_transit, percentile_transit_75 = filters.filterPercentiles(states_transit, 75)\n\n over100_waling = 
filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1,1]\n overq3_states = states_walking[overq3,1]\n over100_states = states_walking[over100_waling, 1]\n\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving,1]\n overq3_states_driving = states_driving[overq3_driving,1]\n over100_states_driving = states_driving[over100_driving, 1]\n\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit,1]\n overq3_states_transit = states_transit[overq3_transit,1]\n over100_states_transit = states_transit[over100_transit, 1]\n print(\"walking under 25 percentile (far to normal) \" + percentile_walking_25.astype(np.str))\n print(underq1_states)\n print(\"walking over 75 percentile (over normal trnasportation) \" + percentile_walking_75.astype(np.str))\n print(overq3_states)\n print(\"walking over 100 in comparison to 13.1.2020\")\n print(over100_states)\n print(\"Median value is \" + percentile_walking_50.astype(np.str))\n print(\" \")\n\n print(\"Driving under 25 percentile (far to normal) \" + percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print(\"Driving over 75 percentile (over normal trnasportation) \", percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print(\"Driving over 100% in comparison to 13.1.2020\")\n print(over100_states_driving)\n print(\"Median value is \" + percentile_driving_50.astype(np.str))\n print(\" \")\n\n print(\"Transit under 25 percentile (far to normal) \" + percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print(\"Transit over 75 percentile (over normal trnasportation) \", percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print(\"Transit over 100 in comparison to 13.1.2020\")\n print(over100_states_transit)\n print(\"Median value is \" + percentile_transit_50.astype(np.str))\n print(\" \")\n\n\n \n\n \n \n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
import thinkbayes2 as thinkbayes
from thinkbayes2 import Pmf
import thinkplot
class Dice2(Pmf):
def __init__(self, sides):
Pmf.__init__(self)
for x in range(1, sides + 1):
self.Set(x, 1)
self.Normalize()
if __name__ == "__main__":
d6 = Dice2(6)
dices = [d6] * 6
three = thinkbayes.SampleSum(dices, 1000)
thinkplot.Pmf(three)
|
normal
|
{
"blob_id": "236dd70dec8d53062d6c38c370cb8f11dc5ef9d0",
"index": 556,
"step-1": "<mask token>\n\n\nclass Dice2(Pmf):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == '__main__':\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"step-4": "import thinkbayes2 as thinkbayes\nfrom thinkbayes2 import Pmf\nimport thinkplot\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == '__main__':\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"step-5": "import thinkbayes2 as thinkbayes\nfrom thinkbayes2 import Pmf\nimport thinkplot\n\n\nclass Dice2(Pmf):\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == \"__main__\":\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def health():
return 'OK', 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def health():
return 'OK', 200
def verify_token(token):
"""
Verifies Token from Authorization header
"""
if config.API_TOKEN is None:
logger.error('API token is not configured, auth will fail!')
return token == config.API_TOKEN
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(f'{__name__}.common')
def health():
return 'OK', 200
def verify_token(token):
"""
Verifies Token from Authorization header
"""
if config.API_TOKEN is None:
logger.error('API token is not configured, auth will fail!')
return token == config.API_TOKEN
<|reserved_special_token_1|>
import logging
import terrestrial.config as config
logger = logging.getLogger(f'{__name__}.common')
def health():
return 'OK', 200
def verify_token(token):
"""
Verifies Token from Authorization header
"""
if config.API_TOKEN is None:
logger.error('API token is not configured, auth will fail!')
return token == config.API_TOKEN
|
flexible
|
{
"blob_id": "167bd2c405171443c11fbd13575f8c7b20877289",
"index": 8470,
"step-1": "<mask token>\n\n\ndef health():\n return 'OK', 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef health():\n return 'OK', 200\n\n\ndef verify_token(token):\n \"\"\"\n Verifies Token from Authorization header\n \"\"\"\n if config.API_TOKEN is None:\n logger.error('API token is not configured, auth will fail!')\n return token == config.API_TOKEN\n",
"step-3": "<mask token>\nlogger = logging.getLogger(f'{__name__}.common')\n\n\ndef health():\n return 'OK', 200\n\n\ndef verify_token(token):\n \"\"\"\n Verifies Token from Authorization header\n \"\"\"\n if config.API_TOKEN is None:\n logger.error('API token is not configured, auth will fail!')\n return token == config.API_TOKEN\n",
"step-4": "import logging\nimport terrestrial.config as config\nlogger = logging.getLogger(f'{__name__}.common')\n\n\ndef health():\n return 'OK', 200\n\n\ndef verify_token(token):\n \"\"\"\n Verifies Token from Authorization header\n \"\"\"\n if config.API_TOKEN is None:\n logger.error('API token is not configured, auth will fail!')\n return token == config.API_TOKEN\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test2():
answer = convert_c_to_f(-40.0)
expected = -40.0
assert answer == expected
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_convert_c_to_f():
answer = convert_c_to_f(20.0)
expected = 68.0
assert answer == expected
def test2():
answer = convert_c_to_f(-40.0)
expected = -40.0
assert answer == expected
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_convert_c_to_f():
answer = convert_c_to_f(20.0)
expected = 68.0
assert answer == expected
def test2():
answer = convert_c_to_f(-40.0)
expected = -40.0
assert answer == expected
def test_fever_detection():
temp_list = [93.0, 98.0, 100.0, 105.0, 101.0]
max_temp, is_fever = fever_detection(temp_list)
expected_max = 105.0
is_fever = True
assert max_temp == expected_max
<|reserved_special_token_1|>
from temp_conversion_script import convert_c_to_f
from temp_conversion_script import fever_detection
def test_convert_c_to_f():
answer = convert_c_to_f(20.0)
expected = 68.0
assert answer == expected
def test2():
answer = convert_c_to_f(-40.0)
expected = -40.0
assert answer == expected
def test_fever_detection():
temp_list = [93.0, 98.0, 100.0, 105.0, 101.0]
max_temp, is_fever = fever_detection(temp_list)
expected_max = 105.0
is_fever = True
assert max_temp == expected_max
|
flexible
|
{
"blob_id": "d75187ed435c3d3aeeb31be4a0a4ed1754f8d160",
"index": 4436,
"step-1": "<mask token>\n\n\ndef test2():\n answer = convert_c_to_f(-40.0)\n expected = -40.0\n assert answer == expected\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_convert_c_to_f():\n answer = convert_c_to_f(20.0)\n expected = 68.0\n assert answer == expected\n\n\ndef test2():\n answer = convert_c_to_f(-40.0)\n expected = -40.0\n assert answer == expected\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_convert_c_to_f():\n answer = convert_c_to_f(20.0)\n expected = 68.0\n assert answer == expected\n\n\ndef test2():\n answer = convert_c_to_f(-40.0)\n expected = -40.0\n assert answer == expected\n\n\ndef test_fever_detection():\n temp_list = [93.0, 98.0, 100.0, 105.0, 101.0]\n max_temp, is_fever = fever_detection(temp_list)\n expected_max = 105.0\n is_fever = True\n assert max_temp == expected_max\n",
"step-4": "from temp_conversion_script import convert_c_to_f\nfrom temp_conversion_script import fever_detection\n\n\ndef test_convert_c_to_f():\n answer = convert_c_to_f(20.0)\n expected = 68.0\n assert answer == expected\n\n\ndef test2():\n answer = convert_c_to_f(-40.0)\n expected = -40.0\n assert answer == expected\n\n\ndef test_fever_detection():\n temp_list = [93.0, 98.0, 100.0, 105.0, 101.0]\n max_temp, is_fever = fever_detection(temp_list)\n expected_max = 105.0\n is_fever = True\n assert max_temp == expected_max\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
"""
* @section LICENSE
*
* @copyright
* Copyright (c) 2017 Intel Corporation
*
* @copyright
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* @copyright
* http://www.apache.org/licenses/LICENSE-2.0
*
* @copyright
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
"""
from re import match
from os import environ
import sys
from cts_core.commons.error import cts_error
from cts_core.commons.replay_controller import ReplayController
from cts_framework.actions.action import Action
from cts_framework.actions.execute.execute_test_scripts_action import ExecuteTestScriptsAction
from cts_framework.build_information import BuildInformation
from cts_framework.commons.color_printer import ColorPrinter
from cts_framework.commons.logging_helper import LoggingHelper
from cts_framework.db.dao.script_dao import ScriptDAO
from cts_framework.tests_managing.test_package.tests_packages_container import TestsPackagesContainer
from cts_framework.tests_managing.tests_manager import TestsManager
from cts_framework.tests_running.execution_feed import ExecutionFeed
def split_replay_id(replay_id):
"""converts replay_id provided by the user into script execution id
:type replay_id: str
:rtype: (Boolean, int)
"""
m = match(r"^(\d+)$", replay_id.strip())
if m:
return None, int(m.groups()[0])
cts_error("Replay id has invalid format. Expected: unsigned integer")
return True, None
class ReplayTestRunAction(Action):
ACTION = "replay"
PARAM_NAME = "ACTION"
def __init__(self, *params, **kwargs):
Action.__init__(self, *params, **kwargs)
self._logger = LoggingHelper(__name__)
def fill_parser_arguments(self):
self.parser.add_argument("replay_id", help="ID of the test script run to replay", type=str, nargs=1)
def process_action(self, configuration):
replay_id = configuration.replay_id[0]
print "Using CTS in version %s to replay execution %s" \
% (ColorPrinter.format_text(BuildInformation.BUILD_VERSION, bold=True), replay_id)
error, script_execution_id = split_replay_id(replay_id)
if error:
return
# TODO: warn user when he tries to replay using newer CTS
script_execution = ScriptDAO.get_script_execution_details(script_execution_id)
if script_execution is None:
cts_error("Recording for script execution id={id:ignore} not found", id=script_execution_id)
return
script_path = script_execution.script_path
configuration = self._configuration_from_string(script_execution.configuration)
test_plan = self._prepare_test_plan(script_path)
environ[ReplayController.CTS_REPLAY_SCRIPT_EXECUTION_ID] = str(script_execution_id)
self._execute(configuration, test_plan)
def _configuration_from_string(self, configuration_str):
configuration = {b[0]: b[1] for b in
(a.strip().split(' ', 1) for a in filter(None, configuration_str.split('--')))}
return configuration
def _prepare_test_plan(self, script_path):
test_plan = TestsPackagesContainer()
tests_manager = TestsManager()
test_scripts_found = tests_manager.get_packages()
test_scripts_found.filter(script_paths=[script_path], remove_empty=True)
test_plan += test_scripts_found
if not test_plan.packages:
print "Script to execute not found in any package"
sys.exit(0)
return test_plan
def _execute(self, configuration, test_plan):
"""
:type configuration: dict
:type test_plan: cts_framework.tests_managing.test_package.tests_packages_container.TestsPackagesContainer
"""
message = "Executing "
print "Executing:"
for package in test_plan.packages:
for suite in package.suites:
for script in suite.scripts:
print "\t* %s from suite %s from package %s" % (script.name, suite.name, package.name)
message += "%s from suite %s from package %s, " % (script.name, suite.name, package.name)
self._logger.log_debug(message)
execution_feed = ExecutionFeed(test_plan, configuration)
ExecuteTestScriptsAction.execute_configuration_group(execution_feed)
|
normal
|
{
"blob_id": "f11e6a53d8dfc60f73f346772df7a3cab14088ce",
"index": 2751,
"step-1": "\"\"\"\n * @section LICENSE\n *\n * @copyright\n * Copyright (c) 2017 Intel Corporation\n *\n * @copyright\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * @copyright\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * @copyright\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * @section DESCRIPTION\n\"\"\"\nfrom re import match\nfrom os import environ\nimport sys\n\nfrom cts_core.commons.error import cts_error\nfrom cts_core.commons.replay_controller import ReplayController\nfrom cts_framework.actions.action import Action\nfrom cts_framework.actions.execute.execute_test_scripts_action import ExecuteTestScriptsAction\nfrom cts_framework.build_information import BuildInformation\nfrom cts_framework.commons.color_printer import ColorPrinter\nfrom cts_framework.commons.logging_helper import LoggingHelper\nfrom cts_framework.db.dao.script_dao import ScriptDAO\nfrom cts_framework.tests_managing.test_package.tests_packages_container import TestsPackagesContainer\nfrom cts_framework.tests_managing.tests_manager import TestsManager\nfrom cts_framework.tests_running.execution_feed import ExecutionFeed\n\n\ndef split_replay_id(replay_id):\n \"\"\"converts replay_id provided by the user into script execution id\n :type replay_id: str\n :rtype: (Boolean, int)\n \"\"\"\n\n m = match(r\"^(\\d+)$\", replay_id.strip())\n if m:\n return None, int(m.groups()[0])\n\n cts_error(\"Replay id has invalid format. 
Expected: unsigned integer\")\n return True, None\n\n\nclass ReplayTestRunAction(Action):\n ACTION = \"replay\"\n PARAM_NAME = \"ACTION\"\n\n def __init__(self, *params, **kwargs):\n Action.__init__(self, *params, **kwargs)\n\n self._logger = LoggingHelper(__name__)\n\n def fill_parser_arguments(self):\n self.parser.add_argument(\"replay_id\", help=\"ID of the test script run to replay\", type=str, nargs=1)\n\n def process_action(self, configuration):\n replay_id = configuration.replay_id[0]\n print \"Using CTS in version %s to replay execution %s\" \\\n % (ColorPrinter.format_text(BuildInformation.BUILD_VERSION, bold=True), replay_id)\n\n error, script_execution_id = split_replay_id(replay_id)\n if error:\n return\n\n # TODO: warn user when he tries to replay using newer CTS\n\n script_execution = ScriptDAO.get_script_execution_details(script_execution_id)\n if script_execution is None:\n cts_error(\"Recording for script execution id={id:ignore} not found\", id=script_execution_id)\n return\n\n script_path = script_execution.script_path\n configuration = self._configuration_from_string(script_execution.configuration)\n\n test_plan = self._prepare_test_plan(script_path)\n\n environ[ReplayController.CTS_REPLAY_SCRIPT_EXECUTION_ID] = str(script_execution_id)\n self._execute(configuration, test_plan)\n\n def _configuration_from_string(self, configuration_str):\n configuration = {b[0]: b[1] for b in\n (a.strip().split(' ', 1) for a in filter(None, configuration_str.split('--')))}\n return configuration\n\n def _prepare_test_plan(self, script_path):\n test_plan = TestsPackagesContainer()\n tests_manager = TestsManager()\n test_scripts_found = tests_manager.get_packages()\n test_scripts_found.filter(script_paths=[script_path], remove_empty=True)\n test_plan += test_scripts_found\n if not test_plan.packages:\n print \"Script to execute not found in any package\"\n sys.exit(0)\n return test_plan\n\n def _execute(self, configuration, test_plan):\n \"\"\"\n :type 
configuration: dict\n :type test_plan: cts_framework.tests_managing.test_package.tests_packages_container.TestsPackagesContainer\n \"\"\"\n message = \"Executing \"\n print \"Executing:\"\n for package in test_plan.packages:\n for suite in package.suites:\n for script in suite.scripts:\n print \"\\t* %s from suite %s from package %s\" % (script.name, suite.name, package.name)\n message += \"%s from suite %s from package %s, \" % (script.name, suite.name, package.name)\n self._logger.log_debug(message)\n execution_feed = ExecutionFeed(test_plan, configuration)\n ExecuteTestScriptsAction.execute_configuration_group(execution_feed)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture
def dataproc_launcher(pytestconfig) ->DataprocClusterLauncher:
cluster_name = pytestconfig.getoption('--dataproc-cluster-name')
region = pytestconfig.getoption('--dataproc-region')
project_id = pytestconfig.getoption('--dataproc-project')
staging_location = pytestconfig.getoption('--dataproc-staging-location')
return DataprocClusterLauncher(cluster_name=cluster_name,
staging_location=staging_location, region=region, project_id=project_id
)
<|reserved_special_token_1|>
import pytest
from feast.pyspark.launchers.gcloud import DataprocClusterLauncher
@pytest.fixture
def dataproc_launcher(pytestconfig) ->DataprocClusterLauncher:
cluster_name = pytestconfig.getoption('--dataproc-cluster-name')
region = pytestconfig.getoption('--dataproc-region')
project_id = pytestconfig.getoption('--dataproc-project')
staging_location = pytestconfig.getoption('--dataproc-staging-location')
return DataprocClusterLauncher(cluster_name=cluster_name,
staging_location=staging_location, region=region, project_id=project_id
)
<|reserved_special_token_1|>
import pytest
from feast.pyspark.launchers.gcloud import DataprocClusterLauncher
@pytest.fixture
def dataproc_launcher(pytestconfig) -> DataprocClusterLauncher:
cluster_name = pytestconfig.getoption("--dataproc-cluster-name")
region = pytestconfig.getoption("--dataproc-region")
project_id = pytestconfig.getoption("--dataproc-project")
staging_location = pytestconfig.getoption("--dataproc-staging-location")
return DataprocClusterLauncher(
cluster_name=cluster_name,
staging_location=staging_location,
region=region,
project_id=project_id,
)
|
flexible
|
{
"blob_id": "ff13ac0ee401471fe5446e8149f019d9da7f3ddf",
"index": 5147,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef dataproc_launcher(pytestconfig) ->DataprocClusterLauncher:\n cluster_name = pytestconfig.getoption('--dataproc-cluster-name')\n region = pytestconfig.getoption('--dataproc-region')\n project_id = pytestconfig.getoption('--dataproc-project')\n staging_location = pytestconfig.getoption('--dataproc-staging-location')\n return DataprocClusterLauncher(cluster_name=cluster_name,\n staging_location=staging_location, region=region, project_id=project_id\n )\n",
"step-3": "import pytest\nfrom feast.pyspark.launchers.gcloud import DataprocClusterLauncher\n\n\n@pytest.fixture\ndef dataproc_launcher(pytestconfig) ->DataprocClusterLauncher:\n cluster_name = pytestconfig.getoption('--dataproc-cluster-name')\n region = pytestconfig.getoption('--dataproc-region')\n project_id = pytestconfig.getoption('--dataproc-project')\n staging_location = pytestconfig.getoption('--dataproc-staging-location')\n return DataprocClusterLauncher(cluster_name=cluster_name,\n staging_location=staging_location, region=region, project_id=project_id\n )\n",
"step-4": "import pytest\n\nfrom feast.pyspark.launchers.gcloud import DataprocClusterLauncher\n\n\n@pytest.fixture\ndef dataproc_launcher(pytestconfig) -> DataprocClusterLauncher:\n cluster_name = pytestconfig.getoption(\"--dataproc-cluster-name\")\n region = pytestconfig.getoption(\"--dataproc-region\")\n project_id = pytestconfig.getoption(\"--dataproc-project\")\n staging_location = pytestconfig.getoption(\"--dataproc-staging-location\")\n return DataprocClusterLauncher(\n cluster_name=cluster_name,\n staging_location=staging_location,\n region=region,\n project_id=project_id,\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os, shutil, time, pickle, warnings, logging
import yaml
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn import metrics
from scipy.special import erfinv
from scipy.stats import mode
warnings.filterwarnings('ignore')
def data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5, random_state=42):
folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(
np.arange(len(df)), y=df[col_stratified]))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_KFold(df, col_index, n_splits=5, random_state=42):
folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(
np.arange(len(df))))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42):
"""
:param df:
:param col_index:
:param col_group:
:param n_splits:
:param random_state:
:return:
"""
group = np.sort(df[col_group].unique())
print("num group: {}".format(len(group)))
np.random.seed(random_state)
group = group[np.random.permutation(len(group))]
fold_list = []
fold = 0
count = 0
fold_list.append([])
for i, item in enumerate(group):
count += (df[col_group] == item).sum()
fold_list[fold].append(item)
if count > len(df) / n_splits * (fold + 1):
fold_list.append([])
fold += 1
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(
lambda x: x not in fold_list[fold]).astype(np.int)
df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'.format(fold + 1)]
for i in range(n_splits):
print("fold: {}, valid: {}. group: {}".format(
i + 1,
(df_new['fold{}_valid'.format(i + 1)] == 1).sum(),
len(fold_list[i]))
)
return df_new
def main():
    """Entry point: load the melanoma training metadata CSV."""
    train_csv = "../input/melanoma/train.csv"
    df = pd.read_csv(train_csv)


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "4d0b08f8ca77d188aa218442ac0689fd2c057a89",
"index": 8357,
"step-1": "<mask token>\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. 
group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\n<mask token>\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n 
print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os, shutil, time, pickle, warnings, logging\nimport yaml\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn import metrics\nfrom scipy.special import erfinv\nfrom scipy.stats import mode\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n 
fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os, shutil, time, pickle, warnings, logging\nimport yaml\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn import metrics\nfrom scipy.special import erfinv\nfrom scipy.stats import mode\n\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5, random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(\n np.arange(len(df)), y=df[col_stratified]))\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(\n np.arange(len(df))))\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print(\"num group: {}\".format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n 
fold_list.append([])\n fold += 1\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(\n lambda x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'.format(fold + 1)]\n\n for i in range(n_splits):\n print(\"fold: {}, valid: {}. group: {}\".format(\n i + 1,\n (df_new['fold{}_valid'.format(i + 1)] == 1).sum(),\n len(fold_list[i]))\n )\n\n return df_new\n\n\ndef main():\n df = pd.read_csv(\"../input/melanoma/train.csv\")\n\n\nif __name__ == '__main__':\n main()",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def test_linear_slope_2():
eta = ETA(100)
eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])
getattr(eta, '_calculate')()
assert 50 == eta.eta_epoch
assert 2.0 == eta.rate
assert 2.0 == eta.rate_unstable
def test_linear_transform():
"""Wolfram Alpha:
x is the timestamp. y is the numerator. 120 is the denominator.
linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}
The closer we get to 100%, the more vertical shift/transform is applied to the line.
As we near the end we want the line to get closer to the last point on the graph.
This avoids having 99% with an ETA in the past.
"""
eta = ETA(120)
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert 4.4 < eta.eta_epoch < 4.6
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_linear_slope_1():
eta = ETA(100)
eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])
getattr(eta, '_calculate')()
assert 100 == eta.eta_epoch
assert 1.0 == eta.rate
assert 1.0 == eta.rate_unstable
def test_linear_slope_2():
eta = ETA(100)
eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])
getattr(eta, '_calculate')()
assert 50 == eta.eta_epoch
assert 2.0 == eta.rate
assert 2.0 == eta.rate_unstable
def test_linear_transform():
"""Wolfram Alpha:
x is the timestamp. y is the numerator. 120 is the denominator.
linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}
The closer we get to 100%, the more vertical shift/transform is applied to the line.
As we near the end we want the line to get closer to the last point on the graph.
This avoids having 99% with an ETA in the past.
"""
eta = ETA(120)
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert 4.4 < eta.eta_epoch < 4.6
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_linear_slope_1():
eta = ETA(100)
eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])
getattr(eta, '_calculate')()
assert 100 == eta.eta_epoch
assert 1.0 == eta.rate
assert 1.0 == eta.rate_unstable
def test_linear_slope_2():
eta = ETA(100)
eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])
getattr(eta, '_calculate')()
assert 50 == eta.eta_epoch
assert 2.0 == eta.rate
assert 2.0 == eta.rate_unstable
def test_linear_transform():
"""Wolfram Alpha:
x is the timestamp. y is the numerator. 120 is the denominator.
linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}
The closer we get to 100%, the more vertical shift/transform is applied to the line.
As we near the end we want the line to get closer to the last point on the graph.
This avoids having 99% with an ETA in the past.
"""
eta = ETA(120)
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert 4.4 < eta.eta_epoch < 4.6
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
def test_linear_transform_undefined():
eta = ETA()
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert eta.eta_epoch is None
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
<|reserved_special_token_1|>
from collections import deque
from etaprogress.eta import ETA
def test_linear_slope_1():
    """A perfectly linear 1-unit/sec history projects completion of 100 at t=100."""
    progress = ETA(100)
    progress._timing_data = deque((t, t) for t in (10, 20, 30, 40))
    progress._calculate()
    assert progress.eta_epoch == 100
    assert progress.rate == 1.0
    assert progress.rate_unstable == 1.0
def test_linear_slope_2():
    """At 2 units/sec, 100 units complete at t=50."""
    progress = ETA(100)
    progress._timing_data = deque((t, 2 * t) for t in (10, 20, 30, 40))
    progress._calculate()
    assert progress.eta_epoch == 50
    assert progress.rate == 2.0
    assert progress.rate_unstable == 2.0
def test_linear_transform():
    """Vertical-shift behaviour near completion.

    Wolfram Alpha check (x = timestamp, y = numerator, denominator = 120):
    linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}

    As progress approaches 100%, the fitted line is shifted vertically toward
    the last recorded point so that e.g. a 99% reading never yields an ETA in
    the past.
    """
    progress = ETA(120)
    progress._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
    progress._calculate()
    assert 4.4 < progress.eta_epoch < 4.6
    assert 30 < progress.rate < 35
    assert 12 < progress.rate_unstable < 13
def test_linear_transform_undefined():
    """Without a denominator the ETA is undefined, but rates are still computed."""
    progress = ETA()
    progress._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
    progress._calculate()
    assert progress.eta_epoch is None
    assert 30 < progress.rate < 35
    assert 12 < progress.rate_unstable < 13
|
flexible
|
{
"blob_id": "810017cd5814fc20ebcdbdf26a32ea1bcfc88625",
"index": 2164,
"step-1": "<mask token>\n\n\ndef test_linear_slope_2():\n eta = ETA(100)\n eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])\n getattr(eta, '_calculate')()\n assert 50 == eta.eta_epoch\n assert 2.0 == eta.rate\n assert 2.0 == eta.rate_unstable\n\n\ndef test_linear_transform():\n \"\"\"Wolfram Alpha:\n x is the timestamp. y is the numerator. 120 is the denominator.\n linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}\n\n The closer we get to 100%, the more vertical shift/transform is applied to the line.\n As we near the end we want the line to get closer to the last point on the graph.\n This avoids having 99% with an ETA in the past.\n \"\"\"\n eta = ETA(120)\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert 4.4 < eta.eta_epoch < 4.6\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_linear_slope_1():\n eta = ETA(100)\n eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])\n getattr(eta, '_calculate')()\n assert 100 == eta.eta_epoch\n assert 1.0 == eta.rate\n assert 1.0 == eta.rate_unstable\n\n\ndef test_linear_slope_2():\n eta = ETA(100)\n eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])\n getattr(eta, '_calculate')()\n assert 50 == eta.eta_epoch\n assert 2.0 == eta.rate\n assert 2.0 == eta.rate_unstable\n\n\ndef test_linear_transform():\n \"\"\"Wolfram Alpha:\n x is the timestamp. y is the numerator. 120 is the denominator.\n linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}\n\n The closer we get to 100%, the more vertical shift/transform is applied to the line.\n As we near the end we want the line to get closer to the last point on the graph.\n This avoids having 99% with an ETA in the past.\n \"\"\"\n eta = ETA(120)\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert 4.4 < eta.eta_epoch < 4.6\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_linear_slope_1():\n eta = ETA(100)\n eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])\n getattr(eta, '_calculate')()\n assert 100 == eta.eta_epoch\n assert 1.0 == eta.rate\n assert 1.0 == eta.rate_unstable\n\n\ndef test_linear_slope_2():\n eta = ETA(100)\n eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])\n getattr(eta, '_calculate')()\n assert 50 == eta.eta_epoch\n assert 2.0 == eta.rate\n assert 2.0 == eta.rate_unstable\n\n\ndef test_linear_transform():\n \"\"\"Wolfram Alpha:\n x is the timestamp. y is the numerator. 120 is the denominator.\n linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}\n\n The closer we get to 100%, the more vertical shift/transform is applied to the line.\n As we near the end we want the line to get closer to the last point on the graph.\n This avoids having 99% with an ETA in the past.\n \"\"\"\n eta = ETA(120)\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert 4.4 < eta.eta_epoch < 4.6\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n\n\ndef test_linear_transform_undefined():\n eta = ETA()\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert eta.eta_epoch is None\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n",
"step-4": "from collections import deque\nfrom etaprogress.eta import ETA\n\n\ndef test_linear_slope_1():\n eta = ETA(100)\n eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])\n getattr(eta, '_calculate')()\n assert 100 == eta.eta_epoch\n assert 1.0 == eta.rate\n assert 1.0 == eta.rate_unstable\n\n\ndef test_linear_slope_2():\n eta = ETA(100)\n eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])\n getattr(eta, '_calculate')()\n assert 50 == eta.eta_epoch\n assert 2.0 == eta.rate\n assert 2.0 == eta.rate_unstable\n\n\ndef test_linear_transform():\n \"\"\"Wolfram Alpha:\n x is the timestamp. y is the numerator. 120 is the denominator.\n linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}\n\n The closer we get to 100%, the more vertical shift/transform is applied to the line.\n As we near the end we want the line to get closer to the last point on the graph.\n This avoids having 99% with an ETA in the past.\n \"\"\"\n eta = ETA(120)\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert 4.4 < eta.eta_epoch < 4.6\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n\n\ndef test_linear_transform_undefined():\n eta = ETA()\n eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])\n getattr(eta, '_calculate')()\n assert eta.eta_epoch is None\n assert 30 < eta.rate < 35\n assert 12 < eta.rate_unstable < 13\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.