code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
# Total number of target classes: 30 spoken keywords + '_background_noise_'.
NUM_CLASSES = 31

# Audio input parameters.
AUDIO_SR = 16000              # sample rate in Hz
AUDIO_LENGTH = 16000          # samples per clip (one second at 16 kHz)
LIBROSA_AUDIO_LENGTH = 22050  # clip length at librosa's default 22050 Hz rate

# Training schedule.
EPOCHS = 25

# Mapping from spoken-command label to integer class index.
categories = {
    'stop': 0,
    'nine': 1,
    'off': 2,
    'four': 3,
    'right': 4,
    'eight': 5,
    'one': 6,
    'bird': 7,
    'dog': 8,
    'no': 9,
    'on': 10,
    'seven': 11,
    'cat': 12,
    'left': 13,
    'three': 14,
    'tree': 15,
    'bed': 16,
    'zero': 17,
    'happy': 18,
    'sheila': 19,
    'five': 20,
    'down': 21,
    'marvin': 22,
    'six': 23,
    'up': 24,
    'wow': 25,
    'house': 26,
    'go': 27,
    'yes': 28,
    'two': 29,
    '_background_noise_': 30,
}

# Inverse mapping (class index -> label), derived from `categories` so the
# two tables can never drift out of sync.  The original maintained a second
# hand-written dict that duplicated all 31 entries.
inv_categories = {index: label for label, index in categories.items()}

# Marvin model
INPUT_SHAPE = (99, 40)        # (time frames, features) fed to the network
TARGET_SHAPE = (99, 40, 1)    # INPUT_SHAPE plus a trailing channel axis
# NOTE(review): presumably (window length s, hop length s, feature count)
# for the MFCC/filterbank front end -- confirm against the parser that
# consumes PARSE_PARAMS.
PARSE_PARAMS = (0.025, 0.01, 40)
filters = [16, 32, 64, 128, 256]   # conv filter counts per stage
DROPOUT = 0.25
KERNEL_SIZE = (3, 3)
POOL_SIZE = (2, 2)
DENSE_1 = 512                 # units in the first fully connected layer
DENSE_2 = 256                 # units in the second fully connected layer
BATCH_SIZE = 128
PATIENCE = 5                  # early-stopping patience in epochs
LEARNING_RATE = 0.001
|
normal
|
{
"blob_id": "6a9e18cde94258b01a37f459eceaac58118b4976",
"index": 5813,
"step-1": "<mask token>\n",
"step-2": "NUM_CLASSES = 31\nAUDIO_SR = 16000\nAUDIO_LENGTH = 16000\nLIBROSA_AUDIO_LENGTH = 22050\nEPOCHS = 25\ncategories = {'stop': 0, 'nine': 1, 'off': 2, 'four': 3, 'right': 4,\n 'eight': 5, 'one': 6, 'bird': 7, 'dog': 8, 'no': 9, 'on': 10, 'seven': \n 11, 'cat': 12, 'left': 13, 'three': 14, 'tree': 15, 'bed': 16, 'zero': \n 17, 'happy': 18, 'sheila': 19, 'five': 20, 'down': 21, 'marvin': 22,\n 'six': 23, 'up': 24, 'wow': 25, 'house': 26, 'go': 27, 'yes': 28, 'two':\n 29, '_background_noise_': 30}\ninv_categories = {(0): 'stop', (1): 'nine', (2): 'off', (3): 'four', (4):\n 'right', (5): 'eight', (6): 'one', (7): 'bird', (8): 'dog', (9): 'no',\n (10): 'on', (11): 'seven', (12): 'cat', (13): 'left', (14): 'three', (\n 15): 'tree', (16): 'bed', (17): 'zero', (18): 'happy', (19): 'sheila',\n (20): 'five', (21): 'down', (22): 'marvin', (23): 'six', (24): 'up', (\n 25): 'wow', (26): 'house', (27): 'go', (28): 'yes', (29): 'two', (30):\n '_background_noise_'}\nINPUT_SHAPE = 99, 40\nTARGET_SHAPE = 99, 40, 1\nPARSE_PARAMS = 0.025, 0.01, 40\nfilters = [16, 32, 64, 128, 256]\nDROPOUT = 0.25\nKERNEL_SIZE = 3, 3\nPOOL_SIZE = 2, 2\nDENSE_1 = 512\nDENSE_2 = 256\nBATCH_SIZE = 128\nPATIENCE = 5\nLEARNING_RATE = 0.001\n",
"step-3": "NUM_CLASSES = 31\n\nAUDIO_SR = 16000\nAUDIO_LENGTH = 16000\nLIBROSA_AUDIO_LENGTH = 22050\n\nEPOCHS = 25\n\ncategories = {\n 'stop': 0,\n 'nine': 1,\n 'off': 2,\n 'four': 3,\n 'right': 4,\n 'eight': 5,\n 'one': 6,\n 'bird': 7,\n 'dog': 8,\n 'no': 9,\n 'on': 10,\n 'seven': 11,\n 'cat': 12,\n 'left': 13,\n 'three': 14,\n 'tree': 15,\n 'bed': 16,\n 'zero': 17,\n 'happy': 18,\n 'sheila': 19,\n 'five': 20,\n 'down': 21,\n 'marvin': 22,\n 'six': 23,\n 'up': 24,\n 'wow': 25,\n 'house': 26,\n 'go': 27,\n 'yes': 28,\n 'two': 29,\n '_background_noise_': 30,\n}\n\n\ninv_categories = {\n 0: 'stop',\n 1: 'nine',\n 2: 'off',\n 3: 'four',\n 4: 'right',\n 5: 'eight',\n 6: 'one',\n 7: 'bird',\n 8: 'dog',\n 9: 'no',\n 10: 'on',\n 11: 'seven',\n 12: 'cat',\n 13: 'left',\n 14: 'three',\n 15: 'tree',\n 16: 'bed',\n 17: 'zero',\n 18: 'happy',\n 19: 'sheila',\n 20: 'five',\n 21: 'down',\n 22: 'marvin',\n 23: 'six',\n 24: 'up',\n 25: 'wow',\n 26: 'house',\n 27: 'go',\n 28: 'yes',\n 29: 'two',\n 30: '_background_noise_'\n }\n\n# Marvin model\nINPUT_SHAPE = (99, 40)\nTARGET_SHAPE = (99, 40, 1)\nPARSE_PARAMS = (0.025, 0.01, 40)\nfilters = [16, 32, 64, 128, 256]\n\nDROPOUT = 0.25\nKERNEL_SIZE = (3, 3)\nPOOL_SIZE = (2, 2)\nDENSE_1 = 512\nDENSE_2 = 256\n\nBATCH_SIZE = 128\nPATIENCE = 5\nLEARNING_RATE = 0.001\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Create your views here.
# -*- coding: utf-8 -*-
from json import dumps
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.conf import settings
from utils import Utils, MERCS, ENTITY, PARTS, ARMY, DETAIL_INFO
# In-process cache of per-user state (keyed by uid) shared by every view in
# this module; populated on login and mutated by the order/city views.
RUNNING_INFO = {}
# Timer registry; unused in the code visible in this file.
TIMER = []
class Crisis_View():
    """Django view handlers for the 'crisis' game-bot web UI.

    All views share the module-level RUNNING_INFO dict as an in-process,
    per-uid state cache.  NOTE(review): this is legacy Python 2 / old
    Django code (``except Exception, err``, ``render_to_response`` with
    ``context_instance``, ``dict.iteritems``) -- it cannot run on
    Python 3 or modern Django as-is.
    """
    def __init__(self):
        # Shared helper facade; watch_dog_runner() starts its background job.
        self.utils = Utils()
        self.utils.watch_dog_runner()
    def is_exist(self, uid):
        # True when this uid already has cached state in RUNNING_INFO.
        if uid in RUNNING_INFO.keys():
            return True
        return False
    def login(self, request):
        """ Login: authenticate the POSTed uid/auth pair, seed the session
        flags (is_auth / is_daily / is_leader) and the RUNNING_INFO cache,
        then redirect to the order page on success. """
        context = {}
        # Every visit to the login page resets authentication.
        request.session['is_auth'] = False
        if request.method == "POST":
            if "connect" in request.POST:
                try:
                    uid = request.POST.get("uid", None)
                    auth = request.POST.get("auth", None)
                    if uid is not None:
                        uid = str(uid)
                        # uid = "vk:2342994"
                        # auth = "c0a426784e761547e57afcc6d2bbc367"
                        request.session['uid'] = uid
                        flag, data = self.utils.get_participant_info(uid, auth)
                        if flag:
                            request.session['is_auth'] = True
                            # "br"-prefixed accounts get no daily-mercenary pages.
                            if uid.startswith("br"):
                                request.session['is_daily'] = False
                            else:
                                request.session['is_daily'] = True
                            request.session["is_leader"] = data.get("is_leader", False)
                            if uid not in RUNNING_INFO.keys():
                                # First login in this process: start idle.
                                data.update({"is_run": False, "is_attack": False})
                            else:
                                # Merge fresh participant info over the cached state
                                # so is_run/is_attack flags survive re-login.
                                temp = RUNNING_INFO[uid]
                                temp.update(data)
                                data = temp
                            RUNNING_INFO.update({uid: data})
                            context.update(data)
                            return HttpResponseRedirect('order')
                        else:
                            # Lookup failed: `data` carries the error message.
                            context = {"error": True, "message": data}
                except Exception, err:
                    self.utils.logger.error(err.message)
                    if "Unable to create a new session key" in err.message:
                        context = {"error": True, "message": "Internal server Error. Please, contact administrator and try later..."}
                    else:
                        context = {"error": True, "message": "User select failed. Please, try again..."}
        return render_to_response("crisis/user_select.html",
                                  context,
                                  context_instance=RequestContext(request))
    def info(self, request):
        # Debug endpoint: dump the whole RUNNING_INFO cache as plain text.
        return HttpResponse("= %s =" % RUNNING_INFO)
    def order(self, request):
        """ Gather resources and order units.

        GET renders the current cached state; POST either starts a gather
        run ("start": parse per-unit counts/priorities from the form) or
        stops it ("stop").
        """
        is_auth = request.session.get("is_auth", False)
        if not is_auth:
            return HttpResponseRedirect('/crisis')
        uid = request.session['uid']
        context = RUNNING_INFO.get(uid, {})
        context.update({"is_auth": is_auth,
                        "is_daily": request.session.get("is_daily", False),
                        "is_leader": request.session.get("is_leader", False),
                        "entity_list": ENTITY,
                        "parts_list": PARTS,
                        "detail_info_list": DETAIL_INFO})
        if "priority" not in context:
            # Default every army unit to priority 1 on first visit.
            priority = {}
            for item in ARMY:
                priority.update({item: 1})
            context.update({"priority": priority})
        if context.get("is_run", False):
            # A gather run is active: refresh countdown and current order.
            context.update({"left_time": self.utils.get_remaining_time(uid),
                            "order": self.utils.get_current_unit_order(uid)})
        """ Context Example
        context = {"username": self.utils.get_user_name(uid),
                   "is_run": False,
                   "is_auth": is_auth,
                   "resource": {"money": 100, "food": 200, "fuel": 300},
                   "entity": {"armor_composite": 1, "armor_plate": 2, "control_block": 3,
                              "gun_receiver": 4, "kevlar_fiber": 5, "laser_aimer": 6,
                              "powder_charge": 7, "rare_item": 8, "tnt_charge": 9},
                   "parts": {"artillery_armor": 1, "artillery_chassis": 2, "artillery_shell": 3, "detonator": 4,
                             "gunner_armor": 5, "gunner_gun": 6, "jeep_armor": 7, "jeep_gun": 8, "sniper_armor": 9,
                             "sniper_gun": 10, "soldier_gun": 11, "tank_chassis": 12, "thrower_armor": 13,
                             "thrower_gun": 14, "wave_emitter": 15},
                   'order': {'soldier': 1, 'thrower': 4, 'artillery': 8, 'gunner': 2, 'base_artillery': 7, 'jeep': 6, 'artillery_emp': 9, 'base_tank': 5, 'artillery_cassete': 0, 'sniper': 3}
                   }
        """
        if request.method == "POST":
            if "start" in request.POST:
                order, priority = {}, {}
                data = dict(request.POST)
                for item in ARMY:
                    # Form values arrive as single-element lists; fall back to
                    # count=0 / priority=1 on missing or non-numeric input.
                    try:
                        count = int(data.get(item, [''])[0])
                    except:
                        count = 0
                    try:
                        prior = int(data.get("%s_priority" % item, [''])[0])
                    except:
                        prior = 1
                    order.update({item: count})
                    priority.update({item: prior})
                context.update({"is_run": True,
                                "order": order,
                                "priority": priority,
                                "left_time": self.utils.get_remaining_time(uid)})
                RUNNING_INFO.update({uid: context})
                self.utils.start_gather(uid, context)
            elif "stop" in request.POST:
                uid = request.session['uid']
                context = RUNNING_INFO.get(uid, {})
                context.update({"is_run": False, "left_time": "00:00:00"})
                RUNNING_INFO.update({uid: context})
                self.utils.stop_gather(uid)
        return render_to_response("crisis/order.html",
                                  context,
                                  context_instance=RequestContext(request))
    def daily(self, request):
        """ Set Daily Mercenary: view and save the four per-user daily
        parameters (merc, daily schema, event schema, group plugin). """
        is_auth = request.session.get("is_auth", False)
        is_daily = request.session.get("is_daily", False)
        if not is_auth or not is_daily:
            return HttpResponseRedirect('/crisis')
        uid = request.session.get("uid", None)
        # params is positional: [daily_merc, daily_schema, event_schema, group_plugin].
        params = self.utils.get_daily_params(uid)
        context = {"username": self.utils.get_user_name(uid),
                   "is_auth": is_auth,
                   "is_daily": is_daily,
                   "is_leader": request.session.get("is_leader", False),
                   "mercs": ["off"] + MERCS + ["random"],
                   "daily_merc": params[0],
                   "daily_schema": params[1],
                   "event_schema": params[2],
                   "group_plugin": params[3],
                   }
        if request.method == "POST":
            if "save" in request.POST:
                # Persist each submitted value, then echo it back to the page.
                daily_merc = request.POST.get("daily_merc", None)
                self.utils.update_participant_params(uid, "DAILY_MERC", daily_merc)
                daily_schema = request.POST.get("daily_schema", None)
                self.utils.update_participant_params(uid, "DAILY_SCHEMA", daily_schema)
                event_schema = request.POST.get("event_schema", None)
                self.utils.update_participant_params(uid, "EVENT_SCHEMA", event_schema)
                group_plugin = request.POST.get("group_plugin", None)
                self.utils.update_participant_params(uid, "GROUP_PLUGIN", group_plugin)
                context.update({"daily_merc": daily_merc,
                                "daily_schema": daily_schema,
                                "event_schema": event_schema,
                                "group_plugin": group_plugin})
        return render_to_response("crisis/daily.html",
                                  context,
                                  context_instance=RequestContext(request))
    def trade(self, request):
        """ Buy entities at trade house: edit and persist the per-user
        trade order (entity -> [money/gold] plus a money_limit floor). """
        is_auth = request.session.get("is_auth", False)
        is_daily = request.session.get("is_daily", False)
        if not is_auth or not is_daily:
            return HttpResponseRedirect('/crisis')
        uid = request.session.get("uid", None)
        entity_order = self.utils.get_trade_order(uid)
        context = {"username": self.utils.get_user_name(uid),
                   "is_auth": is_auth,
                   "is_daily": is_daily,
                   "is_leader": request.session.get("is_leader", False),
                   "slicer_list": [":4", "4:8", "8:"],
                   "entities": ["soldier_gun", "gunner_gun"] + ENTITY,
                   "entity_order": entity_order,
                   }
        if request.method == "POST":
            if "save" in request.POST:
                # Rebuild the order from scratch out of the form's checkbox
                # keys, which are encoded as "<entity>@money" / "<entity>@gold".
                entity_order = {}
                for key, value in dict(request.POST).iteritems():
                    if "@money" in key or "@gold" in key:
                        entity, kind = key.split("@")
                        temp = list(entity_order.get(entity, []))
                        if kind not in temp:
                            temp.append(str(kind))
                        entity_order.update({entity: temp})
                    elif "money_limit" in key:
                        # Clamp the spend limit to at least 1000; empty field
                        # falls back to 10000.
                        value = value[0]
                        if value:
                            if int(value) < 1000:
                                money_limit = 1000
                            else:
                                money_limit = int(value)
                        else:
                            money_limit = 10000
                        entity_order.update({"money_limit": money_limit})
                context.update({"entity_order": entity_order})
                # Persisted as JSON text via the shared params store.
                self.utils.update_participant_params(uid, "TRADE_ORDER", dumps(entity_order))
        return render_to_response("crisis/trade.html",
                                  context,
                                  context_instance=RequestContext(request))
    def city(self, request):
        """ Attack city: start ("start" + selected city) or stop ("stop")
        a city attack for the logged-in user. """
        is_auth = request.session.get("is_auth", False)
        is_daily = request.session.get("is_daily", False)
        if not is_auth or not is_daily:
            return HttpResponseRedirect('/crisis')
        uid = request.session.get("uid", None)
        regions, cities = self.utils.get_region_cities()
        context = RUNNING_INFO.get(uid, {})
        context.update({"username": self.utils.get_user_name(uid),
                        "is_auth": is_auth,
                        "is_daily": is_daily,
                        "is_leader": request.session.get("is_leader", False),
                        "is_attack": context.get("is_attack", False),
                        "regions": regions,
                        "cities": cities
                        })
        if request.method == "POST":
            if "start" in request.POST:
                try:
                    context.update({"is_attack": True, "selected_city": request.POST.get("city")})
                    RUNNING_INFO.update({uid: context})
                    self.utils.start_city_attack(uid, context)
                except Exception, err:
                    self.utils.logger.error("Error during start city attack: %s" % err)
            elif "stop" in request.POST:
                # NOTE(review): bare except silently swallows any failure on
                # the stop path -- presumably a deliberate best-effort; confirm.
                try:
                    context.update({"is_attack": False})
                    self.utils.stop_city_attack(uid)
                except:
                    pass
        return render_to_response("crisis/city.html",
                                  context,
                                  context_instance=RequestContext(request))
    def statistics(self, request):
        """ Clan participants weekly statistics. Unlike the other pages this
        one renders without authentication; session flags are passed through
        only to drive the navigation bar. """
        context = {"uid": request.session.get("uid", None),
                   "is_auth": request.session.get("is_auth", False),
                   "is_daily": request.session.get("is_daily", False),
                   "is_leader": request.session.get("is_leader", False),
                   }
        context.update({"statistics": self.utils.get_statistics()})
        # "dates": self.utils.get_artefacts_dates()})
        return render_to_response("crisis/statistics.html",
                                  context,
                                  context_instance=RequestContext(request))
    def about(self, request):
        """ Information about developer (authors come from settings.ADMINS). """
        context = {"is_auth": request.session.get("is_auth", False),
                   "is_daily": request.session.get("is_daily", False),
                   "is_leader": request.session.get("is_leader", False),
                   "authors": settings.ADMINS}
        return render_to_response("crisis/about.html",
                                  context,
                                  context_instance=RequestContext(request))
|
normal
|
{
"blob_id": "b1c8aceab44574d0f53d30969861be028c920ef2",
"index": 5007,
"step-1": "# Create your views here.\n# -*- coding: utf-8 -*-\n\nfrom json import dumps\nfrom django.shortcuts import render_to_response\nfrom django.http import Http404, HttpResponseRedirect, HttpResponse\nfrom django.template import RequestContext\nfrom django.conf import settings\nfrom utils import Utils, MERCS, ENTITY, PARTS, ARMY, DETAIL_INFO\n\nRUNNING_INFO = {}\nTIMER = []\n\n\nclass Crisis_View():\n def __init__(self):\n self.utils = Utils()\n self.utils.watch_dog_runner()\n\n def is_exist(self, uid):\n if uid in RUNNING_INFO.keys():\n return True\n return False\n\n def login(self, request):\n \"\"\" Login \"\"\"\n context = {}\n request.session['is_auth'] = False\n if request.method == \"POST\":\n if \"connect\" in request.POST:\n try:\n uid = request.POST.get(\"uid\", None)\n auth = request.POST.get(\"auth\", None)\n if uid is not None:\n uid = str(uid)\n # uid = \"vk:2342994\"\n # auth = \"c0a426784e761547e57afcc6d2bbc367\"\n request.session['uid'] = uid\n flag, data = self.utils.get_participant_info(uid, auth)\n if flag:\n request.session['is_auth'] = True\n if uid.startswith(\"br\"):\n request.session['is_daily'] = False\n else:\n request.session['is_daily'] = True\n\n request.session[\"is_leader\"] = data.get(\"is_leader\", False)\n\n if uid not in RUNNING_INFO.keys():\n data.update({\"is_run\": False, \"is_attack\": False})\n else:\n temp = RUNNING_INFO[uid]\n temp.update(data)\n data = temp\n\n RUNNING_INFO.update({uid: data})\n context.update(data)\n\n return HttpResponseRedirect('order')\n else:\n context = {\"error\": True, \"message\": data}\n except Exception, err:\n self.utils.logger.error(err.message)\n if \"Unable to create a new session key\" in err.message:\n context = {\"error\": True, \"message\": \"Internal server Error. Please, contact administrator and try later...\"}\n else:\n context = {\"error\": True, \"message\": \"User select failed. 
Please, try again...\"}\n\n return render_to_response(\"crisis/user_select.html\",\n context,\n context_instance=RequestContext(request))\n\n def info(self, request):\n return HttpResponse(\"= %s =\" % RUNNING_INFO)\n\n def order(self, request):\n \"\"\" Gather resources and order units \"\"\"\n is_auth = request.session.get(\"is_auth\", False)\n if not is_auth:\n return HttpResponseRedirect('/crisis')\n\n uid = request.session['uid']\n context = RUNNING_INFO.get(uid, {})\n context.update({\"is_auth\": is_auth,\n \"is_daily\": request.session.get(\"is_daily\", False),\n \"is_leader\": request.session.get(\"is_leader\", False),\n \"entity_list\": ENTITY,\n \"parts_list\": PARTS,\n \"detail_info_list\": DETAIL_INFO})\n\n if \"priority\" not in context:\n priority = {}\n for item in ARMY:\n priority.update({item: 1})\n context.update({\"priority\": priority})\n\n if context.get(\"is_run\", False):\n context.update({\"left_time\": self.utils.get_remaining_time(uid),\n \"order\": self.utils.get_current_unit_order(uid)})\n \"\"\" Context Example\n context = {\"username\": self.utils.get_user_name(uid),\n \"is_run\": False,\n \"is_auth\": is_auth,\n \"resource\": {\"money\": 100, \"food\": 200, \"fuel\": 300},\n \"entity\": {\"armor_composite\": 1, \"armor_plate\": 2, \"control_block\": 3,\n \"gun_receiver\": 4, \"kevlar_fiber\": 5, \"laser_aimer\": 6,\n \"powder_charge\": 7, \"rare_item\": 8, \"tnt_charge\": 9},\n \"parts\": {\"artillery_armor\": 1, \"artillery_chassis\": 2, \"artillery_shell\": 3, \"detonator\": 4,\n \"gunner_armor\": 5, \"gunner_gun\": 6, \"jeep_armor\": 7, \"jeep_gun\": 8, \"sniper_armor\": 9,\n \"sniper_gun\": 10, \"soldier_gun\": 11, \"tank_chassis\": 12, \"thrower_armor\": 13,\n \"thrower_gun\": 14, \"wave_emitter\": 15},\n 'order': {'soldier': 1, 'thrower': 4, 'artillery': 8, 'gunner': 2, 'base_artillery': 7, 'jeep': 6, 'artillery_emp': 9, 'base_tank': 5, 'artillery_cassete': 0, 'sniper': 3}\n }\n \"\"\"\n\n if request.method == \"POST\":\n if 
\"start\" in request.POST:\n order, priority = {}, {}\n data = dict(request.POST)\n\n for item in ARMY:\n try:\n count = int(data.get(item, [''])[0])\n except:\n count = 0\n try:\n prior = int(data.get(\"%s_priority\" % item, [''])[0])\n except:\n prior = 1\n order.update({item: count})\n priority.update({item: prior})\n\n context.update({\"is_run\": True,\n \"order\": order,\n \"priority\": priority,\n \"left_time\": self.utils.get_remaining_time(uid)})\n\n RUNNING_INFO.update({uid: context})\n self.utils.start_gather(uid, context)\n elif \"stop\" in request.POST:\n uid = request.session['uid']\n context = RUNNING_INFO.get(uid, {})\n context.update({\"is_run\": False, \"left_time\": \"00:00:00\"})\n RUNNING_INFO.update({uid: context})\n self.utils.stop_gather(uid)\n\n return render_to_response(\"crisis/order.html\",\n context,\n context_instance=RequestContext(request))\n\n def daily(self, request):\n \"\"\" Set Daily Mercenary \"\"\"\n is_auth = request.session.get(\"is_auth\", False)\n is_daily = request.session.get(\"is_daily\", False)\n if not is_auth or not is_daily:\n return HttpResponseRedirect('/crisis')\n uid = request.session.get(\"uid\", None)\n\n params = self.utils.get_daily_params(uid)\n context = {\"username\": self.utils.get_user_name(uid),\n \"is_auth\": is_auth,\n \"is_daily\": is_daily,\n \"is_leader\": request.session.get(\"is_leader\", False),\n \"mercs\": [\"off\"] + MERCS + [\"random\"],\n \"daily_merc\": params[0],\n \"daily_schema\": params[1],\n \"event_schema\": params[2],\n \"group_plugin\": params[3],\n }\n\n if request.method == \"POST\":\n if \"save\" in request.POST:\n daily_merc = request.POST.get(\"daily_merc\", None)\n self.utils.update_participant_params(uid, \"DAILY_MERC\", daily_merc)\n\n daily_schema = request.POST.get(\"daily_schema\", None)\n self.utils.update_participant_params(uid, \"DAILY_SCHEMA\", daily_schema)\n\n event_schema = request.POST.get(\"event_schema\", None)\n self.utils.update_participant_params(uid, 
\"EVENT_SCHEMA\", event_schema)\n\n group_plugin = request.POST.get(\"group_plugin\", None)\n self.utils.update_participant_params(uid, \"GROUP_PLUGIN\", group_plugin)\n\n context.update({\"daily_merc\": daily_merc,\n \"daily_schema\": daily_schema,\n \"event_schema\": event_schema,\n \"group_plugin\": group_plugin})\n return render_to_response(\"crisis/daily.html\",\n context,\n context_instance=RequestContext(request))\n\n def trade(self, request):\n \"\"\" Buy entities at trade house \"\"\"\n is_auth = request.session.get(\"is_auth\", False)\n is_daily = request.session.get(\"is_daily\", False)\n if not is_auth or not is_daily:\n return HttpResponseRedirect('/crisis')\n uid = request.session.get(\"uid\", None)\n\n entity_order = self.utils.get_trade_order(uid)\n context = {\"username\": self.utils.get_user_name(uid),\n \"is_auth\": is_auth,\n \"is_daily\": is_daily,\n \"is_leader\": request.session.get(\"is_leader\", False),\n \"slicer_list\": [\":4\", \"4:8\", \"8:\"],\n \"entities\": [\"soldier_gun\", \"gunner_gun\"] + ENTITY,\n \"entity_order\": entity_order,\n }\n\n if request.method == \"POST\":\n if \"save\" in request.POST:\n entity_order = {}\n for key, value in dict(request.POST).iteritems():\n if \"@money\" in key or \"@gold\" in key:\n entity, kind = key.split(\"@\")\n\n temp = list(entity_order.get(entity, []))\n if kind not in temp:\n temp.append(str(kind))\n\n entity_order.update({entity: temp})\n elif \"money_limit\" in key:\n value = value[0]\n if value:\n if int(value) < 1000:\n money_limit = 1000\n else:\n money_limit = int(value)\n else:\n money_limit = 10000\n entity_order.update({\"money_limit\": money_limit})\n\n context.update({\"entity_order\": entity_order})\n self.utils.update_participant_params(uid, \"TRADE_ORDER\", dumps(entity_order))\n return render_to_response(\"crisis/trade.html\",\n context,\n context_instance=RequestContext(request))\n\n def city(self, request):\n \"\"\" Attack city \"\"\"\n is_auth = 
request.session.get(\"is_auth\", False)\n is_daily = request.session.get(\"is_daily\", False)\n if not is_auth or not is_daily:\n return HttpResponseRedirect('/crisis')\n uid = request.session.get(\"uid\", None)\n\n regions, cities = self.utils.get_region_cities()\n\n context = RUNNING_INFO.get(uid, {})\n context.update({\"username\": self.utils.get_user_name(uid),\n \"is_auth\": is_auth,\n \"is_daily\": is_daily,\n \"is_leader\": request.session.get(\"is_leader\", False),\n \"is_attack\": context.get(\"is_attack\", False),\n \"regions\": regions,\n \"cities\": cities\n })\n if request.method == \"POST\":\n if \"start\" in request.POST:\n try:\n context.update({\"is_attack\": True, \"selected_city\": request.POST.get(\"city\")})\n RUNNING_INFO.update({uid: context})\n self.utils.start_city_attack(uid, context)\n except Exception, err:\n self.utils.logger.error(\"Error during start city attack: %s\" % err)\n elif \"stop\" in request.POST:\n try:\n context.update({\"is_attack\": False})\n self.utils.stop_city_attack(uid)\n except:\n pass\n return render_to_response(\"crisis/city.html\",\n context,\n context_instance=RequestContext(request))\n\n def statistics(self, request):\n \"\"\" Clan participants weekly statistics \"\"\"\n context = {\"uid\": request.session.get(\"uid\", None),\n \"is_auth\": request.session.get(\"is_auth\", False),\n \"is_daily\": request.session.get(\"is_daily\", False),\n \"is_leader\": request.session.get(\"is_leader\", False),\n }\n\n context.update({\"statistics\": self.utils.get_statistics()})\n # \"dates\": self.utils.get_artefacts_dates()})\n return render_to_response(\"crisis/statistics.html\",\n context,\n context_instance=RequestContext(request))\n\n def about(self, request):\n \"\"\" Information about developer \"\"\"\n context = {\"is_auth\": request.session.get(\"is_auth\", False),\n \"is_daily\": request.session.get(\"is_daily\", False),\n \"is_leader\": request.session.get(\"is_leader\", False),\n \"authors\": settings.ADMINS}\n 
return render_to_response(\"crisis/about.html\",\n context,\n context_instance=RequestContext(request))",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# TUPLES ARE IMMUTABLE
# AND USUALLY HETEROGENEOUS
# tuple with 1 or 0 elements
#
# empty = ()
# singleton = 'breno',
# print(type(empty))
# print(singleton)
# tuples can be nested
# t = 12345, 54321, 'hello!'
# u = t, (1, 2, 3, 4, 5)
# immutable: item assignment raises TypeError
# t[0] = 88888
|
normal
|
{
"blob_id": "34e902fbced13629657494eedfe385d3b5ae3f55",
"index": 2489,
"step-1": "# TUPLE IMUTAVEL\n# GERALMENTE HETEORGENEA\n\n# tupla com 1 ou 0 elementos\n#\n# empty = ()\n# singleton = 'breno',\n# print(type(empty))\n# print(singleton)\n\n# tuplas podem ser aninhadas\n# t = 12345, 54321, 'hello!'\n# u = t, (1, 2, 3, 4, 5)\n\n#imutaveis\n# t[0] = 88888",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
Read all the images from a directory,
resize, rescale and rename them.
"""
|
flexible
|
{
"blob_id": "670efbd9879099b24a87e19a531c4e3bbce094c6",
"index": 1666,
"step-1": "<mask token>\n",
"step-2": "\n\n\"\"\"\nRead all the images from a directory,\nresize, rescale and rename them.\n\"\"\"\n\n\n\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Bulk-create throwaway IAM users named ixasisiidemo701..ixasisiidemo1099.
while True:
    # Fresh session/resource each pass; 'dev_root' must exist in the
    # local AWS credentials/config.
    session = boto3.session.Session(profile_name='dev_root')
    iam_re = session.resource(service_name='iam')
    for each in range(701, 1100):
        try:
            iam_re.create_user(UserName='ixasisiidemo' + str(each))
            if each == 509:
                # NOTE(review): unreachable -- `each` ranges 701..1099, so
                # this cut-off can never fire; confirm the intended value.
                sys.exit()
        except Exception:
            # Narrowed from a bare `except:` so that SystemExit raised by
            # sys.exit() above propagates (the original swallowed it and
            # looped forever) and Ctrl-C still interrupts the script.
            continue
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import boto3, sys
from pprint import pprint
# Bulk-create throwaway IAM users named ixasisiidemo701..ixasisiidemo1099.
while True:
    # Fresh session/resource each pass; 'dev_root' must exist in the
    # local AWS credentials/config.
    session = boto3.session.Session(profile_name='dev_root')
    iam_re = session.resource(service_name='iam')
    for each in range(701, 1100):
        try:
            iam_re.create_user(UserName='ixasisiidemo' + str(each))
            if each == 509:
                # NOTE(review): unreachable -- `each` ranges 701..1099, so
                # this cut-off can never fire; confirm the intended value.
                sys.exit()
        except Exception:
            # Narrowed from a bare `except:` so that SystemExit raised by
            # sys.exit() above propagates (the original swallowed it and
            # looped forever) and Ctrl-C still interrupts the script.
            continue
<|reserved_special_token_1|>
'''
Take list of iam users in a csv file like
S_NO, IAM_User_Name,Programatic_Access,Console_Access,PolicyARN
1,XYZ, Yes,No,arn:aws:iam::aws:policy/AdministratorAccess
2.pqr,Yes,Yes,arn:aws:iam::aws:policy/AdministratorAccess
3.abc,No,Yes,arn:aws:iam::aws:policy/AmazonAPIGatewayInvokeFullAccess
'''
import boto3,sys
from pprint import pprint
# Bulk-create throwaway IAM users named ixasisiidemo701..ixasisiidemo1099.
while True:
    # Fresh session/resource each pass; 'dev_root' must exist in the
    # local AWS credentials/config.
    session = boto3.session.Session(profile_name="dev_root")
    iam_re = session.resource(service_name="iam")
    for each in range(701, 1100):
        try:
            iam_re.create_user(UserName="ixasisiidemo" + str(each))
            if each == 509:
                # NOTE(review): unreachable -- `each` ranges 701..1099, so
                # this cut-off can never fire; confirm the intended value.
                sys.exit()
        except Exception:
            # Narrowed from a bare `except:` so that SystemExit raised by
            # sys.exit() above propagates (the original swallowed it and
            # looped forever) and Ctrl-C still interrupts the script.
            continue
|
flexible
|
{
"blob_id": "00afab442f56d364c785324f816b52b4a6be609d",
"index": 3078,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n session = boto3.session.Session(profile_name='dev_root')\n iam_re = session.resource(service_name='iam')\n for each in range(701, 1100):\n try:\n iam_re.create_user(UserName='ixasisiidemo' + str(each))\n if each == 509:\n sys.exit()\n except:\n continue\n",
"step-3": "<mask token>\nimport boto3, sys\nfrom pprint import pprint\nwhile True:\n session = boto3.session.Session(profile_name='dev_root')\n iam_re = session.resource(service_name='iam')\n for each in range(701, 1100):\n try:\n iam_re.create_user(UserName='ixasisiidemo' + str(each))\n if each == 509:\n sys.exit()\n except:\n continue\n",
"step-4": "'''\nTake list of iam users in a csv file like\n\nS_NO, IAM_User_Name,Programatic_Access,Console_Access,PolicyARN\n\n1,XYZ, Yes,No,arn:aws:iam::aws:policy/AdministratorAccess\n\n2.pqr,Yes,Yes,arn:aws:iam::aws:policy/AdministratorAccess\n\n3.abc,No,Yes,arn:aws:iam::aws:policy/AmazonAPIGatewayInvokeFullAccess\n\n'''\n\nimport boto3,sys\nfrom pprint import pprint\nwhile True:\n session=boto3.session.Session(profile_name=\"dev_root\")\n iam_re=session.resource(service_name=\"iam\")\n for each in range(701,1100):\n try:\n iam_re.create_user(UserName=\"ixasisiidemo\"+str(each))\n if each==509:\n sys.exit()\n except:\n continue\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
import os
# Shared SQLAlchemy handle; bound to a concrete app later via db.init_app().
db = SQLAlchemy()
# Absolute path to the package-local "static" directory, resolved from this
# file's location so it works regardless of the process working directory.
static_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
    'static')
def create_app(config_name):
    """Application factory.

    Builds a Flask app for the named configuration, registers the main
    blueprint and binds the shared SQLAlchemy instance to it.

    :param config_name: key into the module-level ``config`` mapping.
    :return: the fully configured :class:`~flask.Flask` application.
    """
    application = Flask(__name__, static_folder=static_file_dir)

    # Load the environment-specific configuration object and let it run
    # any app-level initialisation hooks it defines.
    cfg = config[config_name]
    application.config.from_object(cfg)
    cfg.init_app(application)

    # Imported here (not at module top) to avoid a circular import with
    # the blueprint module.
    from .main import main as main_blueprint
    application.register_blueprint(main_blueprint)

    # Attach the shared database handle to this application instance.
    db.init_app(application)
    return application
|
normal
|
{
"blob_id": "bee6ba1db608c1d9c8114f89d4b3abab795a6b86",
"index": 3843,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-3": "<mask token>\ndb = SQLAlchemy()\nstatic_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'static')\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-4": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config import config\nimport os\ndb = SQLAlchemy()\nstatic_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'static')\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def convertWPFile(mapName):
    """Convert an old-style PezBot 'mp_<map>_waypoints.gsc' waypoint dump
    into the newer Bot Warfare '<Map>.gsc' format.

    Reads 'mp_' + mapName + '_waypoints.gsc' from the current directory and
    writes mapName.capitalize() + '.gsc' next to it.
    """
    fullMapName = 'mp_' + mapName + '_waypoints.gsc'
    # `with` guarantees the handles are closed even on error (the original
    # opened/closed manually and leaked the files if an exception fired).
    with open(fullMapName, 'r') as waypoints:
        wpLines = waypoints.readlines()
    wpLinesNew = []
    for i, j in enumerate(wpLines):
        # The first 32 lines are header boilerplate in the old format; of
        # the rest, keep only the 'level.' assignment lines.
        if i > 31 and 'level.' in j:
            # j[10:] strips the first 10 characters -- presumably the
            # indented 'level.' prefix -- so the entries address the local
            # `waypoints` array instead of the level object.
            wpLinesNew.append('    ' + j[10:])
    wpLinesNew.append('return waypoints;\n}')
    newMapName = mapName.capitalize() + '.gsc'
    with open(newMapName, 'w') as newWPFile:
        # Emit the GSC function header: Mapname() { waypoints = []; ... }
        newWPFile.write(mapName.capitalize() + '()\n{\n    waypoints = [];\n')
        for line in wpLinesNew:
            # waypointCount entries are not used by the new format.
            if 'waypointCount' not in line:
                newWPFile.write('    ' + line.strip() + '\n')
    print('\n%s.gsc successfully converted' % mapName)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def convertWPFile(mapName):
    """Convert an old-style PezBot 'mp_<map>_waypoints.gsc' waypoint dump
    into the newer Bot Warfare '<Map>.gsc' format.

    Reads 'mp_' + mapName + '_waypoints.gsc' from the current directory and
    writes mapName.capitalize() + '.gsc' next to it.
    """
    fullMapName = 'mp_' + mapName + '_waypoints.gsc'
    # `with` guarantees the handles are closed even on error (the original
    # opened/closed manually and leaked the files if an exception fired).
    with open(fullMapName, 'r') as waypoints:
        wpLines = waypoints.readlines()
    wpLinesNew = []
    for i, j in enumerate(wpLines):
        # The first 32 lines are header boilerplate in the old format; of
        # the rest, keep only the 'level.' assignment lines.
        if i > 31 and 'level.' in j:
            # j[10:] strips the first 10 characters -- presumably the
            # indented 'level.' prefix -- so the entries address the local
            # `waypoints` array instead of the level object.
            wpLinesNew.append('    ' + j[10:])
    wpLinesNew.append('return waypoints;\n}')
    newMapName = mapName.capitalize() + '.gsc'
    with open(newMapName, 'w') as newWPFile:
        # Emit the GSC function header: Mapname() { waypoints = []; ... }
        newWPFile.write(mapName.capitalize() + '()\n{\n    waypoints = [];\n')
        for line in wpLinesNew:
            # waypointCount entries are not used by the new format.
            if 'waypointCount' not in line:
                newWPFile.write('    ' + line.strip() + '\n')
    print('\n%s.gsc successfully converted' % mapName)
# Convert every requested map, trimming stray whitespace around each name.
# NOTE(review): relies on a module-level `mapNameList`; it is not defined in
# this snippet -- confirm it is in scope before this loop runs.
for name in mapNameList:
    convertWPFile(name.strip())
<|reserved_special_token_1|>
# Prompt for one or more map base names (e.g. "crash,strike" for mp_crash /
# mp_strike); lower-cased to match the on-disk waypoint file naming.
mapName = input(
    """
Enter map name(s) (omitting the mp_ prefix)
Separate map names with comma
:"""
    ).lower()
# Split the comma-separated input into individual map names.
mapNameList = mapName.split(',')
def convertWPFile(mapName):
    """Convert an old-style PezBot 'mp_<map>_waypoints.gsc' waypoint dump
    into the newer Bot Warfare '<Map>.gsc' format.

    Reads 'mp_' + mapName + '_waypoints.gsc' from the current directory and
    writes mapName.capitalize() + '.gsc' next to it.
    """
    fullMapName = 'mp_' + mapName + '_waypoints.gsc'
    # `with` guarantees the handles are closed even on error (the original
    # opened/closed manually and leaked the files if an exception fired).
    with open(fullMapName, 'r') as waypoints:
        wpLines = waypoints.readlines()
    wpLinesNew = []
    for i, j in enumerate(wpLines):
        # The first 32 lines are header boilerplate in the old format; of
        # the rest, keep only the 'level.' assignment lines.
        if i > 31 and 'level.' in j:
            # j[10:] strips the first 10 characters -- presumably the
            # indented 'level.' prefix -- so the entries address the local
            # `waypoints` array instead of the level object.
            wpLinesNew.append('    ' + j[10:])
    wpLinesNew.append('return waypoints;\n}')
    newMapName = mapName.capitalize() + '.gsc'
    with open(newMapName, 'w') as newWPFile:
        # Emit the GSC function header: Mapname() { waypoints = []; ... }
        newWPFile.write(mapName.capitalize() + '()\n{\n    waypoints = [];\n')
        for line in wpLinesNew:
            # waypointCount entries are not used by the new format.
            if 'waypointCount' not in line:
                newWPFile.write('    ' + line.strip() + '\n')
    print('\n%s.gsc successfully converted' % mapName)
# Convert every requested map, trimming stray whitespace around each name.
for name in mapNameList:
    convertWPFile(name.strip())
<|reserved_special_token_1|>
# Prompt for one or more map base names (e.g. "crash,strike" for mp_crash /
# mp_strike); lower-cased to match the on-disk waypoint file naming.
mapName =input('\nEnter map name(s) (omitting the mp_ prefix)\nSeparate map names with comma\n:').lower()
# Split the comma-separated input into individual map names.
mapNameList =mapName.split(',')
def convertWPFile(mapName):
    """Convert an old-style PezBot 'mp_<map>_waypoints.gsc' waypoint dump
    into the newer Bot Warfare '<Map>.gsc' format.

    Reads 'mp_' + mapName + '_waypoints.gsc' from the current directory and
    writes mapName.capitalize() + '.gsc' next to it.
    """
    fullMapName = 'mp_' + mapName + '_waypoints.gsc'
    # `with` guarantees the handles are closed even on error (the original
    # opened/closed manually and leaked the files if an exception fired);
    # the unused `temp` counter and dead commented-out code were dropped.
    with open(fullMapName, 'r') as waypoints:
        wpLines = waypoints.readlines()
    wpLinesNew = []
    for i, j in enumerate(wpLines):
        # The first 32 lines are header boilerplate in the old format; of
        # the rest, keep only the 'level.' assignment lines.
        if i > 31 and 'level.' in j:
            # j[10:] strips the first 10 characters -- presumably the
            # indented 'level.' prefix -- so the entries address the local
            # `waypoints` array instead of the level object.
            wpLinesNew.append('    ' + j[10:])
    wpLinesNew.append('return waypoints;\n}')
    newMapName = mapName.capitalize() + '.gsc'
    with open(newMapName, 'w') as newWPFile:
        # Emit the GSC function header: Mapname() { waypoints = []; ... }
        newWPFile.write(mapName.capitalize() + '()\n{\n    waypoints = [];\n')
        for line in wpLinesNew:
            # waypointCount entries are not used by the new format.
            if 'waypointCount' not in line:
                newWPFile.write('    ' + line.strip() + '\n')
    print('\n%s.gsc successfully converted' % mapName)
# Run the conversion for every requested map name.
for name in mapNameList:
    convertWPFile(name.strip())
|
flexible
|
{
"blob_id": "1aacd04234d60e495888fc44abe3fbacf404e0ce",
"index": 5799,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef convertWPFile(mapName):\n fullMapName = 'mp_' + mapName + '_waypoints.gsc'\n waypoints = open(fullMapName, 'r')\n wpLines = waypoints.readlines()\n waypoints.close()\n wpLinesNew = []\n temp = 0\n for i, j in enumerate(wpLines):\n if i > 31:\n if 'level.' in j:\n wpLinesNew.append(' ' + j[10:])\n wpLinesNew.append('return waypoints;\\n}')\n newMapName = mapName.capitalize() + '.gsc'\n newWPFile = open(newMapName, 'w')\n newWPFile.write(mapName.capitalize() + '()\\n{\\n waypoints = [];\\n')\n for i in wpLinesNew:\n if 'waypointCount' not in i:\n newWPFile.write(' ' + i.strip() + '\\n')\n print('\\n%s.gsc successfully converted' % mapName)\n newWPFile.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef convertWPFile(mapName):\n fullMapName = 'mp_' + mapName + '_waypoints.gsc'\n waypoints = open(fullMapName, 'r')\n wpLines = waypoints.readlines()\n waypoints.close()\n wpLinesNew = []\n temp = 0\n for i, j in enumerate(wpLines):\n if i > 31:\n if 'level.' in j:\n wpLinesNew.append(' ' + j[10:])\n wpLinesNew.append('return waypoints;\\n}')\n newMapName = mapName.capitalize() + '.gsc'\n newWPFile = open(newMapName, 'w')\n newWPFile.write(mapName.capitalize() + '()\\n{\\n waypoints = [];\\n')\n for i in wpLinesNew:\n if 'waypointCount' not in i:\n newWPFile.write(' ' + i.strip() + '\\n')\n print('\\n%s.gsc successfully converted' % mapName)\n newWPFile.close()\n\n\nfor name in mapNameList:\n convertWPFile(name.strip())\n",
"step-4": "mapName = input(\n \"\"\"\nEnter map name(s) (omitting the mp_ prefix)\nSeparate map names with comma\n:\"\"\"\n ).lower()\nmapNameList = mapName.split(',')\n\n\ndef convertWPFile(mapName):\n fullMapName = 'mp_' + mapName + '_waypoints.gsc'\n waypoints = open(fullMapName, 'r')\n wpLines = waypoints.readlines()\n waypoints.close()\n wpLinesNew = []\n temp = 0\n for i, j in enumerate(wpLines):\n if i > 31:\n if 'level.' in j:\n wpLinesNew.append(' ' + j[10:])\n wpLinesNew.append('return waypoints;\\n}')\n newMapName = mapName.capitalize() + '.gsc'\n newWPFile = open(newMapName, 'w')\n newWPFile.write(mapName.capitalize() + '()\\n{\\n waypoints = [];\\n')\n for i in wpLinesNew:\n if 'waypointCount' not in i:\n newWPFile.write(' ' + i.strip() + '\\n')\n print('\\n%s.gsc successfully converted' % mapName)\n newWPFile.close()\n\n\nfor name in mapNameList:\n convertWPFile(name.strip())\n",
"step-5": "mapName =input('\\nEnter map name(s) (omitting the mp_ prefix)\\nSeparate map names with comma\\n:').lower()\r\nmapNameList =mapName.split(',')\r\n\r\ndef convertWPFile(mapName):\r\n #Converts mapname_waypoints.gsc file (old style PEzBot format) to newer mapname.gsc file (new style Bot Warfare format)\r\n fullMapName ='mp_'+mapName+'_waypoints.gsc'\r\n waypoints = open(fullMapName,'r')\r\n wpLines =waypoints.readlines()\r\n waypoints.close()\r\n wpLinesNew =[]\r\n temp =0\r\n for i,j in enumerate(wpLines):\r\n if i >31:\r\n if 'level.' in j:\r\n #if ('waypoints['+str(temp+1)+']') in j and 'size' not in j:\r\n #wpLinesNew.append('waypoints['+str(temp)+'].use = true;')\r\n #temp +=1\r\n wpLinesNew.append(' '+j[10:])\r\n wpLinesNew.append('return waypoints;\\n}')\r\n newMapName =mapName.capitalize()+'.gsc'\r\n newWPFile =open(newMapName,'w')\r\n newWPFile.write(mapName.capitalize()+'()\\n{\\n waypoints = [];\\n')\r\n for i in wpLinesNew:\r\n if 'waypointCount' not in i:\r\n newWPFile.write(' '+i.strip()+'\\n')\r\n print('\\n%s.gsc successfully converted' %mapName)\r\n newWPFile.close()\r\n \r\nfor name in mapNameList:\r\n convertWPFile(name.strip())\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Write a function remove_duplicates that takes in a list and removes elements of the list that are the same.
#For example: remove_duplicates([1,1,2,2])
#should return [1,2].
#Do not modify the list you take as input! Instead, return a new list.
def remove_duplicates(lst_of_items):
    """Return a new list with duplicate elements removed, preserving the
    order of first occurrence.

    The input list is not modified.
    """
    new_list = []
    for item in lst_of_items:
        # Linear membership scan keeps this correct even for unhashable
        # items; switch to an auxiliary set if inputs are large and hashable.
        if item not in new_list:
            new_list.append(item)
    return new_list


# Parenthesised print works under both Python 2 and 3 (the original mixed
# tabs with spaces and used a Py2-only print statement).
print(remove_duplicates([1, 3, 1, 2, 2, 3, 3, 3]))
|
normal
|
{
"blob_id": "b4d31fd05f8a9d66dcfffb55d805ab93d7ff9cdf",
"index": 5441,
"step-1": "#Write a function remove_duplicates that takes in a list and removes elements of the list that are the same.\n\n#For example: remove_duplicates([1,1,2,2])\n#should return [1,2].\n\n#Do not modify the list you take as input! Instead, return a new list.\n\ndef remove_duplicates(lst_of_items):\n\tnew_list=list()\n #dict={}\n\tfor item in lst_of_items:\n\t #dict[item]\n if item not in new_list:\n\t new_list.append(item)\n #print item\n\n\treturn new_list\n\nprint remove_duplicates([1,3,1,2,2,3,3,3])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from sklearn import svm, metrics, tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
# Load the whole CSV as strings: columns 0-5 are features, column 6 the label.
# NOTE(review): features are kept as dtype=str here; scikit-learn will try to
# coerce them — confirm the CSV columns are purely numeric.
my_data = np.loadtxt('edited_data/dataset_regression_edited.csv',delimiter=',', dtype='str')

training_data = my_data[:, 0:6]
validation_data = my_data[:, 6]

# Candidate models compared under identical train/report slices.
classifiers = [
    tree.DecisionTreeClassifier(max_depth=5),
    tree.DecisionTreeClassifier(max_depth=8),
    tree.DecisionTreeClassifier(max_depth=10),
    svm.SVC(kernel='linear'),
    svm.SVC(kernel='rbf'),
    AdaBoostClassifier(n_estimators=50),
    AdaBoostClassifier(n_estimators=100),
    KNeighborsClassifier(3),
    KNeighborsClassifier(5),
    KNeighborsClassifier(7)
]

# Fit each model on the first 1500 rows, then report on rows 681 onward.
# NOTE(review): rows 681-1499 appear in BOTH the training slice and the
# reporting slice, so these metrics overstate generalisation — confirm
# whether a disjoint hold-out split was intended.
for classifier in classifiers:
    classifier.fit(training_data[:1500], validation_data[:1500])
    expected = validation_data[681:]
    predicted = classifier.predict(training_data[681:])
    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(expected, predicted)))
    print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
|
normal
|
{
"blob_id": "3024359710148bfbb15677973555f214b1f878b7",
"index": 1521,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n",
"step-3": "<mask token>\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter\n =',', dtype='str')\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\nclassifiers = [tree.DecisionTreeClassifier(max_depth=5), tree.\n DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(\n max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=\n 100), KNeighborsClassifier(3), KNeighborsClassifier(5),\n KNeighborsClassifier(7)]\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n",
"step-4": "from sklearn import svm, metrics, tree\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv', delimiter\n =',', dtype='str')\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\nclassifiers = [tree.DecisionTreeClassifier(max_depth=5), tree.\n DecisionTreeClassifier(max_depth=8), tree.DecisionTreeClassifier(\n max_depth=10), svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50), AdaBoostClassifier(n_estimators=\n 100), KNeighborsClassifier(3), KNeighborsClassifier(5),\n KNeighborsClassifier(7)]\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print('Classification report for classifier %s:\\n%s\\n' % (classifier,\n metrics.classification_report(expected, predicted)))\n print('Confusion matrix:\\n%s' % metrics.confusion_matrix(expected,\n predicted))\n",
"step-5": "from sklearn import svm, metrics, tree\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\n\n\nmy_data = np.loadtxt('edited_data/dataset_regression_edited.csv',delimiter=',', dtype='str')\n\ntraining_data = my_data[:, 0:6]\nvalidation_data = my_data[:, 6]\n\n\nclassifiers = [\n tree.DecisionTreeClassifier(max_depth=5),\n tree.DecisionTreeClassifier(max_depth=8),\n tree.DecisionTreeClassifier(max_depth=10),\n svm.SVC(kernel='linear'),\n svm.SVC(kernel='rbf'),\n AdaBoostClassifier(n_estimators=50),\n AdaBoostClassifier(n_estimators=100),\n KNeighborsClassifier(3),\n KNeighborsClassifier(5),\n KNeighborsClassifier(7)\n]\n\n\nfor classifier in classifiers:\n classifier.fit(training_data[:1500], validation_data[:1500])\n expected = validation_data[681:]\n predicted = classifier.predict(training_data[681:])\n print(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, metrics.classification_report(expected, predicted)))\n print(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def convert_torch_to_flow(model, torch_weight_path, save_path):
    """Load a PyTorch checkpoint and re-save it as OneFlow weights for ``model``.

    model : OneFlow module whose parameter names match the PyTorch checkpoint.
    torch_weight_path : path to the PyTorch ``state_dict`` file.
    save_path : destination for the converted OneFlow checkpoint.
    """
    parameters = torch.load(torch_weight_path)
    new_parameters = dict()
    for key, value in parameters.items():
        # Skip BatchNorm's num_batches_tracked bookkeeping buffers; every
        # other tensor is moved to CPU and converted to a numpy array.
        if 'num_batches_tracked' not in key:
            val = value.detach().cpu().numpy()
            new_parameters[key] = val
    model.load_state_dict(new_parameters)
    flow.save(model.state_dict(), save_path)
<|reserved_special_token_1|>
import oneflow as flow
import torch
def convert_torch_to_flow(model, torch_weight_path, save_path):
    """Convert a PyTorch checkpoint into OneFlow weights for ``model``.

    Reads the state dict at ``torch_weight_path``, drops BatchNorm
    ``num_batches_tracked`` counters, converts every remaining tensor to a
    CPU numpy array, loads it into ``model`` and saves the resulting
    OneFlow state dict to ``save_path``.
    """
    torch_state = torch.load(torch_weight_path)
    flow_state = {
        name: tensor.detach().cpu().numpy()
        for name, tensor in torch_state.items()
        if 'num_batches_tracked' not in name
    }
    model.load_state_dict(flow_state)
    flow.save(model.state_dict(), save_path)
<|reserved_special_token_1|>
import oneflow as flow
import torch
def convert_torch_to_flow(model, torch_weight_path, save_path):
    """Re-save a PyTorch checkpoint as OneFlow weights for ``model``.

    model : OneFlow module whose parameter names match the PyTorch checkpoint.
    torch_weight_path : path to the PyTorch ``state_dict`` file.
    save_path : destination for the converted OneFlow checkpoint.
    """
    parameters = torch.load(torch_weight_path)
    new_parameters = dict()
    for key, value in parameters.items():
        # Skip BatchNorm's num_batches_tracked bookkeeping buffers; other
        # tensors are detached, moved to CPU and turned into numpy arrays.
        if "num_batches_tracked" not in key:
            val = value.detach().cpu().numpy()
            new_parameters[key] = val
    model.load_state_dict(new_parameters)
    flow.save(model.state_dict(), save_path)
|
flexible
|
{
"blob_id": "8a3cf65550893367b9001369111fa19a3e998d82",
"index": 9589,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if 'num_batches_tracked' not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-3": "import oneflow as flow\nimport torch\n\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if 'num_batches_tracked' not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-4": "import oneflow as flow\nimport torch\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if \"num_batches_tracked\" not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
def normal(data, mean, variance):
    """Log-density of a normal distribution at ``data``.

    Uses 3.1415 as the value of pi, matching the rest of this script.
    """
    squared_error = (data - mean) ** 2
    log_normaliser = 0.5 * math.log(2 * 3.1415 * variance)
    return -squared_error / (2 * variance) - log_normaliser
# Log of prior (1/3 per class) times likelihood of the observation 67.7854
# under three Gaussian class models (mean, variance pairs).
a = math.log(0.33333) + normal(67.7854,6.0998,13.5408)
b = math.log(0.33333) + normal(67.7854,119.3287,9.4803)
c = math.log(0.33333) + normal(67.7854,65.7801,12.6203)
# Evidence: sum of the unnormalised class likelihoods.
d = math.exp(a) + math.exp(b) + math.exp(c)
print math.exp(a)
print math.exp(b)
print math.exp(c)
# Posterior probability of the first class (Python 2 print statements).
print math.exp(a)/d
|
normal
|
{
"blob_id": "0edca9893d62eea6513543a1d3dd960e9e95d573",
"index": 7505,
"step-1": "import math\n\ndef normal(data,mean,variance):\n\t# print data-mean\n\treturn -1*(((data-mean)**2)/(2*variance)) - (0.5 * math.log(2*3.1415*variance))\n\na = math.log(0.33333) + normal(67.7854,6.0998,13.5408)\nb = math.log(0.33333) + normal(67.7854,119.3287,9.4803)\nc = math.log(0.33333) + normal(67.7854,65.7801,12.6203)\n\nd = math.exp(a) + math.exp(b) + math.exp(c)\n\nprint math.exp(a)\nprint math.exp(b)\nprint math.exp(c)\n\nprint math.exp(a)/d",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for n in range(2, 51):
for k in range(n, n * n + 1):
queries.append((n, k))
print(len(queries))
for n, k in queries:
print(n, k)
<|reserved_special_token_1|>
# Enumerate every (n, k) pair with 2 <= n <= 50 and n <= k <= n*n, then
# emit the pair count followed by one pair per line.
queries = [(size, target) for size in range(2, 51)
           for target in range(size, size * size + 1)]
print(len(queries))
for size, target in queries:
    print(size, target)
|
flexible
|
{
"blob_id": "798d5c68a0aa2057c28d7f333905f20fef965d70",
"index": 2850,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(2, 51):\n for k in range(n, n * n + 1):\n queries.append((n, k))\nprint(len(queries))\nfor n, k in queries:\n print(n, k)\n",
"step-3": "queries = []\nfor n in range(2, 51):\n for k in range(n, n * n + 1):\n queries.append((n, k))\nprint(len(queries))\nfor n, k in queries:\n print(n, k)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def compute_vis(X, F):
vis = np.matmul(X, np.transpose(F)).astype(np.complex64)
return vis
def compute_vis_grad(vis, Z, F):
Z_vis = compute_vis(Z, F)
grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)
return grad.real
<|reserved_special_token_0|>
def compute_amp_grad(amp, Z, A, sigma):
"""
Compute gradient of visibility amplitude.
"""
i1 = np.dot(A, Z)
amp_samples = np.abs(i1)
pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1
out = -2.0 / len(amp) * np.real(np.dot(pp, A))
return out
def chisq_amp(amp, Z, F, sigma):
""" Compute and return chi-squared of amplitude between X and Z. """
amp_Z = compute_amp(Z, F)
chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)
return chisq
def compute_cphase(X, F_cphase):
""" Given an image X and the DFT matrices from three baselines,
compute and return its closure phase. """
A1 = F_cphase[:, :, 0]
A2 = F_cphase[:, :, 1]
A3 = F_cphase[:, :, 2]
X = np.array(X)
vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)
cphase = np.angle(vis1 * vis2 * vis3)
return cphase
def compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):
"""
Compute gradient of closure phase chi-squared
cphase : closure phase of true image
Z : predicted image vector
F_cphase : 3 DFT matrices from three baselines in a closure triangle
"""
A1 = F_cphase[:, :, 0]
A2 = F_cphase[:, :, 1]
A3 = F_cphase[:, :, 2]
i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)
cphase_samples = np.angle(i1 * i2 * i3)
pref = np.sin(cphase - cphase_samples) / sigma ** 2
pt1 = pref / i1
pt2 = pref / i2
pt3 = pref / i3
out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +
np.dot(pt3, A3))
return out.reshape(npix ** 2)
def chisq_cphase(cphase, Z, F_cphase, sigma_cphase):
"""Closure Phase reduced chi-squared loss."""
cphase_samples = compute_cphase(Z, F_cphase)
chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -
cphase_samples)) / sigma_cphase ** 2)
return chisq
def compute_camp(X, Amatrices):
"""
Compute closure amplitude of image vector X.
"""
i1 = np.dot(Amatrices[0], X)
i2 = np.dot(Amatrices[1], X)
i3 = np.dot(Amatrices[2], X)
i4 = np.dot(Amatrices[3], X)
camp = np.abs(i1 * i2 / (i3 * i4))
return camp
def compute_camp_grad(camp, Z, Amatrices, sigma):
"""
The gradient of the closure amplitude chi-squared
camp: Closure amplitudes of true image
Z: Predicted image vector
Amatrices: DFT matrices of four baselines
"""
i1 = np.dot(Amatrices[0], Z)
i2 = np.dot(Amatrices[1], Z)
i3 = np.dot(Amatrices[2], Z)
i4 = np.dot(Amatrices[3], Z)
camp_samples = np.abs(i1 * i2 / (i3 * i4))
pp = (camp - camp_samples) * camp_samples / sigma ** 2
pt1 = pp / i1
pt2 = pp / i2
pt3 = -pp / i3
pt4 = -pp / i4
out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,
Amatrices[2]) + np.dot(pt4, Amatrices[3])
return -2.0 / len(camp) * np.real(out)
<|reserved_special_token_0|>
def compute_lgcamp(X, Amatrices):
""" Compute log closure amplitude of image vector X """
a1 = np.abs(np.dot(Amatrices[0], X))
a2 = np.abs(np.dot(Amatrices[1], X))
a3 = np.abs(np.dot(Amatrices[2], X))
a4 = np.abs(np.dot(Amatrices[3], X))
lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)
return lgcamp
<|reserved_special_token_0|>
def chisq_lgcamp(lgcamp, X, Amatrices, sigma):
"""Log Closure Amplitudes reduced chi-squared"""
a1 = np.abs(np.dot(Amatrices[0], X))
a2 = np.abs(np.dot(Amatrices[1], X))
a3 = np.abs(np.dot(Amatrices[2], X))
a4 = np.abs(np.dot(Amatrices[3], X))
samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)
chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)
return chisq
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def compute_vis(X, F):
vis = np.matmul(X, np.transpose(F)).astype(np.complex64)
return vis
def compute_vis_grad(vis, Z, F):
Z_vis = compute_vis(Z, F)
grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)
return grad.real
<|reserved_special_token_0|>
def compute_amp_grad(amp, Z, A, sigma):
"""
Compute gradient of visibility amplitude.
"""
i1 = np.dot(A, Z)
amp_samples = np.abs(i1)
pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1
out = -2.0 / len(amp) * np.real(np.dot(pp, A))
return out
def chisq_amp(amp, Z, F, sigma):
""" Compute and return chi-squared of amplitude between X and Z. """
amp_Z = compute_amp(Z, F)
chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)
return chisq
def compute_cphase(X, F_cphase):
""" Given an image X and the DFT matrices from three baselines,
compute and return its closure phase. """
A1 = F_cphase[:, :, 0]
A2 = F_cphase[:, :, 1]
A3 = F_cphase[:, :, 2]
X = np.array(X)
vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)
cphase = np.angle(vis1 * vis2 * vis3)
return cphase
def compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):
"""
Compute gradient of closure phase chi-squared
cphase : closure phase of true image
Z : predicted image vector
F_cphase : 3 DFT matrices from three baselines in a closure triangle
"""
A1 = F_cphase[:, :, 0]
A2 = F_cphase[:, :, 1]
A3 = F_cphase[:, :, 2]
i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)
cphase_samples = np.angle(i1 * i2 * i3)
pref = np.sin(cphase - cphase_samples) / sigma ** 2
pt1 = pref / i1
pt2 = pref / i2
pt3 = pref / i3
out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +
np.dot(pt3, A3))
return out.reshape(npix ** 2)
def chisq_cphase(cphase, Z, F_cphase, sigma_cphase):
"""Closure Phase reduced chi-squared loss."""
cphase_samples = compute_cphase(Z, F_cphase)
chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -
cphase_samples)) / sigma_cphase ** 2)
return chisq
def compute_camp(X, Amatrices):
"""
Compute closure amplitude of image vector X.
"""
i1 = np.dot(Amatrices[0], X)
i2 = np.dot(Amatrices[1], X)
i3 = np.dot(Amatrices[2], X)
i4 = np.dot(Amatrices[3], X)
camp = np.abs(i1 * i2 / (i3 * i4))
return camp
def compute_camp_grad(camp, Z, Amatrices, sigma):
"""
The gradient of the closure amplitude chi-squared
camp: Closure amplitudes of true image
Z: Predicted image vector
Amatrices: DFT matrices of four baselines
"""
i1 = np.dot(Amatrices[0], Z)
i2 = np.dot(Amatrices[1], Z)
i3 = np.dot(Amatrices[2], Z)
i4 = np.dot(Amatrices[3], Z)
camp_samples = np.abs(i1 * i2 / (i3 * i4))
pp = (camp - camp_samples) * camp_samples / sigma ** 2
pt1 = pp / i1
pt2 = pp / i2
pt3 = -pp / i3
pt4 = -pp / i4
out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,
Amatrices[2]) + np.dot(pt4, Amatrices[3])
return -2.0 / len(camp) * np.real(out)
def chisq_camp(camp, Z, Amatrices, sigma):
"""Closure Amplitudes reduced chi-squared loss."""
i1 = np.dot(Amatrices[0], Z)
i2 = np.dot(Amatrices[1], Z)
i3 = np.dot(Amatrices[2], Z)
i4 = np.dot(Amatrices[3], Z)
camp_samples = np.abs(i1 * i2 / (i3 * i4))
chisq = np.sum(np.abs((camp - camp_samples) / sigma) ** 2) / len(camp)
return chisq
def compute_lgcamp(X, Amatrices):
""" Compute log closure amplitude of image vector X """
a1 = np.abs(np.dot(Amatrices[0], X))
a2 = np.abs(np.dot(Amatrices[1], X))
a3 = np.abs(np.dot(Amatrices[2], X))
a4 = np.abs(np.dot(Amatrices[3], X))
lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)
return lgcamp
def compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):
"""The gradient of the Log closure amplitude chi-squared"""
i1 = np.dot(Amatrices[0], Z)
i2 = np.dot(Amatrices[1], Z)
i3 = np.dot(Amatrices[2], Z)
i4 = np.dot(Amatrices[3], Z)
lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.
abs(i3)) - np.log(np.abs(i4))
pp = (lgcamp - lgcamp_samples) / sigma ** 2
pt1 = pp / i1
pt2 = pp / i2
pt3 = -pp / i3
pt4 = -pp / i4
out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,
Amatrices[2]) + np.dot(pt4, Amatrices[3])
return -2.0 / len(lgcamp) * np.real(out)
def chisq_lgcamp(lgcamp, X, Amatrices, sigma):
"""Log Closure Amplitudes reduced chi-squared"""
a1 = np.abs(np.dot(Amatrices[0], X))
a2 = np.abs(np.dot(Amatrices[1], X))
a3 = np.abs(np.dot(Amatrices[2], X))
a4 = np.abs(np.dot(Amatrices[3], X))
samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)
chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)
return chisq
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def compute_vis(X, F):
vis = np.matmul(X, np.transpose(F)).astype(np.complex64)
return vis
def compute_vis_grad(vis, Z, F):
Z_vis = compute_vis(Z, F)
grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)
return grad.real
def chisq_vis(vis, Z, F, sigma):
"""
Compute mean chi-squared of visibilities of Z.
"""
samples = compute_vis(Z, F)
chisq = np.sum(np.abs((samples - vis) / sigma) ** 2) / (2 * len(vis))
return chisq
def compute_amp(X, F):
""" Given an image X and DFT matrix F, compute and return its
visibility amplitude. """
amp = np.abs(np.dot(F, X))
return amp
def compute_amp_grad(amp, Z, A, sigma):
"""
Compute gradient of visibility amplitude.
"""
i1 = np.dot(A, Z)
amp_samples = np.abs(i1)
pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1
out = -2.0 / len(amp) * np.real(np.dot(pp, A))
return out
def chisq_amp(amp, Z, F, sigma):
""" Compute and return chi-squared of amplitude between X and Z. """
amp_Z = compute_amp(Z, F)
chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)
return chisq
def compute_cphase(X, F_cphase):
""" Given an image X and the DFT matrices from three baselines,
compute and return its closure phase. """
A1 = F_cphase[:, :, 0]
A2 = F_cphase[:, :, 1]
A3 = F_cphase[:, :, 2]
X = np.array(X)
vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)
cphase = np.angle(vis1 * vis2 * vis3)
return cphase
def compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):
"""
Compute gradient of closure phase chi-squared
cphase : closure phase of true image
Z : predicted image vector
F_cphase : 3 DFT matrices from three baselines in a closure triangle
"""
A1 = F_cphase[:, :, 0]
A2 = F_cphase[:, :, 1]
A3 = F_cphase[:, :, 2]
i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)
cphase_samples = np.angle(i1 * i2 * i3)
pref = np.sin(cphase - cphase_samples) / sigma ** 2
pt1 = pref / i1
pt2 = pref / i2
pt3 = pref / i3
out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +
np.dot(pt3, A3))
return out.reshape(npix ** 2)
def chisq_cphase(cphase, Z, F_cphase, sigma_cphase):
"""Closure Phase reduced chi-squared loss."""
cphase_samples = compute_cphase(Z, F_cphase)
chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -
cphase_samples)) / sigma_cphase ** 2)
return chisq
def compute_camp(X, Amatrices):
"""
Compute closure amplitude of image vector X.
"""
i1 = np.dot(Amatrices[0], X)
i2 = np.dot(Amatrices[1], X)
i3 = np.dot(Amatrices[2], X)
i4 = np.dot(Amatrices[3], X)
camp = np.abs(i1 * i2 / (i3 * i4))
return camp
def compute_camp_grad(camp, Z, Amatrices, sigma):
"""
The gradient of the closure amplitude chi-squared
camp: Closure amplitudes of true image
Z: Predicted image vector
Amatrices: DFT matrices of four baselines
"""
i1 = np.dot(Amatrices[0], Z)
i2 = np.dot(Amatrices[1], Z)
i3 = np.dot(Amatrices[2], Z)
i4 = np.dot(Amatrices[3], Z)
camp_samples = np.abs(i1 * i2 / (i3 * i4))
pp = (camp - camp_samples) * camp_samples / sigma ** 2
pt1 = pp / i1
pt2 = pp / i2
pt3 = -pp / i3
pt4 = -pp / i4
out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,
Amatrices[2]) + np.dot(pt4, Amatrices[3])
return -2.0 / len(camp) * np.real(out)
def chisq_camp(camp, Z, Amatrices, sigma):
"""Closure Amplitudes reduced chi-squared loss."""
i1 = np.dot(Amatrices[0], Z)
i2 = np.dot(Amatrices[1], Z)
i3 = np.dot(Amatrices[2], Z)
i4 = np.dot(Amatrices[3], Z)
camp_samples = np.abs(i1 * i2 / (i3 * i4))
chisq = np.sum(np.abs((camp - camp_samples) / sigma) ** 2) / len(camp)
return chisq
def compute_lgcamp(X, Amatrices):
""" Compute log closure amplitude of image vector X """
a1 = np.abs(np.dot(Amatrices[0], X))
a2 = np.abs(np.dot(Amatrices[1], X))
a3 = np.abs(np.dot(Amatrices[2], X))
a4 = np.abs(np.dot(Amatrices[3], X))
lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)
return lgcamp
def compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):
"""The gradient of the Log closure amplitude chi-squared"""
i1 = np.dot(Amatrices[0], Z)
i2 = np.dot(Amatrices[1], Z)
i3 = np.dot(Amatrices[2], Z)
i4 = np.dot(Amatrices[3], Z)
lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.
abs(i3)) - np.log(np.abs(i4))
pp = (lgcamp - lgcamp_samples) / sigma ** 2
pt1 = pp / i1
pt2 = pp / i2
pt3 = -pp / i3
pt4 = -pp / i4
out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,
Amatrices[2]) + np.dot(pt4, Amatrices[3])
return -2.0 / len(lgcamp) * np.real(out)
def chisq_lgcamp(lgcamp, X, Amatrices, sigma):
"""Log Closure Amplitudes reduced chi-squared"""
a1 = np.abs(np.dot(Amatrices[0], X))
a2 = np.abs(np.dot(Amatrices[1], X))
a3 = np.abs(np.dot(Amatrices[2], X))
a4 = np.abs(np.dot(Amatrices[3], X))
samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)
chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)
return chisq
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
def compute_vis(X, F):
    """Complex model visibilities of image vector ``X`` under DFT matrix ``F``."""
    projected = np.matmul(X, np.transpose(F))
    return projected.astype(np.complex64)
def compute_vis_grad(vis, Z, F):
    """Real-valued gradient of the complex-visibility data term at image ``Z``."""
    model = np.matmul(Z, np.transpose(F)).astype(np.complex64)
    residual = vis - model
    return np.real(-np.matmul(np.conjugate(F.T), residual))
def chisq_vis(vis, Z, F, sigma):
    """
    Mean chi-squared between the model visibilities of ``Z`` and ``vis``.
    """
    model = np.matmul(Z, np.transpose(F)).astype(np.complex64)
    weighted = (model - vis) / sigma
    return np.sum(np.abs(weighted) ** 2) / (2 * len(vis))
def compute_amp(X, F):
    """Visibility amplitudes of image ``X``: the modulus of ``F @ X``."""
    return np.abs(np.dot(F, X))
def compute_amp_grad(amp, Z, A, sigma):
    """
    Gradient of the visibility-amplitude chi-squared w.r.t. the image.

    amp   : measured visibility amplitudes.
    Z     : image vector being optimised.
    A     : DFT matrix mapping image pixels to complex visibilities.
    sigma : per-measurement amplitude uncertainties.

    Returns the real-valued gradient, one entry per image pixel.
    """
    # Model visibilities and their amplitudes for the current image.
    i1 = np.dot(A, Z)
    amp_samples = np.abs(i1)
    # Weighted residual; dividing by the complex i1 folds the d|v|/dv
    # factor into the chain rule.
    pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1
    # Normalised by the number of data points, matching chisq_amp.
    out = -2.0 / len(amp) * np.real(np.dot(pp, A))
    return out
def chisq_amp(amp, Z, F, sigma):
    """Reduced chi-squared between measured amplitudes and those of image ``Z``."""
    model = np.abs(np.dot(F, Z))
    normalized = (amp - model) / sigma
    return np.sum(np.abs(normalized) ** 2) / len(amp)
def compute_cphase(X, F_cphase):
    """Closure phases of image ``X`` for the triangle whose three baseline
    DFT matrices are stacked along the last axis of ``F_cphase``."""
    row = np.array(X).reshape((1, -1))
    legs = [
        np.matmul(row, np.transpose(F_cphase[:, :, b])).astype(np.complex64)
        for b in range(3)
    ]
    # The closure phase is the phase of the bispectrum v1 * v2 * v3.
    return np.angle(legs[0] * legs[1] * legs[2])
def compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):
    """
    Compute gradient of closure phase chi-squared.

    cphase : closure phase of true image
    Z : predicted image vector
    F_cphase : 3 DFT matrices from three baselines in a closure triangle
    sigma : closure-phase uncertainties
    npix : image side length; the gradient is returned flattened to npix**2
    """
    # DFT matrix for each baseline of the closure triangle.
    A1 = F_cphase[:, :, 0]
    A2 = F_cphase[:, :, 1]
    A3 = F_cphase[:, :, 2]
    # Model visibilities of the current image on each baseline.
    i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
    i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
    i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)
    # Model closure phase is the phase of the bispectrum i1 * i2 * i3.
    cphase_samples = np.angle(i1 * i2 * i3)
    # Weighted phase residual; sin() keeps the residual 2*pi-periodic.
    pref = np.sin(cphase - cphase_samples) / sigma ** 2
    pt1 = pref / i1
    pt2 = pref / i2
    pt3 = pref / i3
    # Imaginary part implements d(angle)/d(image) via the chain rule;
    # the 2/len(cphase) factor matches chisq_cphase's normalisation.
    out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +
        np.dot(pt3, A3))
    return out.reshape(npix ** 2)
def chisq_cphase(cphase, Z, F_cphase, sigma_cphase):
    """Closure-phase reduced chi-squared of image ``Z``."""
    row = np.array(Z).reshape((1, -1))
    legs = [
        np.matmul(row, np.transpose(F_cphase[:, :, b])).astype(np.complex64)
        for b in range(3)
    ]
    model = np.angle(legs[0] * legs[1] * legs[2])
    mismatch = (1.0 - np.cos(cphase - model)) / sigma_cphase ** 2
    return 2.0 / len(cphase) * np.sum(mismatch)
def compute_camp(X, Amatrices):
    """
    Closure amplitudes ``|i1 * i2 / (i3 * i4)|`` of image vector ``X``
    over the four baseline DFT matrices in ``Amatrices``.
    """
    i1, i2, i3, i4 = (np.dot(mat, X) for mat in Amatrices[:4])
    return np.abs(i1 * i2 / (i3 * i4))
def compute_camp_grad(camp, Z, Amatrices, sigma):
    """
    The gradient of the closure amplitude chi-squared.

    camp: Closure amplitudes of true image
    Z: Predicted image vector
    Amatrices: DFT matrices of four baselines
    sigma: closure-amplitude uncertainties
    """
    # Model visibilities on the four baselines of the quadrangle.
    i1 = np.dot(Amatrices[0], Z)
    i2 = np.dot(Amatrices[1], Z)
    i3 = np.dot(Amatrices[2], Z)
    i4 = np.dot(Amatrices[3], Z)
    # Model closure amplitude |i1*i2/(i3*i4)|.
    camp_samples = np.abs(i1 * i2 / (i3 * i4))
    # Weighted residual; each pt* term divides by its visibility, with the
    # sign reflecting numerator (+) vs denominator (-) baselines.
    pp = (camp - camp_samples) * camp_samples / sigma ** 2
    pt1 = pp / i1
    pt2 = pp / i2
    pt3 = -pp / i3
    pt4 = -pp / i4
    out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,
        Amatrices[2]) + np.dot(pt4, Amatrices[3])
    # Real part, normalised by the number of closure amplitudes.
    return -2.0 / len(camp) * np.real(out)
def chisq_camp(camp, Z, Amatrices, sigma):
    """Reduced chi-squared between measured closure amplitudes and those of Z."""
    numerator = np.dot(Amatrices[0], Z) * np.dot(Amatrices[1], Z)
    denominator = np.dot(Amatrices[2], Z) * np.dot(Amatrices[3], Z)
    model = np.abs(numerator / denominator)
    residual = (camp - model) / sigma
    return np.sum(np.abs(residual) ** 2) / len(camp)
def compute_lgcamp(X, Amatrices):
    """Log closure amplitude ln(a1) + ln(a2) - ln(a3) - ln(a4) of image X."""
    # Visibility amplitudes of X on the four quadrangle baselines.
    amps = [np.abs(np.dot(A, X)) for A in Amatrices[:4]]
    return np.log(amps[0]) + np.log(amps[1]) - np.log(amps[2]) - np.log(amps[3])
def compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):
    """The gradient of the Log closure amplitude chi-squared
    lgcamp : log closure amplitudes of the true image
    Z : predicted image vector
    Amatrices : DFT matrices of the four baselines in the quadrangle
    sigma : standard deviation of the log closure-amplitude measurements
    """
    # Model visibilities of Z on the four baselines of the quadrangle.
    i1 = np.dot(Amatrices[0], Z)
    i2 = np.dot(Amatrices[1], Z)
    i3 = np.dot(Amatrices[2], Z)
    i4 = np.dot(Amatrices[3], Z)
    # Model log closure amplitude: ln|i1| + ln|i2| - ln|i3| - ln|i4|.
    lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.
        abs(i3)) - np.log(np.abs(i4))
    # Residual weighted by the measurement variance.
    pp = (lgcamp - lgcamp_samples) / sigma ** 2
    # Numerator baselines contribute with +, denominator baselines with -.
    pt1 = pp / i1
    pt2 = pp / i2
    pt3 = -pp / i3
    pt4 = -pp / i4
    out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,
        Amatrices[2]) + np.dot(pt4, Amatrices[3])
    # Real part: gradient with respect to the (real-valued) image pixels.
    return -2.0 / len(lgcamp) * np.real(out)
def chisq_lgcamp(lgcamp, X, Amatrices, sigma):
    """Reduced chi-squared between measured log closure amplitudes and those of X."""
    amps = [np.abs(np.dot(A, X)) for A in Amatrices[:4]]
    model = np.log(amps[0]) + np.log(amps[1]) - np.log(amps[2]) - np.log(amps[3])
    return np.sum(np.abs((lgcamp - model) / sigma) ** 2) / len(lgcamp)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 14 20:35:10 2020
@author: Johanna
"""
import numpy as np
###############################################################################
# Complex Visibility Functions
###############################################################################
def compute_vis(X, F):
    """Complex visibilities of image vector X under the DFT matrix F."""
    vis = np.matmul(X, F.T)
    return vis.astype(np.complex64)
def compute_vis_grad(vis, Z, F):
    """Gradient of the visibility chi-squared with respect to image Z (real part)."""
    residual = vis - compute_vis(Z, F)
    gradient = -np.matmul(np.conjugate(F.T), residual)
    return gradient.real
def chisq_vis(vis, Z, F, sigma):
    """Mean chi-squared between measured visibilities and those of image Z."""
    model = compute_vis(Z, F)
    normalized = np.abs((model - vis) / sigma)
    return np.sum(normalized ** 2) / (2 * len(vis))
###############################################################################
# Visibility Amplitude Functions
###############################################################################
def compute_amp(X, F):
    """Visibility amplitudes |F @ X| of image vector X under DFT matrix F."""
    vis = np.matmul(F, X)
    return np.abs(vis)
def compute_amp_grad(amp, Z, A, sigma):
    """Gradient of the visibility-amplitude chi-squared with respect to Z."""
    vis = np.dot(A, Z)
    model_amp = np.abs(vis)
    # Variance-weighted residual, divided by the visibility (chain rule
    # through |.|); real part gives the derivative w.r.t. real pixels.
    weight = (amp - model_amp) * model_amp / sigma ** 2 / vis
    return np.real(np.dot(weight, A)) * (-2.0 / len(amp))
def chisq_amp(amp, Z, F, sigma):
    """Reduced chi-squared between measured amplitudes and those of image Z."""
    model = compute_amp(Z, F)
    residual = (amp - model) / sigma
    return np.sum(np.abs(residual) ** 2) / len(amp)
###############################################################################
# Closure Phase Functions
###############################################################################
def compute_cphase(X, F_cphase):
    """Closure phases of image X from the three baseline DFT matrices.

    F_cphase stacks the per-baseline DFT matrices along its last axis;
    the closure phase is the argument of the bispectrum v1*v2*v3.
    """
    X_row = np.asarray(X).reshape((1, -1))
    visibilities = [
        np.matmul(X_row, F_cphase[:, :, k].T).astype(np.complex64)
        for k in range(3)
    ]
    return np.angle(visibilities[0] * visibilities[1] * visibilities[2])
def compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):
    '''
    Compute gradient of closure phase chi-squared
    cphase : closure phase of true image
    Z : predicted image vector
    F_cphase : 3 DFT matrices from three baselines in a closure triangle
    sigma : standard deviation of the closure-phase measurements
    npix : image side length; the gradient is returned with shape (npix**2,)
    '''
    # Get fourier matrices of each baseline
    A1 = F_cphase[:, :, 0]
    A2 = F_cphase[:, :, 1]
    A3 = F_cphase[:, :, 2]
    # Model visibilities of Z on each baseline (one row vector per baseline).
    i1 = np.matmul(Z.reshape((1,-1)), np.transpose(A1)).astype(np.complex64)
    i2 = np.matmul(Z.reshape((1,-1)), np.transpose(A2)).astype(np.complex64)
    i3 = np.matmul(Z.reshape((1,-1)), np.transpose(A3)).astype(np.complex64)
    # Model closure phase: argument of the bispectrum i1*i2*i3.
    cphase_samples = np.angle(i1 * i2 * i3)
    # Variance-weighted derivative of the chi-squared w.r.t. the model phase.
    pref = np.sin(cphase - cphase_samples)/(sigma**2)
    pt1 = pref/i1
    pt2 = pref/i2
    pt3 = pref/i3
    # Chain rule through each baseline's DFT; imag(...) extracts the phase
    # derivative with respect to the (real-valued) image pixels.
    out = -(2.0/len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) + np.dot(pt3, A3))
    return out.reshape(npix**2)
def chisq_cphase(cphase, Z, F_cphase, sigma_cphase):
    """Closure-phase reduced chi-squared between data and image Z."""
    model = compute_cphase(Z, F_cphase)
    # 1 - cos(delta) handles phase wrapping in the misfit.
    misfit = (1.0 - np.cos(cphase - model)) / sigma_cphase ** 2
    return (2.0 / len(cphase)) * np.sum(misfit)
###############################################################################
# Closure Amplitude Functions
###############################################################################
def compute_camp(X, Amatrices):
    """Closure amplitudes |v1*v2 / (v3*v4)| of image vector X on a quadrangle.

    Amatrices holds the four baseline DFT matrices (numerator: 0, 1;
    denominator: 2, 3).
    """
    numerator = np.dot(Amatrices[0], X) * np.dot(Amatrices[1], X)
    denominator = np.dot(Amatrices[2], X) * np.dot(Amatrices[3], X)
    return np.abs(numerator / denominator)
def compute_camp_grad(camp, Z, Amatrices, sigma):
    """
    The gradient of the closure amplitude chi-squared
    camp: Closure amplitudes of true image
    Z: Predicted image vector
    Amatrices: DFT matrices of four baselines
    sigma: standard deviation of the closure-amplitude measurements
    """
    # Model visibilities of Z on the four baselines of the quadrangle.
    i1 = np.dot(Amatrices[0], Z)
    i2 = np.dot(Amatrices[1], Z)
    i3 = np.dot(Amatrices[2], Z)
    i4 = np.dot(Amatrices[3], Z)
    # Model closure amplitude |i1 i2 / (i3 i4)|.
    camp_samples = np.abs((i1 * i2)/(i3 * i4))
    # Residual weighted by the model amplitude and the measurement variance.
    pp = ((camp - camp_samples) * camp_samples)/(sigma**2)
    # Numerator baselines contribute with +, denominator baselines with -.
    pt1 = pp/i1
    pt2 = pp/i2
    pt3 = -pp/i3
    pt4 = -pp/i4
    out = (np.dot(pt1, Amatrices[0]) +
           np.dot(pt2, Amatrices[1]) +
           np.dot(pt3, Amatrices[2]) +
           np.dot(pt4, Amatrices[3]))
    # Real part: gradient with respect to the (real-valued) image pixels.
    return (-2.0/len(camp)) * np.real(out)
def chisq_camp(camp, Z, Amatrices, sigma):
    """Closure-amplitude reduced chi-squared between data and image Z."""
    numerator = np.dot(Amatrices[0], Z) * np.dot(Amatrices[1], Z)
    denominator = np.dot(Amatrices[2], Z) * np.dot(Amatrices[3], Z)
    model = np.abs(numerator / denominator)
    residual = (camp - model) / sigma
    return np.sum(np.abs(residual) ** 2) / len(camp)
###############################################################################
# Log Closure Amplitude Functions
###############################################################################
def compute_lgcamp(X, Amatrices):
    """Log closure amplitude ln(a1) + ln(a2) - ln(a3) - ln(a4) of image X."""
    # Visibility amplitudes of X on the four quadrangle baselines.
    amps = [np.abs(np.dot(A, X)) for A in Amatrices[:4]]
    return np.log(amps[0]) + np.log(amps[1]) - np.log(amps[2]) - np.log(amps[3])
def compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):
    """The gradient of the Log closure amplitude chi-squared
    lgcamp: log closure amplitudes of the true image
    Z: predicted image vector
    Amatrices: DFT matrices of the four baselines in the quadrangle
    sigma: standard deviation of the log closure-amplitude measurements
    """
    # Model visibilities of Z on the four baselines of the quadrangle.
    i1 = np.dot(Amatrices[0], Z)
    i2 = np.dot(Amatrices[1], Z)
    i3 = np.dot(Amatrices[2], Z)
    i4 = np.dot(Amatrices[3], Z)
    # Model log closure amplitude: ln|i1| + ln|i2| - ln|i3| - ln|i4|.
    lgcamp_samples = (np.log(np.abs(i1)) +
                      np.log(np.abs(i2)) -
                      np.log(np.abs(i3)) -
                      np.log(np.abs(i4)))
    # Residual weighted by the measurement variance.
    pp = (lgcamp - lgcamp_samples) / (sigma**2)
    # Numerator baselines contribute with +, denominator baselines with -.
    pt1 = pp / i1
    pt2 = pp / i2
    pt3 = -pp / i3
    pt4 = -pp / i4
    out = (np.dot(pt1, Amatrices[0]) +
           np.dot(pt2, Amatrices[1]) +
           np.dot(pt3, Amatrices[2]) +
           np.dot(pt4, Amatrices[3]))
    # Real part: gradient with respect to the (real-valued) image pixels.
    return (-2.0/len(lgcamp)) * np.real(out)
def chisq_lgcamp(lgcamp, X, Amatrices, sigma):
    """Reduced chi-squared between measured and model log closure amplitudes.

    lgcamp : log closure amplitudes of the true image
    X : predicted image vector
    Amatrices : DFT matrices of the four baselines in the quadrangle
    sigma : standard deviation of each log closure amplitude
    """
    amps = [np.abs(np.dot(A, X)) for A in Amatrices]
    model = (np.log(amps[0]) + np.log(amps[1]) -
             np.log(amps[2]) - np.log(amps[3]))
    return np.sum(np.abs((lgcamp - model) / sigma) ** 2) / len(lgcamp)
|
flexible
|
{
"blob_id": "ea3217be80b6d1d3a400139bc4a91870cd2f1d87",
"index": 5118,
"step-1": "<mask token>\n\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\n\n<mask token>\n\n\ndef compute_amp_grad(amp, Z, A, sigma):\n \"\"\" \n Compute gradient of visibility amplitude.\n \"\"\"\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1\n out = -2.0 / len(amp) * np.real(np.dot(pp, A))\n return out\n\n\ndef chisq_amp(amp, Z, F, sigma):\n \"\"\" Compute and return chi-squared of amplitude between X and Z. \"\"\"\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)\n return chisq\n\n\ndef compute_cphase(X, F_cphase):\n \"\"\" Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n X = np.array(X)\n vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase = np.angle(vis1 * vis2 * vis3)\n return cphase\n\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n \"\"\" \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n pref = np.sin(cphase - cphase_samples) / sigma ** 2\n pt1 = pref 
/ i1\n pt2 = pref / i2\n pt3 = pref / i3\n out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +\n np.dot(pt3, A3))\n return out.reshape(npix ** 2)\n\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -\n cphase_samples)) / sigma_cphase ** 2)\n return chisq\n\n\ndef compute_camp(X, Amatrices):\n \"\"\"\n Compute closure amplitude of image vector X.\n \"\"\"\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n camp = np.abs(i1 * i2 / (i3 * i4))\n return camp\n\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n pp = (camp - camp_samples) * camp_samples / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(camp) * np.real(out)\n\n\n<mask token>\n\n\ndef compute_lgcamp(X, Amatrices):\n \"\"\" Compute log closure amplitude of image vector X \"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\n\n<mask token>\n\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = 
np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)\n return chisq\n",
"step-2": "<mask token>\n\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\n\n<mask token>\n\n\ndef compute_amp_grad(amp, Z, A, sigma):\n \"\"\" \n Compute gradient of visibility amplitude.\n \"\"\"\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1\n out = -2.0 / len(amp) * np.real(np.dot(pp, A))\n return out\n\n\ndef chisq_amp(amp, Z, F, sigma):\n \"\"\" Compute and return chi-squared of amplitude between X and Z. \"\"\"\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)\n return chisq\n\n\ndef compute_cphase(X, F_cphase):\n \"\"\" Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n X = np.array(X)\n vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase = np.angle(vis1 * vis2 * vis3)\n return cphase\n\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n \"\"\" \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n pref = np.sin(cphase - cphase_samples) / sigma ** 2\n pt1 = pref 
/ i1\n pt2 = pref / i2\n pt3 = pref / i3\n out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +\n np.dot(pt3, A3))\n return out.reshape(npix ** 2)\n\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -\n cphase_samples)) / sigma_cphase ** 2)\n return chisq\n\n\ndef compute_camp(X, Amatrices):\n \"\"\"\n Compute closure amplitude of image vector X.\n \"\"\"\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n camp = np.abs(i1 * i2 / (i3 * i4))\n return camp\n\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n pp = (camp - camp_samples) * camp_samples / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(camp) * np.real(out)\n\n\ndef chisq_camp(camp, Z, Amatrices, sigma):\n \"\"\"Closure Amplitudes reduced chi-squared loss.\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n chisq = np.sum(np.abs((camp - camp_samples) / sigma) ** 2) / len(camp)\n return chisq\n\n\ndef compute_lgcamp(X, Amatrices):\n \"\"\" Compute log closure amplitude of image vector X \"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n 
a4 = np.abs(np.dot(Amatrices[3], X))\n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\n\ndef compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):\n \"\"\"The gradient of the Log closure amplitude chi-squared\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.\n abs(i3)) - np.log(np.abs(i4))\n pp = (lgcamp - lgcamp_samples) / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(lgcamp) * np.real(out)\n\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)\n return chisq\n",
"step-3": "<mask token>\n\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\n\ndef chisq_vis(vis, Z, F, sigma):\n \"\"\" \n Compute mean chi-squared of visibilities of Z.\n \"\"\"\n samples = compute_vis(Z, F)\n chisq = np.sum(np.abs((samples - vis) / sigma) ** 2) / (2 * len(vis))\n return chisq\n\n\ndef compute_amp(X, F):\n \"\"\" Given an image X and DFT matrix F, compute and return its \n visibility amplitude. \"\"\"\n amp = np.abs(np.dot(F, X))\n return amp\n\n\ndef compute_amp_grad(amp, Z, A, sigma):\n \"\"\" \n Compute gradient of visibility amplitude.\n \"\"\"\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1\n out = -2.0 / len(amp) * np.real(np.dot(pp, A))\n return out\n\n\ndef chisq_amp(amp, Z, F, sigma):\n \"\"\" Compute and return chi-squared of amplitude between X and Z. \"\"\"\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)\n return chisq\n\n\ndef compute_cphase(X, F_cphase):\n \"\"\" Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. 
\"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n X = np.array(X)\n vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase = np.angle(vis1 * vis2 * vis3)\n return cphase\n\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n \"\"\" \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n pref = np.sin(cphase - cphase_samples) / sigma ** 2\n pt1 = pref / i1\n pt2 = pref / i2\n pt3 = pref / i3\n out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +\n np.dot(pt3, A3))\n return out.reshape(npix ** 2)\n\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -\n cphase_samples)) / sigma_cphase ** 2)\n return chisq\n\n\ndef compute_camp(X, Amatrices):\n \"\"\"\n Compute closure amplitude of image vector X.\n \"\"\"\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n camp = np.abs(i1 * i2 / (i3 * i4))\n return camp\n\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT 
matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n pp = (camp - camp_samples) * camp_samples / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(camp) * np.real(out)\n\n\ndef chisq_camp(camp, Z, Amatrices, sigma):\n \"\"\"Closure Amplitudes reduced chi-squared loss.\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n chisq = np.sum(np.abs((camp - camp_samples) / sigma) ** 2) / len(camp)\n return chisq\n\n\ndef compute_lgcamp(X, Amatrices):\n \"\"\" Compute log closure amplitude of image vector X \"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\n\ndef compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):\n \"\"\"The gradient of the Log closure amplitude chi-squared\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.\n abs(i3)) - np.log(np.abs(i4))\n pp = (lgcamp - lgcamp_samples) / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(lgcamp) * np.real(out)\n\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = 
np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)\n return chisq\n",
"step-4": "<mask token>\nimport numpy as np\n\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\n\ndef chisq_vis(vis, Z, F, sigma):\n \"\"\" \n Compute mean chi-squared of visibilities of Z.\n \"\"\"\n samples = compute_vis(Z, F)\n chisq = np.sum(np.abs((samples - vis) / sigma) ** 2) / (2 * len(vis))\n return chisq\n\n\ndef compute_amp(X, F):\n \"\"\" Given an image X and DFT matrix F, compute and return its \n visibility amplitude. \"\"\"\n amp = np.abs(np.dot(F, X))\n return amp\n\n\ndef compute_amp_grad(amp, Z, A, sigma):\n \"\"\" \n Compute gradient of visibility amplitude.\n \"\"\"\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1\n out = -2.0 / len(amp) * np.real(np.dot(pp, A))\n return out\n\n\ndef chisq_amp(amp, Z, F, sigma):\n \"\"\" Compute and return chi-squared of amplitude between X and Z. \"\"\"\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)\n return chisq\n\n\ndef compute_cphase(X, F_cphase):\n \"\"\" Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. 
\"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n X = np.array(X)\n vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase = np.angle(vis1 * vis2 * vis3)\n return cphase\n\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n \"\"\" \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n pref = np.sin(cphase - cphase_samples) / sigma ** 2\n pt1 = pref / i1\n pt2 = pref / i2\n pt3 = pref / i3\n out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +\n np.dot(pt3, A3))\n return out.reshape(npix ** 2)\n\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -\n cphase_samples)) / sigma_cphase ** 2)\n return chisq\n\n\ndef compute_camp(X, Amatrices):\n \"\"\"\n Compute closure amplitude of image vector X.\n \"\"\"\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n camp = np.abs(i1 * i2 / (i3 * i4))\n return camp\n\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT 
matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n pp = (camp - camp_samples) * camp_samples / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(camp) * np.real(out)\n\n\ndef chisq_camp(camp, Z, Amatrices, sigma):\n \"\"\"Closure Amplitudes reduced chi-squared loss.\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n chisq = np.sum(np.abs((camp - camp_samples) / sigma) ** 2) / len(camp)\n return chisq\n\n\ndef compute_lgcamp(X, Amatrices):\n \"\"\" Compute log closure amplitude of image vector X \"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\n\ndef compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):\n \"\"\"The gradient of the Log closure amplitude chi-squared\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.\n abs(i3)) - np.log(np.abs(i4))\n pp = (lgcamp - lgcamp_samples) / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(lgcamp) * np.real(out)\n\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = 
np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)\n return chisq\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 14 20:35:10 2020\n\n@author: Johanna\n\"\"\"\nimport numpy as np\n\n###############################################################################\n# Complex Visibility Functions\n###############################################################################\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\ndef chisq_vis(vis, Z, F, sigma):\n ''' \n Compute mean chi-squared of visibilities of Z.\n '''\n samples = compute_vis(Z, F)\n chisq = np.sum(np.abs((samples-vis)/sigma)**2)/(2*len(vis))\n return chisq\n\n###############################################################################\n# Visibility Amplitude Functions\n###############################################################################\n \ndef compute_amp(X, F):\n ''' Given an image X and DFT matrix F, compute and return its \n visibility amplitude. '''\n amp = np.abs(np.dot(F, X))\n return amp\n\ndef compute_amp_grad(amp, Z, A, sigma):\n ''' \n Compute gradient of visibility amplitude.\n '''\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n\n pp = ((amp - amp_samples) * amp_samples) / (sigma**2) / i1\n out = (-2.0/len(amp)) * np.real(np.dot(pp, A))\n return out\n\ndef chisq_amp(amp, Z, F, sigma):\n ''' Compute and return chi-squared of amplitude between X and Z. '''\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z)/sigma)**2)/len(amp)\n return chisq \n\n###############################################################################\n# Closure Phase Functions\n###############################################################################\n\ndef compute_cphase(X, F_cphase):\n ''' Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. 
'''\n # Get fourier matrices of each baseline \n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n \n X = np.array(X)\n \n # Compute observed closure phase of image\n vis1 = np.matmul(X.reshape((1,-1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1,-1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1,-1)), np.transpose(A3)).astype(np.complex64)\n \n cphase = np.angle(vis1 * vis2 * vis3) \n \n return cphase\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n ''' \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n '''\n # Get fourier matrices of each baseline \n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n \n i1 = np.matmul(Z.reshape((1,-1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1,-1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1,-1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n \n pref = np.sin(cphase - cphase_samples)/(sigma**2)\n pt1 = pref/i1\n pt2 = pref/i2\n pt3 = pref/i3\n out = -(2.0/len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) + np.dot(pt3, A3))\n \n return out.reshape(npix**2)\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq= (2.0/len(cphase)) * np.sum((1.0 - np.cos(cphase-cphase_samples))/(sigma_cphase**2))\n return chisq \n \n###############################################################################\n# Closure Amplitude Functions\n###############################################################################\n \ndef compute_camp(X, Amatrices):\n '''\n Compute closure amplitude of image vector X.\n '''\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = 
np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n \n camp = np.abs((i1 * i2)/(i3 * i4))\n return camp\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs((i1 * i2)/(i3 * i4))\n\n pp = ((camp - camp_samples) * camp_samples)/(sigma**2)\n pt1 = pp/i1\n pt2 = pp/i2\n pt3 = -pp/i3\n pt4 = -pp/i4\n out = (np.dot(pt1, Amatrices[0]) +\n np.dot(pt2, Amatrices[1]) +\n np.dot(pt3, Amatrices[2]) +\n np.dot(pt4, Amatrices[3]))\n\n return (-2.0/len(camp)) * np.real(out)\n \ndef chisq_camp(camp, Z, Amatrices, sigma):\n \"\"\"Closure Amplitudes reduced chi-squared loss.\"\"\"\n\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs((i1 * i2)/(i3 * i4))\n\n chisq = np.sum(np.abs((camp - camp_samples)/sigma)**2)/len(camp)\n return chisq \n\n \n###############################################################################\n# Log Closure Amplitude Functions\n###############################################################################\n \ndef compute_lgcamp(X, Amatrices):\n ''' Compute log closure amplitude of image vector X '''\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n \n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\ndef compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):\n \"\"\"The gradient of the Log closure amplitude chi-squared\"\"\"\n\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n lgcamp_samples = (np.log(np.abs(i1)) +\n 
np.log(np.abs(i2)) - \n np.log(np.abs(i3)) -\n np.log(np.abs(i4)))\n\n pp = (lgcamp - lgcamp_samples) / (sigma**2)\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = (np.dot(pt1, Amatrices[0]) +\n np.dot(pt2, Amatrices[1]) +\n np.dot(pt3, Amatrices[2]) +\n np.dot(pt4, Amatrices[3]))\n\n return (-2.0/len(lgcamp)) * np.real(out)\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples)/sigma)**2) / (len(lgcamp))\n return chisq \n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
11,
13,
15,
16,
17
]
}
|
[
11,
13,
15,
16,
17
] |
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import uuid
from mock import patch
from cloudify.workflows import local
from cloudify.utils import setup_logger
from cloudify_agent.tests import resources
from cloudify_agent.tests.utils import (
FileServer,
get_source_uri,
get_requirements_uri)
from cloudify_agent.tests.api.pm import BaseDaemonLiveTestCase
from cloudify_agent.tests.api.pm import only_ci, only_os
from cloudify_agent.api import utils
##############################################################################
# these tests run a local workflow to install the agent on the local machine.
# it should support both windows and linux machines. and thus, testing the
# LocalWindowsAgentInstaller and LocalLinuxAgentInstaller.
# the remote use cases are tested as system tests because they require
# actually launching VM's from the test.
##############################################################################
class AgentInstallerLocalTest(BaseDaemonLiveTestCase):
"""
these tests run local workflows in order to invoke the installer
operations. the remote use case is tested as part of the system tests.
"""
    @classmethod
    def setUpClass(cls):
        # Shared read-only fixtures for every test in this class: a
        # class-level logger, plus URIs pointing at the cloudify-agent
        # source and its requirements file (fed to blueprints as inputs).
        cls.logger = setup_logger(cls.__name__)
        cls.source_url = get_source_uri()
        cls.requirements_file = get_requirements_uri()
    def setUp(self):
        super(AgentInstallerLocalTest, self).setUp()
        # Serve resources over HTTP from a throwaway directory; the
        # agent installer downloads packages from this local server.
        self.resource_base = tempfile.mkdtemp(
            prefix='file-server-resource-base')
        self.fs = FileServer(
            root_path=self.resource_base)
        self.fs.start()
        # Teardown: stop the file server and remove its directory.
        self.addCleanup(self.fs.stop)
        self.addCleanup(shutil.rmtree, self.resource_base)
@patch.dict('agent_packager.logger.LOGGER',
disable_existing_loggers=False)
@patch('cloudify.workflows.local._validate_node')
@only_ci
def test_local_agent_from_package(self, _):
agent_name = utils.internal.generate_agent_name()
agent_queue = '{0}-queue'.format(agent_name)
blueprint_path = resources.get_resource(
'blueprints/agent-from-package/local-agent-blueprint.yaml')
self.logger.info('Initiating local env')
inputs = {
'resource_base': self.resource_base,
'source_url': self.source_url,
'requirements_file': self.requirements_file,
'name': agent_name,
'queue': agent_queue,
'file_server_port': self.fs.port
}
env = local.init_env(name=self._testMethodName,
blueprint_path=blueprint_path,
inputs=inputs)
env.execute('install', task_retries=0)
self.assert_daemon_alive(name=agent_name)
env.execute('uninstall', task_retries=1)
self.wait_for_daemon_dead(name=agent_name)
@only_os('posix')
@patch('cloudify.workflows.local._validate_node')
@only_ci
def test_local_agent_from_package_long_name(self, _):
"""Agent still works with a filepath longer than 128 bytes
Paths longer than 128 bytes break shebangs on linux.
"""
agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))
agent_queue = '{0}-queue'.format(agent_name)
blueprint_path = resources.get_resource(
'blueprints/agent-from-package/local-agent-blueprint.yaml')
self.logger.info('Initiating local env')
inputs = {
'resource_base': self.resource_base,
'source_url': self.source_url,
'requirements_file': self.requirements_file,
'name': agent_name,
'queue': agent_queue,
'file_server_port': self.fs.port
}
env = local.init_env(name=self._testMethodName,
blueprint_path=blueprint_path,
inputs=inputs)
env.execute('install', task_retries=0)
self.assert_daemon_alive(name=agent_name)
env.execute('uninstall', task_retries=1)
self.wait_for_daemon_dead(name=agent_name)
@only_ci
@patch('cloudify.workflows.local._validate_node')
@patch.dict('agent_packager.logger.LOGGER',
disable_existing_loggers=False)
def test_local_agent_from_source(self, _):
agent_name = utils.internal.generate_agent_name()
agent_queue = '{0}-queue'.format(agent_name)
inputs = {
'source_url': self.source_url,
'requirements_file': self.requirements_file,
'name': agent_name,
'queue': agent_queue
}
blueprint_path = resources.get_resource(
'blueprints/agent-from-source/local-agent-blueprint.yaml')
self.logger.info('Initiating local env')
env = local.init_env(name=self._testMethodName,
blueprint_path=blueprint_path,
inputs=inputs)
env.execute('install', task_retries=0)
self.assert_daemon_alive(name=agent_name)
env.execute('uninstall', task_retries=1)
self.wait_for_daemon_dead(name=agent_name)
@only_ci
@patch('cloudify.workflows.local._validate_node')
@patch.dict('agent_packager.logger.LOGGER',
disable_existing_loggers=False)
def test_3_2_backwards(self, _):
agent_name = utils.internal.generate_agent_name()
agent_queue = '{0}-queue'.format(agent_name)
inputs = {
'source_url': self.source_url,
'requirements_file': self.requirements_file,
'name': agent_name,
'queue': agent_queue
}
blueprint_path = resources.get_resource(
'blueprints/3_2-agent-from-source/3_2-agent-from-source.yaml')
self.logger.info('Initiating local env')
env = local.init_env(name=self._testMethodName,
blueprint_path=blueprint_path,
inputs=inputs)
env.execute('install', task_retries=0)
self.assert_daemon_alive(name=agent_name)
env.execute('uninstall', task_retries=1)
self.wait_for_daemon_dead(name=agent_name)
@only_os('posix')
@only_ci
@patch('cloudify.workflows.local._validate_node')
def test_local_agent_from_source_long_name(self, _):
"""Agent still works with a filepath longer than 128 bytes
This test won't pass on windows because some files within the
virtualenv exceed 256 bytes, and windows doesn't support paths
that long.
"""
agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))
agent_queue = '{0}-queue'.format(agent_name)
inputs = {
'source_url': self.source_url,
'requirements_file': self.requirements_file,
'name': agent_name,
'queue': agent_queue
}
blueprint_path = resources.get_resource(
'blueprints/agent-from-source/local-agent-blueprint.yaml')
self.logger.info('Initiating local env')
env = local.init_env(name=self._testMethodName,
blueprint_path=blueprint_path,
inputs=inputs)
env.execute('install', task_retries=0)
self.assert_daemon_alive(name=agent_name)
env.execute('uninstall', task_retries=1)
self.wait_for_daemon_dead(name=agent_name)
|
normal
|
{
"blob_id": "eed3ec2897d4da20b576cb4e2ce95331ae223f76",
"index": 7938,
"step-1": "<mask token>\n\n\nclass AgentInstallerLocalTest(BaseDaemonLiveTestCase):\n <mask token>\n\n @classmethod\n def setUpClass(cls):\n cls.logger = setup_logger(cls.__name__)\n cls.source_url = get_source_uri()\n cls.requirements_file = get_requirements_uri()\n\n def setUp(self):\n super(AgentInstallerLocalTest, self).setUp()\n self.resource_base = tempfile.mkdtemp(prefix=\n 'file-server-resource-base')\n self.fs = FileServer(root_path=self.resource_base)\n self.fs.start()\n self.addCleanup(self.fs.stop)\n self.addCleanup(shutil.rmtree, self.resource_base)\n <mask token>\n <mask token>\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n def test_local_agent_from_source(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AgentInstallerLocalTest(BaseDaemonLiveTestCase):\n <mask token>\n\n @classmethod\n def setUpClass(cls):\n cls.logger = setup_logger(cls.__name__)\n cls.source_url = get_source_uri()\n cls.requirements_file = get_requirements_uri()\n\n def setUp(self):\n super(AgentInstallerLocalTest, self).setUp()\n self.resource_base = tempfile.mkdtemp(prefix=\n 'file-server-resource-base')\n self.fs = FileServer(root_path=self.resource_base)\n self.fs.start()\n self.addCleanup(self.fs.stop)\n self.addCleanup(shutil.rmtree, self.resource_base)\n\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n @patch('cloudify.workflows.local._validate_node')\n @only_ci\n def test_local_agent_from_package(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n inputs = {'resource_base': self.resource_base, 'source_url': self.\n source_url, 'requirements_file': self.requirements_file, 'name':\n agent_name, 'queue': agent_queue, 'file_server_port': self.fs.port}\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n <mask token>\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n def test_local_agent_from_source(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 
'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n def test_3_2_backwards(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 'blueprints/3_2-agent-from-source/3_2-agent-from-source.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AgentInstallerLocalTest(BaseDaemonLiveTestCase):\n <mask token>\n\n @classmethod\n def setUpClass(cls):\n cls.logger = setup_logger(cls.__name__)\n cls.source_url = get_source_uri()\n cls.requirements_file = get_requirements_uri()\n\n def setUp(self):\n super(AgentInstallerLocalTest, self).setUp()\n self.resource_base = tempfile.mkdtemp(prefix=\n 'file-server-resource-base')\n self.fs = FileServer(root_path=self.resource_base)\n self.fs.start()\n self.addCleanup(self.fs.stop)\n self.addCleanup(shutil.rmtree, self.resource_base)\n\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n @patch('cloudify.workflows.local._validate_node')\n @only_ci\n def test_local_agent_from_package(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n inputs = {'resource_base': self.resource_base, 'source_url': self.\n source_url, 'requirements_file': self.requirements_file, 'name':\n agent_name, 'queue': agent_queue, 'file_server_port': self.fs.port}\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_os('posix')\n @patch('cloudify.workflows.local._validate_node')\n @only_ci\n def test_local_agent_from_package_long_name(self, _):\n \"\"\"Agent still works with a filepath longer than 128 bytes\n\n Paths longer than 128 bytes break shebangs on linux.\n \"\"\"\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n 
self.logger.info('Initiating local env')\n inputs = {'resource_base': self.resource_base, 'source_url': self.\n source_url, 'requirements_file': self.requirements_file, 'name':\n agent_name, 'queue': agent_queue, 'file_server_port': self.fs.port}\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n def test_local_agent_from_source(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n def test_3_2_backwards(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 'blueprints/3_2-agent-from-source/3_2-agent-from-source.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n 
blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_os('posix')\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n def test_local_agent_from_source_long_name(self, _):\n \"\"\"Agent still works with a filepath longer than 128 bytes\n\n This test won't pass on windows because some files within the\n virtualenv exceed 256 bytes, and windows doesn't support paths\n that long.\n \"\"\"\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n",
"step-4": "import shutil\nimport tempfile\nimport uuid\nfrom mock import patch\nfrom cloudify.workflows import local\nfrom cloudify.utils import setup_logger\nfrom cloudify_agent.tests import resources\nfrom cloudify_agent.tests.utils import FileServer, get_source_uri, get_requirements_uri\nfrom cloudify_agent.tests.api.pm import BaseDaemonLiveTestCase\nfrom cloudify_agent.tests.api.pm import only_ci, only_os\nfrom cloudify_agent.api import utils\n\n\nclass AgentInstallerLocalTest(BaseDaemonLiveTestCase):\n \"\"\"\n these tests run local workflows in order to invoke the installer\n operations. the remote use case is tested as part of the system tests.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.logger = setup_logger(cls.__name__)\n cls.source_url = get_source_uri()\n cls.requirements_file = get_requirements_uri()\n\n def setUp(self):\n super(AgentInstallerLocalTest, self).setUp()\n self.resource_base = tempfile.mkdtemp(prefix=\n 'file-server-resource-base')\n self.fs = FileServer(root_path=self.resource_base)\n self.fs.start()\n self.addCleanup(self.fs.stop)\n self.addCleanup(shutil.rmtree, self.resource_base)\n\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n @patch('cloudify.workflows.local._validate_node')\n @only_ci\n def test_local_agent_from_package(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n inputs = {'resource_base': self.resource_base, 'source_url': self.\n source_url, 'requirements_file': self.requirements_file, 'name':\n agent_name, 'queue': agent_queue, 'file_server_port': self.fs.port}\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', 
task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_os('posix')\n @patch('cloudify.workflows.local._validate_node')\n @only_ci\n def test_local_agent_from_package_long_name(self, _):\n \"\"\"Agent still works with a filepath longer than 128 bytes\n\n Paths longer than 128 bytes break shebangs on linux.\n \"\"\"\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n inputs = {'resource_base': self.resource_base, 'source_url': self.\n source_url, 'requirements_file': self.requirements_file, 'name':\n agent_name, 'queue': agent_queue, 'file_server_port': self.fs.port}\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n @patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n def test_local_agent_from_source(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n 
@patch.dict('agent_packager.logger.LOGGER', disable_existing_loggers=False)\n def test_3_2_backwards(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 'blueprints/3_2-agent-from-source/3_2-agent-from-source.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_os('posix')\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n def test_local_agent_from_source_long_name(self, _):\n \"\"\"Agent still works with a filepath longer than 128 bytes\n\n This test won't pass on windows because some files within the\n virtualenv exceed 256 bytes, and windows doesn't support paths\n that long.\n \"\"\"\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n inputs = {'source_url': self.source_url, 'requirements_file': self.\n requirements_file, 'name': agent_name, 'queue': agent_queue}\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName, blueprint_path=\n blueprint_path, inputs=inputs)\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n",
"step-5": "#########\n# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport shutil\nimport tempfile\nimport uuid\n\nfrom mock import patch\n\nfrom cloudify.workflows import local\nfrom cloudify.utils import setup_logger\n\nfrom cloudify_agent.tests import resources\nfrom cloudify_agent.tests.utils import (\n FileServer,\n get_source_uri,\n get_requirements_uri)\nfrom cloudify_agent.tests.api.pm import BaseDaemonLiveTestCase\nfrom cloudify_agent.tests.api.pm import only_ci, only_os\nfrom cloudify_agent.api import utils\n\n\n##############################################################################\n# these tests run a local workflow to install the agent on the local machine.\n# it should support both windows and linux machines. and thus, testing the\n# LocalWindowsAgentInstaller and LocalLinuxAgentInstaller.\n# the remote use cases are tested as system tests because they require\n# actually launching VM's from the test.\n##############################################################################\n\nclass AgentInstallerLocalTest(BaseDaemonLiveTestCase):\n\n \"\"\"\n these tests run local workflows in order to invoke the installer\n operations. 
the remote use case is tested as part of the system tests.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.logger = setup_logger(cls.__name__)\n cls.source_url = get_source_uri()\n cls.requirements_file = get_requirements_uri()\n\n def setUp(self):\n super(AgentInstallerLocalTest, self).setUp()\n\n self.resource_base = tempfile.mkdtemp(\n prefix='file-server-resource-base')\n self.fs = FileServer(\n root_path=self.resource_base)\n self.fs.start()\n\n self.addCleanup(self.fs.stop)\n self.addCleanup(shutil.rmtree, self.resource_base)\n\n @patch.dict('agent_packager.logger.LOGGER',\n disable_existing_loggers=False)\n @patch('cloudify.workflows.local._validate_node')\n @only_ci\n def test_local_agent_from_package(self, _):\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n\n inputs = {\n 'resource_base': self.resource_base,\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue,\n 'file_server_port': self.fs.port\n }\n\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_os('posix')\n @patch('cloudify.workflows.local._validate_node')\n @only_ci\n def test_local_agent_from_package_long_name(self, _):\n \"\"\"Agent still works with a filepath longer than 128 bytes\n\n Paths longer than 128 bytes break shebangs on linux.\n \"\"\"\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n 
self.logger.info('Initiating local env')\n\n inputs = {\n 'resource_base': self.resource_base,\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue,\n 'file_server_port': self.fs.port\n }\n\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n @patch.dict('agent_packager.logger.LOGGER',\n disable_existing_loggers=False)\n def test_local_agent_from_source(self, _):\n\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n @patch.dict('agent_packager.logger.LOGGER',\n disable_existing_loggers=False)\n def test_3_2_backwards(self, _):\n\n agent_name = utils.internal.generate_agent_name()\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/3_2-agent-from-source/3_2-agent-from-source.yaml')\n self.logger.info('Initiating local env')\n env = 
local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n\n @only_os('posix')\n @only_ci\n @patch('cloudify.workflows.local._validate_node')\n def test_local_agent_from_source_long_name(self, _):\n \"\"\"Agent still works with a filepath longer than 128 bytes\n\n This test won't pass on windows because some files within the\n virtualenv exceed 256 bytes, and windows doesn't support paths\n that long.\n \"\"\"\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)\n",
"step-ids": [
4,
6,
8,
10,
11
]
}
|
[
4,
6,
8,
10,
11
] |
<|reserved_special_token_0|>
class GameList(ListCreateAPIView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class GameDetail(RetrieveUpdateDestroyAPIView):
queryset = Game.objects.all()
serializer_class = GameSerializer
name = 'game-detail'
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly)
class PlayerList(ListCreateAPIView):
queryset = Player.objects.all()
serializer_class = PlayerSerializer
name = 'player-list'
filter_fields = 'name', 'gender'
search_fields = '^name',
ordering_fields = 'name',
class PlayerDetail(RetrieveUpdateDestroyAPIView):
queryset = Player.objects.all()
serializer_class = PlayerSerializer
name = 'player-detail'
class PlayerScoreFilter(filters.FilterSet):
min_score = NumberFilter(name='score', lookup_expr='gte')
max_score = NumberFilter(name='score', lookup_expr='lte')
from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')
to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')
player_name = AllValuesFilter(name='player__name')
game_name = AllValuesFilter(name='game__name')
class Meta:
model = PlayerScore
fields = ('score', 'from_score_date', 'to_score_date', 'min_score',
'max_score', 'player_name', 'game_name')
class PlayerScoreList(ListCreateAPIView):
queryset = PlayerScore.objects.all()
serializer_class = PlayerScoreSerializer
name = 'playerscore-list'
filter_class = PlayerScoreFilter
ordering_fields = 'score', 'score_date'
class PlayerScoreDetail(RetrieveUpdateDestroyAPIView):
queryset = PlayerScore.objects.all()
serializer_class = PlayerScoreSerializer
name = 'playerscore-detail'
class UserList(ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
name = 'user-list'
class UserDetail(RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
name = 'user-detail'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ApiRoot(GenericAPIView):
<|reserved_special_token_0|>
def get(self, request, *args, **kwargs):
return Response({'players': reverse(PlayerList.name, request=
request), 'game-categories': reverse(GameCategoryList.name,
request=request), 'game': reverse(GameList.name, request=
request), 'scores': reverse(PlayerScoreList.name, request=
request), 'users': reverse(UserList.name, request=request)})
class GameCategoryList(ListCreateAPIView):
queryset = GameCategory.objects.all()
serializer_class = GameCategorySerializer
name = 'gamecategory-list'
throttle_scope = 'game-categories'
throttle_classes = ScopedRateThrottle,
filter_fields = 'name',
search_fields = '^name',
ordering_fields = 'name',
class GameCategoryDetail(RetrieveUpdateDestroyAPIView):
queryset = GameCategory.objects.all()
serializer_class = GameCategorySerializer
name = 'gamecategory-detail'
throttle_scope = 'game-categories'
throttle_classes = ScopedRateThrottle,
class GameList(ListCreateAPIView):
queryset = Game.objects.all()
serializer_class = GameSerializer
name = 'game-list'
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly)
filter_fields = 'name', 'game_category', 'release_date', 'played', 'owner'
search_fields = '^name',
ordering_fields = 'name', 'release_date'
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class GameDetail(RetrieveUpdateDestroyAPIView):
queryset = Game.objects.all()
serializer_class = GameSerializer
name = 'game-detail'
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly)
class PlayerList(ListCreateAPIView):
queryset = Player.objects.all()
serializer_class = PlayerSerializer
name = 'player-list'
filter_fields = 'name', 'gender'
search_fields = '^name',
ordering_fields = 'name',
class PlayerDetail(RetrieveUpdateDestroyAPIView):
queryset = Player.objects.all()
serializer_class = PlayerSerializer
name = 'player-detail'
class PlayerScoreFilter(filters.FilterSet):
min_score = NumberFilter(name='score', lookup_expr='gte')
max_score = NumberFilter(name='score', lookup_expr='lte')
from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')
to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')
player_name = AllValuesFilter(name='player__name')
game_name = AllValuesFilter(name='game__name')
class Meta:
model = PlayerScore
fields = ('score', 'from_score_date', 'to_score_date', 'min_score',
'max_score', 'player_name', 'game_name')
class PlayerScoreList(ListCreateAPIView):
queryset = PlayerScore.objects.all()
serializer_class = PlayerScoreSerializer
name = 'playerscore-list'
filter_class = PlayerScoreFilter
ordering_fields = 'score', 'score_date'
class PlayerScoreDetail(RetrieveUpdateDestroyAPIView):
queryset = PlayerScore.objects.all()
serializer_class = PlayerScoreSerializer
name = 'playerscore-detail'
class UserList(ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
name = 'user-list'
class UserDetail(RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
name = 'user-detail'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ApiRoot(GenericAPIView):
    """Entry point of the API: hyperlinks every top-level list endpoint."""
    name = 'api-root'

    def get(self, request, *args, **kwargs):
        # Build absolute URLs from each list view's route name so that
        # clients can discover the whole API from this single endpoint.
        endpoints = {
            'players': reverse(PlayerList.name, request=request),
            'game-categories': reverse(GameCategoryList.name,
                                       request=request),
            'game': reverse(GameList.name, request=request),
            'scores': reverse(PlayerScoreList.name, request=request),
            'users': reverse(UserList.name, request=request),
        }
        return Response(endpoints)
class GameCategoryList(ListCreateAPIView):
    """List existing game categories or create a new one."""
    name = 'gamecategory-list'
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    # Rate-limit this endpoint with its own scoped throttle bucket.
    throttle_classes = (ScopedRateThrottle,)
    throttle_scope = 'game-categories'
    filter_fields = ('name',)
    search_fields = ('^name',)
    ordering_fields = ('name',)
class GameCategoryDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single game category."""
    name = 'gamecategory-detail'
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    # Shares the scoped throttle bucket with the category list view.
    throttle_classes = (ScopedRateThrottle,)
    throttle_scope = 'game-categories'
class GameList(ListCreateAPIView):
    """List games or create a game owned by the requesting user."""
    name = 'game-list'
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    # Anonymous users may read; only an authenticated owner may write.
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly,
    )
    filter_fields = ('name', 'game_category', 'release_date', 'played',
                     'owner')
    search_fields = ('^name',)
    ordering_fields = ('name', 'release_date')

    def perform_create(self, serializer):
        # Record the authenticated user as the owner of the new game.
        serializer.save(owner=self.request.user)
class GameDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single game.

    Writes are restricted to the game's owner; reads are open.
    """
    name = 'game-detail'
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly,
    )
class PlayerList(ListCreateAPIView):
    """List players or create a new player."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-list'
    filter_fields = 'name', 'gender'
    search_fields = '^name',
    ordering_fields = 'name',
class PlayerDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single player."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-detail'
class PlayerScoreFilter(filters.FilterSet):
    """Score query filters: numeric/date ranges plus player and game names.

    NOTE(review): django-filter 2.x renamed the ``name`` kwarg to
    ``field_name`` -- confirm the pinned django-filter version still
    accepts ``name`` before upgrading.
    """
    min_score = NumberFilter(name='score', lookup_expr='gte')
    max_score = NumberFilter(name='score', lookup_expr='lte')
    from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')
    to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')
    # Double underscores traverse the FK relations (player__name, game__name).
    player_name = AllValuesFilter(name='player__name')
    game_name = AllValuesFilter(name='game__name')


    class Meta:
        model = PlayerScore
        fields = ('score', 'from_score_date', 'to_score_date', 'min_score',
            'max_score', 'player_name', 'game_name')
class PlayerScoreList(ListCreateAPIView):
    """List scores or create one; supports PlayerScoreFilter queries."""
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-list'
    filter_class = PlayerScoreFilter
    ordering_fields = 'score', 'score_date'
class PlayerScoreDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single PlayerScore."""
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-detail'
class UserList(ListAPIView):
    """Read-only list of users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-list'
class UserDetail(RetrieveAPIView):
    """Read-only detail view for a single user."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-detail'
<|reserved_special_token_1|>
from django.contrib.auth.models import User
from django_filters import NumberFilter, DateTimeFilter, AllValuesFilter
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import permissions
from rest_framework.throttling import ScopedRateThrottle
from rest_framework import filters
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, GenericAPIView, ListAPIView, RetrieveAPIView
from games.models import GameCategory, Game, Player, PlayerScore
from games.serializers import GameCategorySerializer, GameSerializer, PlayerSerializer, PlayerScoreSerializer
from games.serializers import UserSerializer
from games.permissions import IsOwnerOrReadOnly
class ApiRoot(GenericAPIView):
    """API entry point: returns links to every top-level list endpoint."""
    name = 'api-root'

    def get(self, request, *args, **kwargs):
        # Resolve each list view's registered name into an absolute URL so
        # clients can discover the whole API from this single endpoint.
        return Response({'players': reverse(PlayerList.name, request=
            request), 'game-categories': reverse(GameCategoryList.name,
            request=request), 'game': reverse(GameList.name, request=
            request), 'scores': reverse(PlayerScoreList.name, request=
            request), 'users': reverse(UserList.name, request=request)})
class GameCategoryList(ListCreateAPIView):
    """List all game categories or create a new one."""
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    name = 'gamecategory-list'
    # Requests draw from the 'game-categories' scoped throttle bucket.
    throttle_scope = 'game-categories'
    throttle_classes = ScopedRateThrottle,
    filter_fields = 'name',
    search_fields = '^name',
    ordering_fields = 'name',
class GameCategoryDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single game category."""
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    name = 'gamecategory-detail'
    # Shares the same scoped throttle bucket as the list view.
    throttle_scope = 'game-categories'
    throttle_classes = ScopedRateThrottle,
class GameList(ListCreateAPIView):
    """List games or create a new game (owner set to the requesting user)."""
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    name = 'game-list'
    # Writes require authentication; further gated by IsOwnerOrReadOnly.
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly)
    filter_fields = 'name', 'game_category', 'release_date', 'played', 'owner'
    search_fields = '^name',
    ordering_fields = 'name', 'release_date'

    def perform_create(self, serializer):
        # Record the authenticated requester as the new game's owner.
        serializer.save(owner=self.request.user)
class GameDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a game; writes gated by IsOwnerOrReadOnly."""
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    name = 'game-detail'
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly)
class PlayerList(ListCreateAPIView):
    """List players or create a new player."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-list'
    filter_fields = 'name', 'gender'
    search_fields = '^name',
    ordering_fields = 'name',
class PlayerDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single player."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-detail'
class PlayerScoreFilter(filters.FilterSet):
    """Score query filters: numeric/date ranges plus player and game names.

    NOTE(review): django-filter 2.x renamed the ``name`` kwarg to
    ``field_name`` -- confirm the pinned django-filter version still
    accepts ``name`` before upgrading.
    """
    min_score = NumberFilter(name='score', lookup_expr='gte')
    max_score = NumberFilter(name='score', lookup_expr='lte')
    from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')
    to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')
    # Double underscores traverse the FK relations (player__name, game__name).
    player_name = AllValuesFilter(name='player__name')
    game_name = AllValuesFilter(name='game__name')


    class Meta:
        model = PlayerScore
        fields = ('score', 'from_score_date', 'to_score_date', 'min_score',
            'max_score', 'player_name', 'game_name')
class PlayerScoreList(ListCreateAPIView):
    """List scores or create one; supports PlayerScoreFilter queries."""
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-list'
    filter_class = PlayerScoreFilter
    ordering_fields = 'score', 'score_date'
class PlayerScoreDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single PlayerScore."""
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-detail'
class UserList(ListAPIView):
    """Read-only list of users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-list'
class UserDetail(RetrieveAPIView):
    """Read-only detail view for a single user."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-detail'
<|reserved_special_token_1|>
from django.contrib.auth.models import User
from django_filters import (
NumberFilter,
DateTimeFilter,
AllValuesFilter
)
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import permissions
from rest_framework.throttling import ScopedRateThrottle
from rest_framework import filters
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateDestroyAPIView,
GenericAPIView,
ListAPIView,
RetrieveAPIView
)
from games.models import (
GameCategory,
Game,
Player,
PlayerScore
)
from games.serializers import (
GameCategorySerializer,
GameSerializer,
PlayerSerializer,
PlayerScoreSerializer,
)
from games.serializers import UserSerializer
from games.permissions import IsOwnerOrReadOnly
class ApiRoot(GenericAPIView):
    """Root endpoint: hyperlinks clients to every top-level collection."""
    name = 'api-root'

    def get(self, request, *args, **kwargs):
        # Map each response label to the registered view name, then
        # resolve every name into an absolute URL in one pass.
        endpoints = {
            'players': PlayerList.name,
            'game-categories': GameCategoryList.name,
            'game': GameList.name,
            'scores': PlayerScoreList.name,
            'users': UserList.name,
        }
        return Response({
            label: reverse(view_name, request=request)
            for label, view_name in endpoints.items()
        })
class GameCategoryList(ListCreateAPIView):
    """List all game categories or create a new one."""
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    name = 'gamecategory-list'
    # Requests draw from the 'game-categories' scoped throttle bucket.
    throttle_scope = 'game-categories'
    throttle_classes = (ScopedRateThrottle,)
    filter_fields = ('name',)
    search_fields = ('^name',)
    ordering_fields = ('name',)
class GameCategoryDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single game category."""
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    name = 'gamecategory-detail'
    # Shares the same scoped throttle bucket as the list view.
    throttle_scope = 'game-categories'
    throttle_classes = (ScopedRateThrottle,)
class GameList(ListCreateAPIView):
    """List games or create a new game (owner set to the requesting user)."""
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    name = 'game-list'
    # Writes require authentication; further gated by IsOwnerOrReadOnly.
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly
    )
    filter_fields = (
        'name',
        'game_category',
        'release_date',
        'played',
        'owner',
    )
    search_fields = (
        '^name',
    )
    ordering_fields = (
        'name',
        'release_date',
    )

    def perform_create(self, serializer):
        # Pass an additional owner field to the create method
        # to set the owner to the user received in the request.
        serializer.save(owner=self.request.user)
class GameDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a game; writes gated by IsOwnerOrReadOnly."""
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    name = 'game-detail'

    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly
    )
class PlayerList(ListCreateAPIView):
    """List players or create a new player."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-list'
    filter_fields = (
        'name',
        'gender',
    )
    search_fields = (
        '^name',
    )
    ordering_fields = (
        'name',
    )
class PlayerDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single player."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-detail'
class PlayerScoreFilter(filters.FilterSet):
    """Score query filters: numeric/date ranges plus player and game names.

    NOTE(review): django-filter 2.x renamed the ``name`` kwarg to
    ``field_name`` -- confirm the pinned django-filter version still
    accepts ``name`` before upgrading.
    """
    min_score = NumberFilter(
        name='score',lookup_expr='gte'
    )
    max_score = NumberFilter(
        name='score',lookup_expr='lte'
    )
    from_score_date = DateTimeFilter(
        name='score_date',
        lookup_expr='gte'
    )
    to_score_date = DateTimeFilter(
        name='score_date',
        lookup_expr='lte'
    )
    # Double underscores traverse the FK relations below.
    player_name = AllValuesFilter(
        name='player__name'
    )
    game_name = AllValuesFilter(
        name= 'game__name'
    )

    class Meta:
        model = PlayerScore
        fields = (
            'score',
            'from_score_date',
            'to_score_date',
            'min_score',
            'max_score',
            # player__name will be accessed as player_name
            'player_name',
            # game__name will be accessed as game_name
            'game_name'
        )
class PlayerScoreList(ListCreateAPIView):
    """List scores or create one; supports PlayerScoreFilter queries."""
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-list'
    filter_class =PlayerScoreFilter
    ordering_fields = (
        'score',
        'score_date',
    )
class PlayerScoreDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single PlayerScore."""
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-detail'
class UserList(ListAPIView):
    """Read-only list of users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-list'
class UserDetail(RetrieveAPIView):
    """Read-only detail view for a single user."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-detail'
|
flexible
|
{
"blob_id": "2908d34165fac272c9571be623855a0613c952f3",
"index": 5433,
"step-1": "<mask token>\n\n\nclass GameList(ListCreateAPIView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = 'name', 'gender'\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(name='score', lookup_expr='gte')\n max_score = NumberFilter(name='score', lookup_expr='lte')\n from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')\n to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')\n player_name = AllValuesFilter(name='player__name')\n game_name = AllValuesFilter(name='game__name')\n\n\n class Meta:\n model = PlayerScore\n fields = ('score', 'from_score_date', 'to_score_date', 'min_score',\n 'max_score', 'player_name', 'game_name')\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class = PlayerScoreFilter\n ordering_fields = 'score', 'score_date'\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\n\nclass UserDetail(RetrieveAPIView):\n 
queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n",
"step-2": "<mask token>\n\n\nclass ApiRoot(GenericAPIView):\n <mask token>\n\n def get(self, request, *args, **kwargs):\n return Response({'players': reverse(PlayerList.name, request=\n request), 'game-categories': reverse(GameCategoryList.name,\n request=request), 'game': reverse(GameList.name, request=\n request), 'scores': reverse(PlayerScoreList.name, request=\n request), 'users': reverse(UserList.name, request=request)})\n\n\nclass GameCategoryList(ListCreateAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-list'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n filter_fields = 'name',\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass GameCategoryDetail(RetrieveUpdateDestroyAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-detail'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n\n\nclass GameList(ListCreateAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-list'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n filter_fields = 'name', 'game_category', 'release_date', 'played', 'owner'\n search_fields = '^name',\n ordering_fields = 'name', 'release_date'\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = 'name', 'gender'\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n 
serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(name='score', lookup_expr='gte')\n max_score = NumberFilter(name='score', lookup_expr='lte')\n from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')\n to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')\n player_name = AllValuesFilter(name='player__name')\n game_name = AllValuesFilter(name='game__name')\n\n\n class Meta:\n model = PlayerScore\n fields = ('score', 'from_score_date', 'to_score_date', 'min_score',\n 'max_score', 'player_name', 'game_name')\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class = PlayerScoreFilter\n ordering_fields = 'score', 'score_date'\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n",
"step-3": "<mask token>\n\n\nclass ApiRoot(GenericAPIView):\n name = 'api-root'\n\n def get(self, request, *args, **kwargs):\n return Response({'players': reverse(PlayerList.name, request=\n request), 'game-categories': reverse(GameCategoryList.name,\n request=request), 'game': reverse(GameList.name, request=\n request), 'scores': reverse(PlayerScoreList.name, request=\n request), 'users': reverse(UserList.name, request=request)})\n\n\nclass GameCategoryList(ListCreateAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-list'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n filter_fields = 'name',\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass GameCategoryDetail(RetrieveUpdateDestroyAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-detail'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n\n\nclass GameList(ListCreateAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-list'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n filter_fields = 'name', 'game_category', 'release_date', 'played', 'owner'\n search_fields = '^name',\n ordering_fields = 'name', 'release_date'\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = 'name', 'gender'\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n 
serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(name='score', lookup_expr='gte')\n max_score = NumberFilter(name='score', lookup_expr='lte')\n from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')\n to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')\n player_name = AllValuesFilter(name='player__name')\n game_name = AllValuesFilter(name='game__name')\n\n\n class Meta:\n model = PlayerScore\n fields = ('score', 'from_score_date', 'to_score_date', 'min_score',\n 'max_score', 'player_name', 'game_name')\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class = PlayerScoreFilter\n ordering_fields = 'score', 'score_date'\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n",
"step-4": "from django.contrib.auth.models import User\nfrom django_filters import NumberFilter, DateTimeFilter, AllValuesFilter\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework import permissions\nfrom rest_framework.throttling import ScopedRateThrottle\nfrom rest_framework import filters\nfrom rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, GenericAPIView, ListAPIView, RetrieveAPIView\nfrom games.models import GameCategory, Game, Player, PlayerScore\nfrom games.serializers import GameCategorySerializer, GameSerializer, PlayerSerializer, PlayerScoreSerializer\nfrom games.serializers import UserSerializer\nfrom games.permissions import IsOwnerOrReadOnly\n\n\nclass ApiRoot(GenericAPIView):\n name = 'api-root'\n\n def get(self, request, *args, **kwargs):\n return Response({'players': reverse(PlayerList.name, request=\n request), 'game-categories': reverse(GameCategoryList.name,\n request=request), 'game': reverse(GameList.name, request=\n request), 'scores': reverse(PlayerScoreList.name, request=\n request), 'users': reverse(UserList.name, request=request)})\n\n\nclass GameCategoryList(ListCreateAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-list'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n filter_fields = 'name',\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass GameCategoryDetail(RetrieveUpdateDestroyAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-detail'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n\n\nclass GameList(ListCreateAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-list'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n filter_fields = 'name', 'game_category', 
'release_date', 'played', 'owner'\n search_fields = '^name',\n ordering_fields = 'name', 'release_date'\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = 'name', 'gender'\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(name='score', lookup_expr='gte')\n max_score = NumberFilter(name='score', lookup_expr='lte')\n from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')\n to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')\n player_name = AllValuesFilter(name='player__name')\n game_name = AllValuesFilter(name='game__name')\n\n\n class Meta:\n model = PlayerScore\n fields = ('score', 'from_score_date', 'to_score_date', 'min_score',\n 'max_score', 'player_name', 'game_name')\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class = PlayerScoreFilter\n ordering_fields = 'score', 'score_date'\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = 
UserSerializer\n name = 'user-detail'\n",
"step-5": "from django.contrib.auth.models import User\nfrom django_filters import (\n NumberFilter,\n DateTimeFilter,\n AllValuesFilter\n)\n\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework import permissions\nfrom rest_framework.throttling import ScopedRateThrottle\nfrom rest_framework import filters\n\nfrom rest_framework.generics import (\n ListCreateAPIView,\n RetrieveUpdateDestroyAPIView,\n GenericAPIView,\n ListAPIView,\n RetrieveAPIView\n)\n\nfrom games.models import (\n GameCategory,\n Game,\n Player,\n PlayerScore\n)\n\nfrom games.serializers import (\n GameCategorySerializer,\n GameSerializer,\n PlayerSerializer,\n PlayerScoreSerializer,\n)\n\nfrom games.serializers import UserSerializer\nfrom games.permissions import IsOwnerOrReadOnly\n\n\n\n\nclass ApiRoot(GenericAPIView):\n name= 'api-root'\n\n def get(self,request,*args,**kwargs):\n return Response(\n {\n 'players':reverse(PlayerList.name,request=request),\n 'game-categories':reverse(GameCategoryList.name,request=request),\n 'game':reverse(GameList.name,request=request),\n 'scores':reverse(PlayerScoreList.name,request=request),\n 'users': reverse(UserList.name,request=request)\n }\n )\n\n\n\n\nclass GameCategoryList(ListCreateAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-list'\n throttle_scope = 'game-categories'\n throttle_classes = (ScopedRateThrottle,)\n filter_fields = ('name',)\n search_fields = ('^name',)\n ordering_fields = ('name',)\n\nclass GameCategoryDetail(RetrieveUpdateDestroyAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-detail'\n throttle_scope = 'game-categories'\n throttle_classes = (ScopedRateThrottle,)\n\nclass GameList(ListCreateAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-list'\n permission_classes = (\n 
permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly\n )\n filter_fields = (\n 'name',\n 'game_category',\n 'release_date',\n 'played',\n 'owner',\n )\n search_fields = (\n '^name',\n )\n ordering_fields = (\n 'name',\n 'release_date',\n )\n\n def perform_create(self, serializer):\n # pass an additional owner field to the create method\n # to set the owner to the user recieved in the request\n serializer.save(owner=self.request.user)\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly\n )\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = (\n 'name',\n 'gender',\n )\n search_fields = (\n '^name',\n )\n ordering_fields = (\n 'name',\n )\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(\n name='score',lookup_expr='gte'\n )\n max_score = NumberFilter(\n name='score',lookup_expr='lte'\n )\n from_score_date = DateTimeFilter(\n name='score_date',\n lookup_expr='gte'\n )\n to_score_date = DateTimeFilter(\n name='score_date',\n lookup_expr='lte'\n )\n player_name = AllValuesFilter(\n name='player__name'\n )\n game_name = AllValuesFilter(\n name= 'game__name'\n )\n\n class Meta:\n model = PlayerScore\n fields = (\n 'score',\n 'from_score_date',\n 'to_score_date',\n 'min_score',\n 'max_score',\n # player__name will be accessed as player_name\n 'player_name',\n #game__name will be accessed as game_name\n 'game_name'\n )\n\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class =PlayerScoreFilter\n 
ordering_fields = (\n 'score',\n 'score_date',\n )\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n\n\n",
"step-ids": [
18,
25,
26,
27,
28
]
}
|
[
18,
25,
26,
27,
28
] |
from network.utility import *
from entities.message import Message, BroadcastMessage, GroupMessage
from entities.node import Node
from entities.group import GroupBroadcast
from entities.request import Request
import threading
import time
import logging
import random
import json
import socket
from services.user import UserService
class Sender:
    """Outbound side of the P2P protocol.

    Serializes application entities into header+body packets (via
    ``packHeader``) and hands each packet to a fire-and-forget
    ``SenderWorker`` thread, either unicast (address looked up in
    ``reverseMap``) or broadcast to every known peer address.
    """

    def __init__(self, reverseMap, info):
        # reverseMap: username -> peer address.
        # info: this node's state dict (username, groupID, isAdmin,
        # isMember, ...).
        self.reverseMap = reverseMap
        self.info = info

    # ------------------------------------------------------------------
    # internal helpers (the seven public senders previously each repeated
    # an identical 16-key header dict; it now lives in one place)
    # ------------------------------------------------------------------
    def _buildPacket(self, body, srcUsername, srcGroup, desGroup="", **flags):
        """Assemble a wire packet: packed header + body.

        All boolean header flags default to False; pass e.g.
        ``broadcast=True`` to override one. Key insertion order is kept
        identical to the original per-method dicts, in case
        ``packHeader`` depends on it.
        """
        header = {
            "srcUsername": srcUsername,
            "srcGroup": srcGroup,
            "desGroup": desGroup,
            "admin": self.info.get("isAdmin", ""),
            "member": self.info.get("isMember", ""),
            "broadcast": False,
            "groupBroadcast": False,
            "memberRq": False,
            "ackRq": False,
            "denyRq": False,
            "leaveRq": False,
            "nodeRq": False,
            "big": False,
            "nodeRep": False,
            "contentLength": len(body),
        }
        header.update(flags)
        return packHeader(header) + body

    def _sendTo(self, username, msg):
        """Unicast msg to the address registered for username."""
        addr = self.reverseMap.get(username)
        SenderWorker(addr, msg).start()

    def _broadcastAll(self, msg):
        """Send msg to every known peer address."""
        for addr in self.reverseMap.values():
            SenderWorker(addr, msg).start()

    @staticmethod
    def _messageBody(message):
        """JSON-encode the timestamp/text payload of a chat message."""
        data = {"timestamp": message.timestamp, "message": message.message}
        return json.dumps(data).encode('utf-8')

    # ------------------------------------------------------------------
    # public API (signatures unchanged)
    # ------------------------------------------------------------------
    def sendMessage(self, message):
        """Unicast a chat message to message.toUsername."""
        msg = self._buildPacket(self._messageBody(message),
                                message.fromUsername,
                                self.info.get("groupID", ""))
        self._sendTo(message.toUsername, msg)

    def sendMessageBroadcast(self, message):
        """Broadcast a chat message to all known peers."""
        msg = self._buildPacket(self._messageBody(message),
                                message.fromUsername,
                                self.info.get("groupID", ""),
                                broadcast=True)
        self._broadcastAll(msg)

    def sendMessageGroup(self, message):
        """Broadcast a group chat message (tagged with the group's ID)."""
        msg = self._buildPacket(self._messageBody(message),
                                message.fromUsername, message.groupID,
                                broadcast=True, groupBroadcast=True)
        self._broadcastAll(msg)

    def sendGroupJoinRequest(self, request):
        """Broadcast a membership request targeting request.groupID."""
        body = json.dumps({"message": request.message}).encode('utf-8')
        # Uses self.info["groupID"] (KeyError if unset), matching the
        # original behaviour for join requests.
        msg = self._buildPacket(body, request.fromUsername,
                                self.info["groupID"],
                                desGroup=request.groupID,
                                broadcast=True, memberRq=True)
        self._broadcastAll(msg)

    def sendGroupAcknowledgeRequest(self, request):
        """Unicast an 'accepted' reply to a pending join request."""
        msg = self._buildPacket(b"", self.info["username"],
                                self.info["groupID"], ackRq=True)
        self._sendTo(request.fromUsername, msg)

    def sendGroupDenyRequest(self, request):
        """Unicast a 'denied' reply to a pending join request."""
        msg = self._buildPacket(b"", self.info["username"],
                                self.info["groupID"], denyRq=True)
        self._sendTo(request.fromUsername, msg)

    def sendGroupBroadcast(self):
        """Broadcast this node's full info dict (node report) to all peers."""
        body = json.dumps(self.info).encode('utf-8')
        msg = self._buildPacket(body, self.info["username"],
                                self.info["groupID"],
                                broadcast=True, nodeRep=True)
        self._broadcastAll(msg)
class SenderWorker(threading.Thread):
    """One-shot thread that blasts a single UDP datagram at a peer.

    The payload is prefixed with a random 32-byte tag so receivers can
    de-duplicate; the same datagram is re-sent for a short window to
    compensate for UDP loss.
    """

    # How long (seconds) to keep re-sending the datagram.
    SEND_WINDOW = 0.3
    # Pause between redundant re-sends so we don't busy-spin the CPU.
    RESEND_INTERVAL = 0.01
    # Fixed peer UDP port.
    PORT = 8421

    def __init__(self, addr, msg):
        threading.Thread.__init__(self)
        self.addr = addr
        # 256 random bits -> 32-byte de-duplication tag (big-endian,
        # equivalent to the old format/zfill/fromhex dance).
        self.packageHash = random.getrandbits(256).to_bytes(32, 'big')
        self.msg = self.packageHash + msg
        self.sock = None

    def run(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        start = time.time()
        logger.debug(
            f"On thread #{threading.get_ident()}, start connection attempt")
        # Accept either a bare host string or a (host, port) style tuple.
        if type(self.addr) not in [str, bytearray, bytes]:
            self.addr = self.addr[0]
        # BUG FIX: the original reset its timer inside the loop, so the
        # 0.3 s cutoff never fired and the thread spammed packets forever.
        # Time the window from `start` instead, sleeping briefly between
        # redundant re-sends (receivers dedupe via packageHash).
        while time.time() - start < self.SEND_WINDOW:
            self.sock.sendto(self.msg, (self.addr, self.PORT))
            time.sleep(self.RESEND_INTERVAL)
        logger.debug(f"Send complete using {time.time()-start} seconds")
        self.sock.close()
# Module logger: DEBUG level, mirrored to both a log file and the console.
logger = logging.getLogger('Sender')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
fh = logging.FileHandler("applog.log")
for _handler in (fh, ch):
    _handler.setFormatter(formatter)
    logger.addHandler(_handler)
|
normal
|
{
"blob_id": "67446f50d1c062eddcad282d3bf508967c5192fc",
"index": 4905,
"step-1": "<mask token>\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n <mask token>\n\n def sendMessageGroup(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': message.\n groupID, 'desGroup': '', 'admin': self.info.get('isAdmin', ''),\n 'member': self.info.get('isMember', ''), 'broadcast': True,\n 'groupBroadcast': True, 'memberRq': False, 'ackRq': False,\n 'denyRq': False, 'leaveRq': False, 'nodeRq': False, 'big': \n False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n <mask token>\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': True, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': 
len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n <mask token>\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': True, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(format(random.getrandbits(256),\n 'x').zfill(64))\n self.msg = self.packageHash + msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f'On thread #{threading.get_ident()}, start connection attempt')\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr, 'type: ', type(self\n .addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f'Send complete using {time.time() - start} seconds')\n self.sock.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageBroadcast(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageGroup(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': message.\n groupID, 'desGroup': '', 'admin': self.info.get('isAdmin', ''),\n 'member': self.info.get('isMember', ''), 'broadcast': True,\n 'groupBroadcast': True, 'memberRq': False, 'ackRq': False,\n 'denyRq': False, 'leaveRq': False, 
'nodeRq': False, 'big': \n False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupJoinRequest(self, request):\n data = {'message': request.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': request.fromUsername, 'srcGroup': self.\n info['groupID'], 'desGroup': request.groupID, 'admin': self.\n info.get('isAdmin', ''), 'member': self.info.get('isMember', ''\n ), 'broadcast': True, 'groupBroadcast': False, 'memberRq': True,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': True, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupDenyRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': True, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = 
packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': True, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(format(random.getrandbits(256),\n 'x').zfill(64))\n self.msg = self.packageHash + msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f'On thread #{threading.get_ident()}, start connection attempt')\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr, 'type: ', type(self\n .addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f'Send complete using {time.time() - start} seconds')\n self.sock.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageBroadcast(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageGroup(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': message.\n groupID, 'desGroup': '', 'admin': self.info.get('isAdmin', ''),\n 'member': self.info.get('isMember', ''), 'broadcast': True,\n 'groupBroadcast': True, 'memberRq': False, 'ackRq': False,\n 'denyRq': False, 'leaveRq': False, 
'nodeRq': False, 'big': \n False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupJoinRequest(self, request):\n data = {'message': request.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': request.fromUsername, 'srcGroup': self.\n info['groupID'], 'desGroup': request.groupID, 'admin': self.\n info.get('isAdmin', ''), 'member': self.info.get('isMember', ''\n ), 'broadcast': True, 'groupBroadcast': False, 'memberRq': True,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': True, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupDenyRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': True, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = 
packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': True, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(format(random.getrandbits(256),\n 'x').zfill(64))\n self.msg = self.packageHash + msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f'On thread #{threading.get_ident()}, start connection attempt')\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr, 'type: ', type(self\n .addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f'Send complete using {time.time() - start} seconds')\n self.sock.close()\n\n\n<mask token>\nlogger.setLevel(logging.DEBUG)\n<mask token>\nch.setFormatter(formatter)\n<mask token>\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n",
"step-4": "from network.utility import *\nfrom entities.message import Message, BroadcastMessage, GroupMessage\nfrom entities.node import Node\nfrom entities.group import GroupBroadcast\nfrom entities.request import Request\nimport threading\nimport time\nimport logging\nimport random\nimport json\nimport socket\nfrom services.user import UserService\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageBroadcast(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': self.\n info.get('groupID', ''), 'desGroup': '', 'admin': self.info.get\n ('isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageGroup(self, message):\n data = {'timestamp': message.timestamp, 'message': message.message}\n body = 
json.dumps(data).encode('utf-8')\n header = {'srcUsername': message.fromUsername, 'srcGroup': message.\n groupID, 'desGroup': '', 'admin': self.info.get('isAdmin', ''),\n 'member': self.info.get('isMember', ''), 'broadcast': True,\n 'groupBroadcast': True, 'memberRq': False, 'ackRq': False,\n 'denyRq': False, 'leaveRq': False, 'nodeRq': False, 'big': \n False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupJoinRequest(self, request):\n data = {'message': request.message}\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': request.fromUsername, 'srcGroup': self.\n info['groupID'], 'desGroup': request.groupID, 'admin': self.\n info.get('isAdmin', ''), 'member': self.info.get('isMember', ''\n ), 'broadcast': True, 'groupBroadcast': False, 'memberRq': True,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': True, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupDenyRequest(self, request):\n body = b''\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n 
info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': False, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': True, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': False, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {'srcUsername': self.info['username'], 'srcGroup': self.\n info['groupID'], 'desGroup': '', 'admin': self.info.get(\n 'isAdmin', ''), 'member': self.info.get('isMember', ''),\n 'broadcast': True, 'groupBroadcast': False, 'memberRq': False,\n 'ackRq': False, 'denyRq': False, 'leaveRq': False, 'nodeRq': \n False, 'big': False, 'nodeRep': True, 'contentLength': len(body)}\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(format(random.getrandbits(256),\n 'x').zfill(64))\n self.msg = self.packageHash + msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f'On thread #{threading.get_ident()}, start connection attempt')\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr, 'type: ', type(self\n .addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f'Send complete using {time.time() - 
start} seconds')\n self.sock.close()\n\n\nlogger = logging.getLogger('Sender')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nfh = logging.FileHandler('applog.log')\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n",
"step-5": "from network.utility import *\nfrom entities.message import Message, BroadcastMessage, GroupMessage\nfrom entities.node import Node\nfrom entities.group import GroupBroadcast\nfrom entities.request import Request\nimport threading\nimport time\nimport logging\nimport random\nimport json\nimport socket\nfrom services.user import UserService\n\n\nclass Sender:\n\n def __init__(self, reverseMap, info):\n self.reverseMap = reverseMap\n self.info = info\n\n def sendMessage(self, message):\n data = {\"timestamp\": message.timestamp, \"message\": message.message}\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": message.fromUsername,\n \"srcGroup\": self.info.get(\"groupID\", \"\"),\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": False,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(message.toUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendMessageBroadcast(self, message):\n data = {\"timestamp\": message.timestamp, \"message\": message.message}\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": message.fromUsername,\n \"srcGroup\": self.info.get(\"groupID\", \"\"),\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": True,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n 
worker.start()\n\n def sendMessageGroup(self, message):\n data = {\"timestamp\": message.timestamp, \"message\": message.message}\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": message.fromUsername,\n \"srcGroup\": message.groupID,\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": True,\n \"groupBroadcast\": True,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupJoinRequest(self, request):\n data = {\"message\": request.message}\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": request.fromUsername,\n \"srcGroup\": self.info[\"groupID\"],\n \"desGroup\": request.groupID,\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": True,\n \"groupBroadcast\": False,\n \"memberRq\": True,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupAcknowledgeRequest(self, request):\n body = b\"\"\n header = {\n \"srcUsername\": self.info[\"username\"],\n \"srcGroup\": self.info[\"groupID\"],\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": False,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": True,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n 
\"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupDenyRequest(self, request):\n body = b\"\"\n header = {\n \"srcUsername\": self.info[\"username\"],\n \"srcGroup\": self.info[\"groupID\"],\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": False,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": True,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": False,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n addr = self.reverseMap.get(request.fromUsername)\n worker = SenderWorker(addr, msg)\n worker.start()\n\n def sendGroupBroadcast(self):\n data = self.info\n body = json.dumps(data).encode('utf-8')\n header = {\n \"srcUsername\": self.info[\"username\"],\n \"srcGroup\": self.info[\"groupID\"],\n \"desGroup\": \"\",\n \"admin\": self.info.get(\"isAdmin\", \"\"),\n \"member\": self.info.get(\"isMember\", \"\"),\n \"broadcast\": True,\n \"groupBroadcast\": False,\n \"memberRq\": False,\n \"ackRq\": False,\n \"denyRq\": False,\n \"leaveRq\": False,\n \"nodeRq\": False,\n \"big\": False,\n \"nodeRep\": True,\n \"contentLength\": len(body),\n }\n packedHeader = packHeader(header)\n msg = packedHeader + body\n for addr in self.reverseMap.values():\n worker = SenderWorker(addr, msg)\n worker.start()\n\n\nclass SenderWorker(threading.Thread):\n\n def __init__(self, addr, msg):\n threading.Thread.__init__(self)\n self.addr = addr\n self.packageHash = bytes.fromhex(\n format(random.getrandbits(256), \"x\").zfill(64))\n self.msg = self.packageHash+msg\n self.sock = None\n\n def run(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n start = time.time()\n logger.debug(\n f\"On thread 
#{threading.get_ident()}, start connection attempt\")\n while True:\n iStart = time.time()\n if type(self.msg) not in [str, bytearray, bytes]:\n print('Sender worker msg: ', self.msg)\n if type(self.addr) not in [str, bytearray, bytes]:\n print('SenderWorker addr: ', self.addr,\n 'type: ', type(self.addr))\n self.addr = self.addr[0]\n self.sock.sendto(self.msg, (self.addr, 8421,))\n if time.time() - iStart > 0.3:\n break\n logger.debug(f\"Send complete using {time.time()-start} seconds\")\n self.sock.close()\n\n\nlogger = logging.getLogger('Sender')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nfh = logging.FileHandler(\"applog.log\")\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n",
"step-ids": [
9,
12,
13,
15,
16
]
}
|
[
9,
12,
13,
15,
16
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from machina.apps.forum_conversation.abstract_models import AbstractPost
from machina.apps.forum_conversation.abstract_models import AbstractTopic
from machina.core.db.models import model_factory
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Concrete Topic model materialized from django-machina's AbstractTopic.
Topic = model_factory(AbstractTopic)
class UserNotification(models.Model):
    """Per-user notification: a short message plus a relative link to the
    forum page it refers to."""

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    notification_content = models.CharField(max_length=100)
    # Relative URL into the forum (e.g. "/forum/<slug>-<id>/topic/...").
    notification_link = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Readable representation in the admin/shell instead of the default
        # "UserNotification object (pk)".
        return "{}: {}".format(self.user, self.notification_content)
class Post(AbstractPost):
    """Forum post that emits a ``UserNotification`` to its poster whenever
    its flag or vote counter changes across a save.

    The counters observed at instantiation time are cached so ``save()``
    can diff them against the current values.
    """

    # Snapshots of the counters taken in __init__ and refreshed after each
    # save; name-mangled to _Post__original_flags / _Post__original_votes.
    __original_flags = None
    __original_votes = None

    def __init__(self, *args, **kwargs):
        super(Post, self).__init__(*args, **kwargs)
        # Capture the counters as loaded so save() can detect changes.
        self.__original_flags = self.flag_count
        self.__original_votes = self.vote_count

    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        """Persist the post, then notify the poster about any flag/vote delta."""
        super(Post, self).save(force_insert, force_update, *args, **kwargs)
        # Deep link to this post inside its topic page (query + fragment).
        notification_link = "/forum/{}-{}/topic/{}-{}/?post={}#{}".format(self.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.topic.id, self.id, self.id)
        if self.__original_flags != self.flag_count:
            n = UserNotification(user=self.poster, notification_content="Flag updates on post {}".format(self.subject), notification_link=notification_link)
            n.save()
        if self.__original_votes != self.vote_count:
            n = UserNotification(user=self.poster, notification_content="Vote update on post {}".format(self.subject), notification_link=notification_link)
            n.save()
        # Re-baseline so a later save() on the same instance does not
        # re-report an already-notified delta.
        self.__original_flags = self.flag_count
        self.__original_votes = self.vote_count
class Userflags(models.Model):
    """One-to-one extension of ``User`` holding a flag counter."""

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Non-editable counter: excluded from model forms, maintained in code.
    flag_count = models.PositiveIntegerField(
        verbose_name=_('Flag count'), editable=False, blank=True, default=0)
@receiver(post_save, sender=User)
def create_userflags(sender, instance, created, **kwargs):
    """Attach a fresh Userflags row to every newly created User."""
    if not created:
        return
    Userflags.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_userflags(sender, instance, **kwargs):
    # Cascade every User save to its Userflags row (assumes the row exists;
    # it is created by the companion create_userflags receiver).
    instance.userflags.save()
@receiver(post_save, sender=Post)
def make_notifications(sender, instance, created, **kwargs):
    """Notify a topic's poster whenever any post in that topic is created
    or edited.

    Builds a deep link to the affected post and stores a UserNotification
    row for the topic owner.
    """
    user = instance.topic.poster
    # Deep link to the post inside its topic page (query + fragment).
    notification_link = "/forum/{}-{}/topic/{}-{}/?post={}#{}".format(
        instance.topic.forum.slug, instance.topic.forum.id,
        instance.topic.slug, instance.topic.id, instance.id, instance.id)
    if created:
        notification_content = "A new post was created on your topic {}".format(instance.topic.slug)
    else:
        # BUG FIX: user-facing message previously read "contetn".
        notification_content = "A post's content was edited on your topic {}".format(instance.topic.slug)
    n = UserNotification(user=user, notification_link=notification_link,
                         notification_content=notification_content)
    n.save()
|
normal
|
{
"blob_id": "1e81e0f3cb2fb25fdef08a913aa1ff77d0c2a562",
"index": 9204,
"step-1": "<mask token>\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n<mask token>\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, 
instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n",
"step-2": "<mask token>\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n 
notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n",
"step-3": "<mask token>\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = 
instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n",
"step-4": "from __future__ import unicode_literals\nfrom machina.apps.forum_conversation.abstract_models import AbstractPost\nfrom machina.apps.forum_conversation.abstract_models import AbstractTopic\nfrom machina.core.db.models import model_factory\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = 
models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom machina.apps.forum_conversation.abstract_models import AbstractPost\nfrom machina.apps.forum_conversation.abstract_models import AbstractTopic\nfrom machina.core.db.models import model_factory\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n\n notification_link = \"/forum/{}-{}/topic/{}-{}/?post={}#{}\".format(self.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\"Flag updates on post {}\".format(self.subject), notification_link=notification_link)\n n.save()\n\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\"Vote update on post {}\".format(self.subject), notification_link=notification_link)\n n.save()\n\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n\n flag_count = 
models.PositiveIntegerField(\n verbose_name=_('Flag count'), editable=False, blank=True, default=0)\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance) \n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save() \n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = \"You have a new notification\"\n notification_link = \"/forum/{}-{}/topic/{}-{}/?post={}#{}\".format(instance.topic.forum.slug, instance.topic.forum.id, instance.topic.slug, instance.topic.id, instance.id, instance.id)\n\n if created:\n notification_content = \"A new post was created on your topic {}\".format(instance.topic.slug)\n else:\n notification_content = \"A post's contetn was edited on your topic {}\".format(instance.topic.slug)\n\n n = UserNotification(user=user, notification_link=notification_link, notification_content=notification_content)\n n.save()\n",
"step-ids": [
10,
11,
12,
13,
14
]
}
|
[
10,
11,
12,
13,
14
] |
<|reserved_special_token_0|>
class OrderCreateView(CreateView):
template_name = 'orders/form.html'
form_class = OrderForm
success_url = '/'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrdersListView(ListView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class OrderCreateView(CreateView):
template_name = 'orders/form.html'
form_class = OrderForm
success_url = '/'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrdersListView(ListView):
template_name = 'orders/index.html'
queryset = Order.objects.all()
context_object_name = 'order_list'
class OrderCreateView(CreateView):
template_name = 'orders/form.html'
form_class = OrderForm
success_url = '/'
<|reserved_special_token_1|>
from django.views.generic import TemplateView, FormView, CreateView, ListView
from .models import Order
from .form import OrderForm
class OrdersListView(ListView):
    """List every Order via the orders/index.html template."""
    template_name = 'orders/index.html'
    # Base queryset; ListView clones/re-evaluates this per request.
    queryset = Order.objects.all()
    # Name the template uses to reach the orders.
    context_object_name = 'order_list'
class OrderCreateView(CreateView):
    """Render the OrderForm and, on success, create the Order and
    redirect to the site root."""
    template_name = 'orders/form.html'
    form_class = OrderForm
    success_url = '/'
|
flexible
|
{
"blob_id": "afd184962e8e69843ca518e140d5fdde3d7c9ed2",
"index": 7456,
"step-1": "<mask token>\n\n\nclass OrderCreateView(CreateView):\n template_name = 'orders/form.html'\n form_class = OrderForm\n success_url = '/'\n",
"step-2": "<mask token>\n\n\nclass OrdersListView(ListView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass OrderCreateView(CreateView):\n template_name = 'orders/form.html'\n form_class = OrderForm\n success_url = '/'\n",
"step-3": "<mask token>\n\n\nclass OrdersListView(ListView):\n template_name = 'orders/index.html'\n queryset = Order.objects.all()\n context_object_name = 'order_list'\n\n\nclass OrderCreateView(CreateView):\n template_name = 'orders/form.html'\n form_class = OrderForm\n success_url = '/'\n",
"step-4": "from django.views.generic import TemplateView, FormView, CreateView, ListView\nfrom .models import Order\nfrom .form import OrderForm\n\n\nclass OrdersListView(ListView):\n template_name = 'orders/index.html'\n queryset = Order.objects.all()\n context_object_name = 'order_list'\n\n\nclass OrderCreateView(CreateView):\n template_name = 'orders/form.html'\n form_class = OrderForm\n success_url = '/'\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
from sikuli import *
import logging
import myTools
from datetime import date
import reports_Compare
#---------------------------------------------------#
def fSet_BillDate(pMonth):
#---------------------------------------------------#
    """Drive the Revise Date dialog (via keystrokes) to the 27th of
    month *pMonth* of Settings.dataYear; month 13 is clamped to December.

    Relies on Sikuli globals (type, Key, Settings, ...) from the
    wildcard import and on the target window having focus.
    Settings.dataYear appears to be a string (concatenated above,
    int()-converted below) — TODO confirm.
    """
    # Month 13 is used by callers as an end-of-year sentinel; clamp it.
    if pMonth == 13:
        pMonth = 12
    logging.debug('- change bill date: ' + str(pMonth) + "/27/" + Settings.dataYear)
    time.sleep(1)
    # make sure timeslips has focus
    myTools.getFocus()
    # open revise date (Alt+B, then D)
    type("b",KeyModifier.ALT)
    type("d")
    time.sleep(2)
    # jump the date picker to today
    type("t")
    # Ctrl+Home: back to 01/01 of the current year
    type(Key.HOME,KeyModifier.CTRL)
    # Ctrl+PageUp once per year until we reach 01/01 of the data year
    thisYear = date.today().year
    for prevYear in range(int(Settings.dataYear),thisYear):
        type(Key.PAGE_UP,KeyModifier.CTRL)
        time.sleep(1)
    # move from the 1st to the 27th (4 rows down, 2 columns left)
    myTools.pressDOWN(4)
    myTools.pressLEFT(2)
    # PageDown once per month to land on pMonth/27
    for nextMonth in range(pMonth-1):
        type(Key.PAGE_DOWN)
        time.sleep(1)
    type(Key.ENTER)
    time.sleep(1)
#---------------------------------------------------#
def fRemove_Sort():
#---------------------------------------------------#
    """Clear the current sort: open the sort panel with F6, click the
    'remove sort' button image, then close the panel with F6 again.

    The sleeps pace the GUI; the image match is relaxed to 80%.
    """
    time.sleep(1)
    logging.debug('- remove sort')
    type(Key.F6)
    time.sleep(1)
    click(Pattern("remove_sort-1.png").similar(0.80))
    time.sleep(1)
    type(Key.F6)
    time.sleep(1)
#---------------------------------------------------#
def fPrint_BillRun(pMonth):
#---------------------------------------------------#
    """Print one month's bill run to a text file, approve the bills,
    diff the output against the baseline, and close the report window.

    The output file is Bill-<MM>-<tsVersion>.txt in Settings.repFolder.
    Driven entirely by keystrokes/image clicks via Sikuli globals.
    """
    reportName = "Bill-" + myTools.padZero(pMonth) + "-" + Settings.tsVersion + ".txt"
    logging.debug('fPrint_BillRun: ' + reportName)
    # Ctrl+B opens the billing dialog
    type("b",KeyModifier.CTRL)
    time.sleep(1)
    fRemove_Sort()
    myTools.enterSlipFilter(pMonth,"n")
    # print bills to text
    logging.debug('-- print')
    type(Key.ENTER)
    time.sleep(1)
    # fill in path and name; press ENTER
    type(Settings.repFolder + "\\" + reportName)
    time.sleep(1)
    type(Key.ENTER)
    time.sleep(1)
    # confirm overwrite if the file already exists
    if exists("replace_msg.png"):
        type("y")
    # approve bills (click the checkbox area inside the dialog image)
    logging.debug('-- approve')
    wait(Pattern("approve_bills-1.png").targetOffset(-100,-8),FOREVER)
    click(Pattern("approve_bills-1.png").targetOffset(-100,-8))
    type(Key.ENTER)
    time.sleep(3)
    # newer versions show a modal message; older ones a status bar
    if int(Settings.tsVersion) > 2015:
        wait("approving_bills.png",FOREVER)
        # poll until the "approving" message disappears
        while exists("approving_bills.png"):
            logging.debug('--- msg exists')
            time.sleep(2)
    else:
        waitVanish("approving_statusbar.png",FOREVER)
    time.sleep(1)
    # compare the report with baseline
    reports_Compare.Compare_OneReport(reportName)
    # close report entry / don't save (Ctrl+F4, answer "no")
    logging.debug('-- close report window')
    click("report_generate_bills.png")
    type(Key.F4,KeyModifier.CTRL)
    time.sleep(2)
    type("n")
    time.sleep(1)
#---------------------------------------------------#
def fPrint_Bills(pMonth):
#---------------------------------------------------#
    """Full bill cycle for one month: stamp the timing section, set the
    bill date, then run the print/approve/compare pass."""
    section_name = "bills" + str(pMonth)
    myTools.sectionStartTimeStamp(section_name)
    logging.debug('Print_Bills: ' + str(pMonth))
    fSet_BillDate(pMonth)
    fPrint_BillRun(pMonth)
    myTools.sectionEndTimeStamp()
|
normal
|
{
"blob_id": "69721dca0f5d8396e330696cde52bfabad33c895",
"index": 3242,
"step-1": "<mask token>\n\n\ndef fSet_BillDate(pMonth):\n if pMonth == 13:\n pMonth = 12\n logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.\n dataYear)\n time.sleep(1)\n myTools.getFocus()\n type('b', KeyModifier.ALT)\n type('d')\n time.sleep(2)\n type('t')\n type(Key.HOME, KeyModifier.CTRL)\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear), thisYear):\n type(Key.PAGE_UP, KeyModifier.CTRL)\n time.sleep(1)\n myTools.pressDOWN(4)\n myTools.pressLEFT(2)\n for nextMonth in range(pMonth - 1):\n type(Key.PAGE_DOWN)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n\n\ndef fRemove_Sort():\n time.sleep(1)\n logging.debug('- remove sort')\n type(Key.F6)\n time.sleep(1)\n click(Pattern('remove_sort-1.png').similar(0.8))\n time.sleep(1)\n type(Key.F6)\n time.sleep(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fSet_BillDate(pMonth):\n if pMonth == 13:\n pMonth = 12\n logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.\n dataYear)\n time.sleep(1)\n myTools.getFocus()\n type('b', KeyModifier.ALT)\n type('d')\n time.sleep(2)\n type('t')\n type(Key.HOME, KeyModifier.CTRL)\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear), thisYear):\n type(Key.PAGE_UP, KeyModifier.CTRL)\n time.sleep(1)\n myTools.pressDOWN(4)\n myTools.pressLEFT(2)\n for nextMonth in range(pMonth - 1):\n type(Key.PAGE_DOWN)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n\n\ndef fRemove_Sort():\n time.sleep(1)\n logging.debug('- remove sort')\n type(Key.F6)\n time.sleep(1)\n click(Pattern('remove_sort-1.png').similar(0.8))\n time.sleep(1)\n type(Key.F6)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef fPrint_Bills(pMonth):\n myTools.sectionStartTimeStamp('bills' + str(pMonth))\n logging.debug('Print_Bills: ' + str(pMonth))\n fSet_BillDate(pMonth)\n fPrint_BillRun(pMonth)\n myTools.sectionEndTimeStamp()\n",
"step-3": "<mask token>\n\n\ndef fSet_BillDate(pMonth):\n if pMonth == 13:\n pMonth = 12\n logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.\n dataYear)\n time.sleep(1)\n myTools.getFocus()\n type('b', KeyModifier.ALT)\n type('d')\n time.sleep(2)\n type('t')\n type(Key.HOME, KeyModifier.CTRL)\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear), thisYear):\n type(Key.PAGE_UP, KeyModifier.CTRL)\n time.sleep(1)\n myTools.pressDOWN(4)\n myTools.pressLEFT(2)\n for nextMonth in range(pMonth - 1):\n type(Key.PAGE_DOWN)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n\n\ndef fRemove_Sort():\n time.sleep(1)\n logging.debug('- remove sort')\n type(Key.F6)\n time.sleep(1)\n click(Pattern('remove_sort-1.png').similar(0.8))\n time.sleep(1)\n type(Key.F6)\n time.sleep(1)\n\n\ndef fPrint_BillRun(pMonth):\n reportName = 'Bill-' + myTools.padZero(pMonth\n ) + '-' + Settings.tsVersion + '.txt'\n logging.debug('fPrint_BillRun: ' + reportName)\n type('b', KeyModifier.CTRL)\n time.sleep(1)\n fRemove_Sort()\n myTools.enterSlipFilter(pMonth, 'n')\n logging.debug('-- print')\n type(Key.ENTER)\n time.sleep(1)\n type(Settings.repFolder + '\\\\' + reportName)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n if exists('replace_msg.png'):\n type('y')\n logging.debug('-- approve')\n wait(Pattern('approve_bills-1.png').targetOffset(-100, -8), FOREVER)\n click(Pattern('approve_bills-1.png').targetOffset(-100, -8))\n type(Key.ENTER)\n time.sleep(3)\n if int(Settings.tsVersion) > 2015:\n wait('approving_bills.png', FOREVER)\n while exists('approving_bills.png'):\n logging.debug('--- msg exists')\n time.sleep(2)\n else:\n waitVanish('approving_statusbar.png', FOREVER)\n time.sleep(1)\n reports_Compare.Compare_OneReport(reportName)\n logging.debug('-- close report window')\n click('report_generate_bills.png')\n type(Key.F4, KeyModifier.CTRL)\n time.sleep(2)\n type('n')\n time.sleep(1)\n\n\ndef fPrint_Bills(pMonth):\n 
myTools.sectionStartTimeStamp('bills' + str(pMonth))\n logging.debug('Print_Bills: ' + str(pMonth))\n fSet_BillDate(pMonth)\n fPrint_BillRun(pMonth)\n myTools.sectionEndTimeStamp()\n",
"step-4": "from sikuli import *\nimport logging\nimport myTools\nfrom datetime import date\nimport reports_Compare\n\n\ndef fSet_BillDate(pMonth):\n if pMonth == 13:\n pMonth = 12\n logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.\n dataYear)\n time.sleep(1)\n myTools.getFocus()\n type('b', KeyModifier.ALT)\n type('d')\n time.sleep(2)\n type('t')\n type(Key.HOME, KeyModifier.CTRL)\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear), thisYear):\n type(Key.PAGE_UP, KeyModifier.CTRL)\n time.sleep(1)\n myTools.pressDOWN(4)\n myTools.pressLEFT(2)\n for nextMonth in range(pMonth - 1):\n type(Key.PAGE_DOWN)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n\n\ndef fRemove_Sort():\n time.sleep(1)\n logging.debug('- remove sort')\n type(Key.F6)\n time.sleep(1)\n click(Pattern('remove_sort-1.png').similar(0.8))\n time.sleep(1)\n type(Key.F6)\n time.sleep(1)\n\n\ndef fPrint_BillRun(pMonth):\n reportName = 'Bill-' + myTools.padZero(pMonth\n ) + '-' + Settings.tsVersion + '.txt'\n logging.debug('fPrint_BillRun: ' + reportName)\n type('b', KeyModifier.CTRL)\n time.sleep(1)\n fRemove_Sort()\n myTools.enterSlipFilter(pMonth, 'n')\n logging.debug('-- print')\n type(Key.ENTER)\n time.sleep(1)\n type(Settings.repFolder + '\\\\' + reportName)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n if exists('replace_msg.png'):\n type('y')\n logging.debug('-- approve')\n wait(Pattern('approve_bills-1.png').targetOffset(-100, -8), FOREVER)\n click(Pattern('approve_bills-1.png').targetOffset(-100, -8))\n type(Key.ENTER)\n time.sleep(3)\n if int(Settings.tsVersion) > 2015:\n wait('approving_bills.png', FOREVER)\n while exists('approving_bills.png'):\n logging.debug('--- msg exists')\n time.sleep(2)\n else:\n waitVanish('approving_statusbar.png', FOREVER)\n time.sleep(1)\n reports_Compare.Compare_OneReport(reportName)\n logging.debug('-- close report window')\n click('report_generate_bills.png')\n type(Key.F4, KeyModifier.CTRL)\n 
time.sleep(2)\n type('n')\n time.sleep(1)\n\n\ndef fPrint_Bills(pMonth):\n myTools.sectionStartTimeStamp('bills' + str(pMonth))\n logging.debug('Print_Bills: ' + str(pMonth))\n fSet_BillDate(pMonth)\n fPrint_BillRun(pMonth)\n myTools.sectionEndTimeStamp()\n",
"step-5": "from sikuli import *\nimport logging\nimport myTools\nfrom datetime import date\nimport reports_Compare\n\n#---------------------------------------------------#\ndef fSet_BillDate(pMonth):\n#---------------------------------------------------#\n\n if pMonth == 13:\n pMonth = 12 \n\n logging.debug('- change bill date: ' + str(pMonth) + \"/27/\" + Settings.dataYear)\n time.sleep(1)\n\n # make sure timeslips has focus\n myTools.getFocus()\n\n # open revise date\n type(\"b\",KeyModifier.ALT)\n type(\"d\") \n time.sleep(2)\n\n # go to today\n type(\"t\")\n\n #get to 01/01 of current year\n type(Key.HOME,KeyModifier.CTRL) \n\n # get to 01/01 of the data year\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear),thisYear):\n type(Key.PAGE_UP,KeyModifier.CTRL) \n time.sleep(1)\n\n # get to 01/27 of the data year\n myTools.pressDOWN(4)\n myTools.pressLEFT(2) \n\n for nextMonth in range(pMonth-1):\n type(Key.PAGE_DOWN) \n time.sleep(1)\n \n type(Key.ENTER)\n time.sleep(1) \n\n#---------------------------------------------------#\ndef fRemove_Sort():\n#---------------------------------------------------#\n\n time.sleep(1)\n logging.debug('- remove sort')\n \n type(Key.F6)\n time.sleep(1)\n\n click(Pattern(\"remove_sort-1.png\").similar(0.80))\n time.sleep(1)\n \n type(Key.F6)\n time.sleep(1)\n\n#---------------------------------------------------#\ndef fPrint_BillRun(pMonth):\n#---------------------------------------------------#\n \n reportName = \"Bill-\" + myTools.padZero(pMonth) + \"-\" + Settings.tsVersion + \".txt\" \n logging.debug('fPrint_BillRun: ' + reportName)\n\n type(\"b\",KeyModifier.CTRL)\n time.sleep(1)\n\n fRemove_Sort()\n myTools.enterSlipFilter(pMonth,\"n\")\n\n # print bills to text\n logging.debug('-- print') \n type(Key.ENTER) \n time.sleep(1)\n\n # fill in path and name; press ENTER\n type(Settings.repFolder + \"\\\\\" + reportName)\n time.sleep(1)\n type(Key.ENTER) \n time.sleep(1)\n\n if 
exists(\"replace_msg.png\"):\n type(\"y\")\n\n # approve bills\n logging.debug('-- approve') \n wait(Pattern(\"approve_bills-1.png\").targetOffset(-100,-8),FOREVER)\n click(Pattern(\"approve_bills-1.png\").targetOffset(-100,-8))\n type(Key.ENTER)\n time.sleep(3)\n\n if int(Settings.tsVersion) > 2015:\n wait(\"approving_bills.png\",FOREVER) \n while exists(\"approving_bills.png\"):\n logging.debug('--- msg exists')\n time.sleep(2)\n else:\n waitVanish(\"approving_statusbar.png\",FOREVER) \n time.sleep(1)\n\n # compare the report with baseline\n reports_Compare.Compare_OneReport(reportName)\n\n # close report entry / don't save\n logging.debug('-- close report window')\n click(\"report_generate_bills.png\")\n type(Key.F4,KeyModifier.CTRL)\n time.sleep(2)\n type(\"n\") \n time.sleep(1)\n\n#---------------------------------------------------#\ndef fPrint_Bills(pMonth):\n#---------------------------------------------------#\n\n myTools.sectionStartTimeStamp(\"bills\" + str(pMonth))\n logging.debug('Print_Bills: ' + str(pMonth))\n \n fSet_BillDate(pMonth)\n fPrint_BillRun(pMonth)\n myTools.sectionEndTimeStamp()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class User:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def greet_user(self):
if self.gender.lower() == 'male':
print('Greetings, Mr. ' + self.last_name.title() + '!')
elif self.gender.lower() == 'female':
print('Greetings, Miss ' + self.last_name.title() + '!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class User:
<|reserved_special_token_0|>
def describe_user(self):
print('The name of the user is ' + self.full_name + '.')
print("The user's gender is " + self.gender + '.')
print('The user is ' + str(self.age) + ' years old.')
def greet_user(self):
if self.gender.lower() == 'male':
print('Greetings, Mr. ' + self.last_name.title() + '!')
elif self.gender.lower() == 'female':
print('Greetings, Miss ' + self.last_name.title() + '!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class User:
def __init__(self, first, last, gender, age):
self.first_name = first
self.last_name = last
self.gender = gender
self.age = age
self.full_name = self.first_name + ' ' + self.last_name
def describe_user(self):
print('The name of the user is ' + self.full_name + '.')
print("The user's gender is " + self.gender + '.')
print('The user is ' + str(self.age) + ' years old.')
def greet_user(self):
if self.gender.lower() == 'male':
print('Greetings, Mr. ' + self.last_name.title() + '!')
elif self.gender.lower() == 'female':
print('Greetings, Miss ' + self.last_name.title() + '!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class User:
def __init__(self, first, last, gender, age):
self.first_name = first
self.last_name = last
self.gender = gender
self.age = age
self.full_name = self.first_name + ' ' + self.last_name
def describe_user(self):
print('The name of the user is ' + self.full_name + '.')
print("The user's gender is " + self.gender + '.')
print('The user is ' + str(self.age) + ' years old.')
def greet_user(self):
if self.gender.lower() == 'male':
print('Greetings, Mr. ' + self.last_name.title() + '!')
elif self.gender.lower() == 'female':
print('Greetings, Miss ' + self.last_name.title() + '!')
user1 = User('zhichao', 'li', 'male', 27)
user2 = User('juan', 'zhang', 'female', 28)
user3 = User('Tian', 'ZHANG', 'male', 26)
user1.describe_user()
user1.greet_user()
user2.describe_user()
user2.greet_user()
user3.describe_user()
user3.greet_user()
<|reserved_special_token_1|>
class User:
    """A simple user record with describe/greet console helpers."""

    def __init__(self, first, last, gender, age):
        self.first_name = first
        self.last_name = last
        self.gender = gender
        self.age = age
        self.full_name = f"{first} {last}"

    def describe_user(self):
        """Print a short three-line profile of the user."""
        print(f"The name of the user is {self.full_name}.")
        print(f"The user's gender is {self.gender}.")
        print(f"The user is {self.age} years old.")

    def greet_user(self):
        """Print a gender-appropriate greeting (silent for other values)."""
        gender = self.gender.lower()
        if gender == "male":
            print(f"Greetings, Mr. {self.last_name.title()}!")
        elif gender == "female":
            print(f"Greetings, Miss {self.last_name.title()}!")
# Three sample users; "Tian"/"ZHANG" exercises the .title() normalization
# performed inside greet_user.
user1 = User("zhichao", "li", "male", 27)
user2 = User("juan", "zhang", "female", 28)
user3 = User("Tian", "ZHANG", "male", 26)
# Print each user's profile followed by a personalized greeting.
user1.describe_user()
user1.greet_user()
user2.describe_user()
user2.greet_user()
user3.describe_user()
user3.greet_user()
|
flexible
|
{
"blob_id": "93b712c60ba4bfa81d967ec59035b6fb7793ce87",
"index": 1974,
"step-1": "class User:\n <mask token>\n <mask token>\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-2": "class User:\n <mask token>\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-3": "class User:\n\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + ' ' + self.last_name\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-4": "class User:\n\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + ' ' + self.last_name\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\nuser1 = User('zhichao', 'li', 'male', 27)\nuser2 = User('juan', 'zhang', 'female', 28)\nuser3 = User('Tian', 'ZHANG', 'male', 26)\nuser1.describe_user()\nuser1.greet_user()\nuser2.describe_user()\nuser2.greet_user()\nuser3.describe_user()\nuser3.greet_user()\n",
"step-5": "class User():\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + \" \" + self.last_name\n\n def describe_user(self):\n print(\"The name of the user is \" + self.full_name + \".\")\n print(\"The user's gender is \" + self.gender + \".\")\n print(\"The user is \" + str(self.age) + \" years old.\")\n\n def greet_user(self):\n if self.gender.lower() == \"male\":\n print(\"Greetings, Mr. \" + self.last_name.title() + \"!\")\n elif self.gender.lower() == \"female\":\n print(\"Greetings, Miss \" + self.last_name.title() + \"!\")\n\n\nuser1 = User(\"zhichao\", \"li\", \"male\", 27)\nuser2 = User(\"juan\", \"zhang\", \"female\", 28)\nuser3 = User(\"Tian\", \"ZHANG\", \"male\", 26)\n\nuser1.describe_user()\nuser1.greet_user()\nuser2.describe_user()\nuser2.greet_user()\nuser3.describe_user()\nuser3.greet_user()\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
from queue import Queue
class Stack:
    """LIFO stack built from two FIFO queues: O(n) push, O(1) pop/top."""

    def __init__(self):
        # q1 always holds the elements in stack (newest-first) order;
        # q2 is scratch space used only during push.
        self.q1 = Queue()
        self.q2 = Queue()

    def empty(self):
        """Return True when the stack holds no elements."""
        return self.q1.empty()

    def push(self, element):
        """Insert *element* so it becomes the next item popped."""
        if not self.empty():
            # Stage the new element first, then move the existing elements
            # behind it, so q2 ends up in stack order; finally promote q2.
            self.q2.enqueue(element)
            while not self.q1.empty():
                self.q2.enqueue(self.q1.dequeue())
            self.q1, self.q2 = self.q2, self.q1
        else:
            self.q1.enqueue(element)

    def pop(self):
        """Remove and return the most recently pushed element."""
        return self.q1.dequeue()

    def top(self):
        """Return, without removing, the most recently pushed element."""
        # Reaches into the queue's linked-list internals (sentinel head node);
        # assumes the project's Queue exposes head.next.element — TODO confirm.
        return self.q1.head.next.element

    def __repr__(self):
        """Delegate to the primary queue's textual form."""
        return str(self.q1)
def test_stack():
    """Push four values, check the rendering, then pop them back LIFO."""
    stack = Stack()
    for value in (1, 2, 3, 4):
        stack.push(value)
    assert str(stack) == 'head > 4 > 3 > 2 > 1 > '
    # Elements must come back newest-first.
    for expected in (4, 3, 2, 1):
        assert stack.pop() == expected


if __name__ == '__main__':
    test_stack()
|
normal
|
{
"blob_id": "4f5f4aadfeabb13790b417b334c5f73c6d0345a7",
"index": 9256,
"step-1": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n <mask token>\n\n def top(self):\n return self.q1.head.next.element\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n\n def pop(self):\n return self.q1.dequeue()\n\n def top(self):\n return self.q1.head.next.element\n\n def __repr__(self):\n return str(self.q1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n\n def pop(self):\n return self.q1.dequeue()\n\n def top(self):\n return self.q1.head.next.element\n\n def __repr__(self):\n return str(self.q1)\n\n\ndef test_stack():\n s = Stack()\n s.push(1)\n s.push(2)\n s.push(3)\n s.push(4)\n assert str(s) == 'head > 4 > 3 > 2 > 1 > '\n assert s.pop() == 4\n assert s.pop() == 3\n assert s.pop() == 2\n assert s.pop() == 1\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n\n def pop(self):\n return self.q1.dequeue()\n\n def top(self):\n return self.q1.head.next.element\n\n def __repr__(self):\n return str(self.q1)\n\n\ndef test_stack():\n s = Stack()\n s.push(1)\n s.push(2)\n s.push(3)\n s.push(4)\n assert str(s) == 'head > 4 > 3 > 2 > 1 > '\n assert s.pop() == 4\n assert s.pop() == 3\n assert s.pop() == 2\n assert s.pop() == 1\n\n\nif __name__ == '__main__':\n test_stack()\n",
"step-5": "from queue import Queue\n\n\nclass Stack:\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n\n def pop(self):\n return self.q1.dequeue()\n\n def top(self):\n return self.q1.head.next.element\n\n def __repr__(self):\n return str(self.q1)\n\n\ndef test_stack():\n s = Stack()\n s.push(1)\n s.push(2)\n s.push(3)\n s.push(4)\n assert str(s) == 'head > 4 > 3 > 2 > 1 > '\n assert s.pop() == 4\n assert s.pop() == 3\n assert s.pop() == 2\n assert s.pop() == 1\n\n\nif __name__ == '__main__':\n test_stack()",
"step-ids": [
5,
7,
8,
9,
11
]
}
|
[
5,
7,
8,
9,
11
] |
"""Test an example."""
from . import main
def test_readme_escaping() ->None:
    """Verify the demo produces the escaped markup shown in the README."""
    expected = "<div><span>Escaping</span></div>"
    result = main()
    assert result == expected
|
normal
|
{
"blob_id": "7b459aad399a31f61b8686e1919b38d5538924b8",
"index": 2014,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_readme_escaping() ->None:\n \"\"\"Ensure the demo matches expected.\"\"\"\n assert main() == '<div><span>Escaping</span></div>'\n",
"step-3": "<mask token>\nfrom . import main\n\n\ndef test_readme_escaping() ->None:\n \"\"\"Ensure the demo matches expected.\"\"\"\n assert main() == '<div><span>Escaping</span></div>'\n",
"step-4": "\"\"\"Test an example.\"\"\"\nfrom . import main\n\n\ndef test_readme_escaping() -> None:\n \"\"\"Ensure the demo matches expected.\"\"\"\n assert main() == \"<div><span>Escaping</span></div>\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Board:
    # A randomized Catan-style board: 19 terrain hexes wired to 72 edges and
    # 54 intersections; resources, roll numbers, and ports are shuffled and
    # the robber starts on the desert tile.
    def __init__(self):
        """
        Build a randomized board.

        Do not forget to ensure 6 and 8 are not next to each other:
        no 6-6 no 6-8 no 8-8
        (NOTE(review): the shuffles below do not yet enforce this rule.)
        """
        # 19 tiles: one desert, 3 brick, 3 ore, 4 hay, 4 wood, 4 sheep.
        self.board_resources = np.array([res_dict['desert']] + [res_dict[
            'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +
            [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)
        np.random.shuffle(self.board_resources)
        # Dice tokens; 0 is a placeholder later paired with the desert,
        # which never produces.
        self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,
            9, 10, 10, 11, 11, 12])
        np.random.shuffle(self.roll_numbers)
        # Four generic 3:1 ports plus one 2:1 port per resource.
        self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[
            '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +
            [port_dict['2wood:1']] + [port_dict['2sheep:1']])
        np.random.shuffle(self.ports)
        # np.where returns a tuple of index arrays; used below to pair the
        # 0 token with the desert tile.
        zero_tile_nr = np.where(self.roll_numbers == 0)
        desert_tile_nr = np.where(self.board_resources == res_dict['desert'])
        # Robber starts on the desert; terrain keys are 1-based, hence +1.
        self.robber = desert_tile_nr[0][0] + 1
        # Swap resources so the desert lines up with the unrollable 0 token.
        self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr
            ] = self.board_resources[desert_tile_nr], self.board_resources[
            zero_tile_nr]
        # Create bare components first, then wire them together, because the
        # objects reference each other circularly.
        self.edges = self.initialize_edges()
        self.intersections = self.initialize_intersections()
        self.terrains = self.initialize_terrains()
        self.assign_specs()
        """
        Cards are initialized and tracked in catan.py
        self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)
        self.dev_cards=random.shuffle(dev_cards)
        """
    def __str__(self):
        """Render the numbered hex diagram plus each terrain's contents."""
        s = '\nThe board is arranged as follows:\n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |01|02|03| \n'
        s += ' \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |04|05|06|07| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ /\\ \n'
        s += '|08|09|10|11|12| \n'
        s += ' \\/ \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |13|14|15|16| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |17|18|19| \n'
        s += ' \\/ \\/ \\/ \n'
        s += 'Following is the content of each terrain:\n\n'
        # Terrain dict keys match the tile numbers in the diagram above.
        for item in self.terrains:
            if self.robber == item:
                s += '\nRobber is on the following tile (number {0})'.format(
                    self.terrains[item].identifier)
            s += str(self.terrains[item])
        return s
    def initialize_edges(self):
        """Create edges 1-72 with empty links; wired up in assign_specs."""
        edges = {}
        for x in range(1, 73):
            edges[x] = Edge(x, intersections=[], terrains=[])
        return edges
    def initialize_intersections(self):
        """Create intersections 1-54 with empty links; wired up later."""
        intersections = {}
        for x in range(1, 55):
            intersections[x] = Intersection(x, edges=[], terrains=[])
        return intersections
    def initialize_terrains(self):
        """Create terrains 1-19 with placeholder resource data."""
        terrains = {}
        for x in range(1, 20):
            terrains[x] = Terrain(x, x, 0)
        return terrains
    def assign_specs(self) ->None:
        """Cross-link terrains, edges, and intersections per board_specs;
        give each terrain its shuffled resource and roll number, and attach
        ports to the intersections that have them."""
        # terrains_specs items: (terrain key, edge keys, intersection keys).
        for item in terrains_specs:
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].terrains.append(self.terrains[item[0]])
            local_intersections = []
            for subitem in item[2]:
                local_intersections.append(self.intersections[subitem])
                self.intersections[subitem].terrains.append(self.terrains[
                    item[0]])
            self.terrains[item[0]].edges = tuple(local_egdes)
            self.terrains[item[0]].intersections = tuple(local_intersections)
            # Dict keys are 1-based while the shuffled arrays are 0-based.
            self.terrains[item[0]].resource = self.board_resources[item[0] - 1]
            self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1
                ]
        # intersections_specs items: (key, edge keys[, port index]).
        for item in intersections_specs:
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].intersections.append(self.intersections
                    [item[0]])
            self.intersections[item[0]].edges = local_egdes
            if len(item) == 3:
                self.intersections[item[0]].port = self.ports[item[2]]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Board:
    # A randomized Catan-style board: 19 terrain hexes wired to 72 edges and
    # 54 intersections; resources, roll numbers, and ports are shuffled and
    # the robber starts on the desert tile.
    def __init__(self):
        """
        Build a randomized board.

        Do not forget to ensure 6 and 8 are not next to each other:
        no 6-6 no 6-8 no 8-8
        (NOTE(review): the shuffles below do not yet enforce this rule.)
        """
        # 19 tiles: one desert, 3 brick, 3 ore, 4 hay, 4 wood, 4 sheep.
        self.board_resources = np.array([res_dict['desert']] + [res_dict[
            'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +
            [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)
        np.random.shuffle(self.board_resources)
        # Dice tokens; 0 is a placeholder later paired with the desert,
        # which never produces.
        self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,
            9, 10, 10, 11, 11, 12])
        np.random.shuffle(self.roll_numbers)
        # Four generic 3:1 ports plus one 2:1 port per resource.
        self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[
            '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +
            [port_dict['2wood:1']] + [port_dict['2sheep:1']])
        np.random.shuffle(self.ports)
        # np.where returns a tuple of index arrays; used below to pair the
        # 0 token with the desert tile.
        zero_tile_nr = np.where(self.roll_numbers == 0)
        desert_tile_nr = np.where(self.board_resources == res_dict['desert'])
        # Robber starts on the desert; terrain keys are 1-based, hence +1.
        self.robber = desert_tile_nr[0][0] + 1
        # Swap resources so the desert lines up with the unrollable 0 token.
        self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr
            ] = self.board_resources[desert_tile_nr], self.board_resources[
            zero_tile_nr]
        # Create bare components first, then wire them together, because the
        # objects reference each other circularly.
        self.edges = self.initialize_edges()
        self.intersections = self.initialize_intersections()
        self.terrains = self.initialize_terrains()
        self.assign_specs()
        """
        Cards are initialized and tracked in catan.py
        self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)
        self.dev_cards=random.shuffle(dev_cards)
        """
    def __str__(self):
        """Render the numbered hex diagram plus each terrain's contents."""
        s = '\nThe board is arranged as follows:\n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |01|02|03| \n'
        s += ' \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |04|05|06|07| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ /\\ \n'
        s += '|08|09|10|11|12| \n'
        s += ' \\/ \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |13|14|15|16| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |17|18|19| \n'
        s += ' \\/ \\/ \\/ \n'
        s += 'Following is the content of each terrain:\n\n'
        # Terrain dict keys match the tile numbers in the diagram above.
        for item in self.terrains:
            if self.robber == item:
                s += '\nRobber is on the following tile (number {0})'.format(
                    self.terrains[item].identifier)
            s += str(self.terrains[item])
        return s
    def initialize_edges(self):
        """Create edges 1-72 with empty links; wired up in assign_specs."""
        edges = {}
        for x in range(1, 73):
            edges[x] = Edge(x, intersections=[], terrains=[])
        return edges
    def initialize_intersections(self):
        """Create intersections 1-54 with empty links; wired up later."""
        intersections = {}
        for x in range(1, 55):
            intersections[x] = Intersection(x, edges=[], terrains=[])
        return intersections
    def initialize_terrains(self):
        """Create terrains 1-19 with placeholder resource data."""
        terrains = {}
        for x in range(1, 20):
            terrains[x] = Terrain(x, x, 0)
        return terrains
    def assign_specs(self) ->None:
        """Cross-link terrains, edges, and intersections per board_specs;
        give each terrain its shuffled resource and roll number, and attach
        ports to the intersections that have them."""
        # terrains_specs items: (terrain key, edge keys, intersection keys).
        for item in terrains_specs:
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].terrains.append(self.terrains[item[0]])
            local_intersections = []
            for subitem in item[2]:
                local_intersections.append(self.intersections[subitem])
                self.intersections[subitem].terrains.append(self.terrains[
                    item[0]])
            self.terrains[item[0]].edges = tuple(local_egdes)
            self.terrains[item[0]].intersections = tuple(local_intersections)
            # Dict keys are 1-based while the shuffled arrays are 0-based.
            self.terrains[item[0]].resource = self.board_resources[item[0] - 1]
            self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1
                ]
        # intersections_specs items: (key, edge keys[, port index]).
        for item in intersections_specs:
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].intersections.append(self.intersections
                    [item[0]])
            self.intersections[item[0]].edges = local_egdes
            if len(item) == 3:
                self.intersections[item[0]].port = self.ports[item[2]]
    """
    Cards are initialized and tracked in catan.py
    def buy_dev_card(self,current_player):
        # pop the card from the dev card and add it to the players dev cards
        #TODO need to see if you can purchase not sure how to use that method
        self.card=dev_cards.pop()
        player(current_player).development_cards.insert(card)
        player(current_player).resource_cards.remove('sheep')
        player(current_player).resource_cards.remove('wheat')
        player(current_player).resource_cards.remove('ore')
    """
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Board:
    # A randomized Catan-style board: 19 terrain hexes wired to 72 edges and
    # 54 intersections; resources, roll numbers, and ports are shuffled and
    # the robber starts on the desert tile.
    def __init__(self):
        """
        Build a randomized board.

        Do not forget to ensure 6 and 8 are not next to each other:
        no 6-6 no 6-8 no 8-8
        (NOTE(review): the shuffles below do not yet enforce this rule.)
        """
        # 19 tiles: one desert, 3 brick, 3 ore, 4 hay, 4 wood, 4 sheep.
        self.board_resources = np.array([res_dict['desert']] + [res_dict[
            'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +
            [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)
        np.random.shuffle(self.board_resources)
        # Dice tokens; 0 is a placeholder later paired with the desert,
        # which never produces.
        self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,
            9, 10, 10, 11, 11, 12])
        np.random.shuffle(self.roll_numbers)
        # Four generic 3:1 ports plus one 2:1 port per resource.
        self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[
            '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +
            [port_dict['2wood:1']] + [port_dict['2sheep:1']])
        np.random.shuffle(self.ports)
        # np.where returns a tuple of index arrays; used below to pair the
        # 0 token with the desert tile.
        zero_tile_nr = np.where(self.roll_numbers == 0)
        desert_tile_nr = np.where(self.board_resources == res_dict['desert'])
        # Robber starts on the desert; terrain keys are 1-based, hence +1.
        self.robber = desert_tile_nr[0][0] + 1
        # Swap resources so the desert lines up with the unrollable 0 token.
        self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr
            ] = self.board_resources[desert_tile_nr], self.board_resources[
            zero_tile_nr]
        # Create bare components first, then wire them together, because the
        # objects reference each other circularly.
        self.edges = self.initialize_edges()
        self.intersections = self.initialize_intersections()
        self.terrains = self.initialize_terrains()
        self.assign_specs()
        """
        Cards are initialized and tracked in catan.py
        self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)
        self.dev_cards=random.shuffle(dev_cards)
        """
    def __str__(self):
        """Render the numbered hex diagram plus each terrain's contents."""
        s = '\nThe board is arranged as follows:\n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |01|02|03| \n'
        s += ' \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |04|05|06|07| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ /\\ \n'
        s += '|08|09|10|11|12| \n'
        s += ' \\/ \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |13|14|15|16| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |17|18|19| \n'
        s += ' \\/ \\/ \\/ \n'
        s += 'Following is the content of each terrain:\n\n'
        # Terrain dict keys match the tile numbers in the diagram above.
        for item in self.terrains:
            if self.robber == item:
                s += '\nRobber is on the following tile (number {0})'.format(
                    self.terrains[item].identifier)
            s += str(self.terrains[item])
        return s
    def initialize_edges(self):
        """Create edges 1-72 with empty links; wired up in assign_specs."""
        edges = {}
        for x in range(1, 73):
            edges[x] = Edge(x, intersections=[], terrains=[])
        return edges
    def initialize_intersections(self):
        """Create intersections 1-54 with empty links; wired up later."""
        intersections = {}
        for x in range(1, 55):
            intersections[x] = Intersection(x, edges=[], terrains=[])
        return intersections
    def initialize_terrains(self):
        """Create terrains 1-19 with placeholder resource data."""
        terrains = {}
        for x in range(1, 20):
            terrains[x] = Terrain(x, x, 0)
        return terrains
    def assign_specs(self) ->None:
        """Cross-link terrains, edges, and intersections per board_specs;
        give each terrain its shuffled resource and roll number, and attach
        ports to the intersections that have them."""
        # terrains_specs items: (terrain key, edge keys, intersection keys).
        for item in terrains_specs:
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].terrains.append(self.terrains[item[0]])
            local_intersections = []
            for subitem in item[2]:
                local_intersections.append(self.intersections[subitem])
                self.intersections[subitem].terrains.append(self.terrains[
                    item[0]])
            self.terrains[item[0]].edges = tuple(local_egdes)
            self.terrains[item[0]].intersections = tuple(local_intersections)
            # Dict keys are 1-based while the shuffled arrays are 0-based.
            self.terrains[item[0]].resource = self.board_resources[item[0] - 1]
            self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1
                ]
        # intersections_specs items: (key, edge keys[, port index]).
        for item in intersections_specs:
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].intersections.append(self.intersections
                    [item[0]])
            self.intersections[item[0]].edges = local_egdes
            if len(item) == 3:
                self.intersections[item[0]].port = self.ports[item[2]]
    """
    Cards are initialized and tracked in catan.py
    def buy_dev_card(self,current_player):
        # pop the card from the dev card and add it to the players dev cards
        #TODO need to see if you can purchase not sure how to use that method
        self.card=dev_cards.pop()
        player(current_player).development_cards.insert(card)
        player(current_player).resource_cards.remove('sheep')
        player(current_player).resource_cards.remove('wheat')
        player(current_player).resource_cards.remove('ore')
    """
def main():
    """Build one randomized board and display it."""
    board = Board()
    print(board)


if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
import numpy as np
from board_specs import *
from board_components import *
import constants
import board_test
# Map each resource name to a small integer id used in the board arrays.
RESOURCE_NAMES = constants.RESOURCE_NAMES
res_dict = dict(zip(RESOURCE_NAMES, np.arange(0, len(RESOURCE_NAMES))))
# Map each port name to a small integer id, same scheme as resources.
PORTS_NAMES = constants.PORTS_NAMES
port_dict = dict(zip(PORTS_NAMES, np.arange(0, len(PORTS_NAMES))))
class Board:
    # A randomized Catan-style board: 19 terrain hexes wired to 72 edges and
    # 54 intersections; resources, roll numbers, and ports are shuffled and
    # the robber starts on the desert tile.
    def __init__(self):
        """
        Build a randomized board.

        Do not forget to ensure 6 and 8 are not next to each other:
        no 6-6 no 6-8 no 8-8
        (NOTE(review): the shuffles below do not yet enforce this rule.)
        """
        # 19 tiles: one desert, 3 brick, 3 ore, 4 hay, 4 wood, 4 sheep.
        self.board_resources = np.array([res_dict['desert']] + [res_dict[
            'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +
            [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)
        np.random.shuffle(self.board_resources)
        # Dice tokens; 0 is a placeholder later paired with the desert,
        # which never produces.
        self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,
            9, 10, 10, 11, 11, 12])
        np.random.shuffle(self.roll_numbers)
        # Four generic 3:1 ports plus one 2:1 port per resource.
        self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[
            '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +
            [port_dict['2wood:1']] + [port_dict['2sheep:1']])
        np.random.shuffle(self.ports)
        # np.where returns a tuple of index arrays; used below to pair the
        # 0 token with the desert tile.
        zero_tile_nr = np.where(self.roll_numbers == 0)
        desert_tile_nr = np.where(self.board_resources == res_dict['desert'])
        # Robber starts on the desert; terrain keys are 1-based, hence +1.
        self.robber = desert_tile_nr[0][0] + 1
        # Swap resources so the desert lines up with the unrollable 0 token.
        self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr
            ] = self.board_resources[desert_tile_nr], self.board_resources[
            zero_tile_nr]
        # Create bare components first, then wire them together, because the
        # objects reference each other circularly.
        self.edges = self.initialize_edges()
        self.intersections = self.initialize_intersections()
        self.terrains = self.initialize_terrains()
        self.assign_specs()
        """
        Cards are initialized and tracked in catan.py
        self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)
        self.dev_cards=random.shuffle(dev_cards)
        """
    def __str__(self):
        """Render the numbered hex diagram plus each terrain's contents."""
        s = '\nThe board is arranged as follows:\n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |01|02|03| \n'
        s += ' \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |04|05|06|07| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ /\\ \n'
        s += '|08|09|10|11|12| \n'
        s += ' \\/ \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |13|14|15|16| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |17|18|19| \n'
        s += ' \\/ \\/ \\/ \n'
        s += 'Following is the content of each terrain:\n\n'
        # Terrain dict keys match the tile numbers in the diagram above.
        for item in self.terrains:
            if self.robber == item:
                s += '\nRobber is on the following tile (number {0})'.format(
                    self.terrains[item].identifier)
            s += str(self.terrains[item])
        return s
    def initialize_edges(self):
        """Create edges 1-72 with empty links; wired up in assign_specs."""
        edges = {}
        for x in range(1, 73):
            edges[x] = Edge(x, intersections=[], terrains=[])
        return edges
    def initialize_intersections(self):
        """Create intersections 1-54 with empty links; wired up later."""
        intersections = {}
        for x in range(1, 55):
            intersections[x] = Intersection(x, edges=[], terrains=[])
        return intersections
    def initialize_terrains(self):
        """Create terrains 1-19 with placeholder resource data."""
        terrains = {}
        for x in range(1, 20):
            terrains[x] = Terrain(x, x, 0)
        return terrains
    def assign_specs(self) ->None:
        """Cross-link terrains, edges, and intersections per board_specs;
        give each terrain its shuffled resource and roll number, and attach
        ports to the intersections that have them."""
        # terrains_specs items: (terrain key, edge keys, intersection keys).
        for item in terrains_specs:
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].terrains.append(self.terrains[item[0]])
            local_intersections = []
            for subitem in item[2]:
                local_intersections.append(self.intersections[subitem])
                self.intersections[subitem].terrains.append(self.terrains[
                    item[0]])
            self.terrains[item[0]].edges = tuple(local_egdes)
            self.terrains[item[0]].intersections = tuple(local_intersections)
            # Dict keys are 1-based while the shuffled arrays are 0-based.
            self.terrains[item[0]].resource = self.board_resources[item[0] - 1]
            self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1
                ]
        # intersections_specs items: (key, edge keys[, port index]).
        for item in intersections_specs:
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].intersections.append(self.intersections
                    [item[0]])
            self.intersections[item[0]].edges = local_egdes
            if len(item) == 3:
                self.intersections[item[0]].port = self.ports[item[2]]
    """
    Cards are initialized and tracked in catan.py
    def buy_dev_card(self,current_player):
        # pop the card from the dev card and add it to the players dev cards
        #TODO need to see if you can purchase not sure how to use that method
        self.card=dev_cards.pop()
        player(current_player).development_cards.insert(card)
        player(current_player).resource_cards.remove('sheep')
        player(current_player).resource_cards.remove('wheat')
        player(current_player).resource_cards.remove('ore')
    """
def main():
    """Build one randomized board and display it."""
    board = Board()
    print(board)


if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
import numpy as np
from board_specs import *
from board_components import *
import constants
import board_test
# List of resources available to be distributed on the board
RESOURCE_NAMES = constants.RESOURCE_NAMES
# Map each resource name to a small integer id used in the board arrays
res_dict = dict(zip(RESOURCE_NAMES, np.arange(0, len(RESOURCE_NAMES))))
# List of available ports that can be distributed around the board
PORTS_NAMES = constants.PORTS_NAMES
# Map each port name to a small integer id, same scheme as resources
port_dict = dict(zip(PORTS_NAMES, np.arange(0, len(PORTS_NAMES))))
class Board:
    def __init__(self):
        """
        Do not forget to ensure 6 and 8 are not next to each other:
        no 6-6 no 6-8 no 8-8
        (NOTE(review): the shuffles below do not yet enforce this rule.)
        """
        # Array of each resource id number repeated the amount of times that
        # the resource is available on the board.
        # This will be used to distribute the resources into slots on the board
        self.board_resources = np.array(
            [res_dict["desert"]]
            + [res_dict["brick"]] * 3
            + [res_dict["ore"]] * 3
            + [res_dict["hay"]] * 4
            + [res_dict["wood"]] * 4
            + [res_dict["sheep"]] * 4
        )
        # Shuffle the resource array for randomized distribution
        np.random.shuffle(self.board_resources)
        # For a deterministic board in tests, replace the two lines below with:
        # self.roll_numbers = board_test.roll_numbers
        # 0 is a placeholder for the desert tile, which can never be rolled
        self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9, 9, 10, 10, 11, 11, 12])
        # shuffle number options
        np.random.shuffle(self.roll_numbers)
        # Array of the port ids, amount of times each port is available -
        self.ports = np.array(
            [port_dict["3:1"]] * 4
            + [port_dict["2brick:1"]]
            + [port_dict["2ore:1"]]
            + [port_dict["2hay:1"]]
            + [port_dict["2wood:1"]]
            + [port_dict["2sheep:1"]]
        )
        # shuffle the ports for randomized distribution
        np.random.shuffle(self.ports)
        # Zero_tile_nr will represent where the 0 number exists
        zero_tile_nr = np.where(self.roll_numbers == 0)
        # Desert_tile_nr will represent where the desert resource exists
        desert_tile_nr = np.where(self.board_resources == res_dict["desert"])
        # Robber will keep track of where the robber is and it starts in
        # the desert. Robber will be an integer.
        # Numpy returns a tuple of which the first is a list with the index.
        # We'll extract it, and add 1 since terrain keys start at 1, not 0.
        self.robber = desert_tile_nr[0][0] + 1
        # Swap resources so the tile holding the unrollable 0 token becomes
        # the desert tile, moving the desert's old resource into its place
        self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr] =\
            (self.board_resources[desert_tile_nr], self.board_resources[zero_tile_nr])
        # The following code create the board objects: terrains, edges, intersections.
        # Initialize a list for each attribute type.
        self.edges = self.initialize_edges()
        self.intersections = self.initialize_intersections()
        self.terrains = self.initialize_terrains()
        # Assign the correct attributes for each attribute.
        self.assign_specs()
        """
        Cards are initialized and tracked in catan.py
        self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)
        self.dev_cards=random.shuffle(dev_cards)
        """
    def __str__(self):
        # A diagram of how the board tiles are numbered.
        s = '\nThe board is arranged as follows:\n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |01|02|03| \n'
        s += ' \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |04|05|06|07| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ /\\ \n'
        s += '|08|09|10|11|12| \n'
        s += ' \\/ \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ /\\ \n'
        s += ' |13|14|15|16| \n'
        s += ' \\/ \\/ \\/ \\/ \n'
        s += ' /\\ /\\ /\\ \n'
        s += ' |17|18|19| \n'
        s += ' \\/ \\/ \\/ \n'
        # Display each terrain; the identifying numbers correspond to
        # the above diagram.
        s += 'Following is the content of each terrain:\n\n'
        for item in self.terrains:
            if self.robber == item:
                s += '\nRobber is on the following tile (number {0})'.format(
                    self.terrains[item].identifier)
            s += str(self.terrains[item])
        return s
    # The following methods will initialize all objects with default
    # arguments; their attribute objects will be reassigned later. This
    # is because the objects refer each other as attributes, and they
    # must exist before being assigned. The objects will be stored in a
    # dictionary, with reference numbers as keys.
    def initialize_edges(self):
        edges = {}
        for x in range(1, 73):
            edges[x] = Edge(x, intersections=[], terrains=[])
        return edges
    def initialize_intersections(self):
        intersections = {}
        for x in range(1, 55):
            intersections[x] = Intersection(x, edges=[], terrains=[])
        return intersections
    def initialize_terrains(self):
        terrains = {}
        for x in range(1, 20):
            terrains[x] = Terrain(x, x, 0)
        return terrains
    # The following method will assign the correct attributes for each
    # object. It does not matter if the object that's assigned already
    # has it's own attributes referred to properly, or if it will be
    # assigned later. The pointers remain unchanged, and all objects
    # will have their proper attributes. This circular relationship is
    # interesting. An object's attribute's attribute can be the initial
    # object.
    def assign_specs(self) -> None:
        # First, it loops through the list of terrains from the board_specs
        # file. The first item is the key/identifier. Then there are two
        # tuples: the edges, and the intersections.
        for item in terrains_specs:
            # Create a local variable to hold the edges for this terrain.
            local_egdes = []
            for subitem in item[1]:
                # Each integer in the tuple refers to a key in the edges
                # dictionary. This edge will be added to the list.
                # Additionally, this edge's terrains attribute will be updated
                # to hold the terrain we're working on now.
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].terrains.append(self.terrains[item[0]])
            # The same process is repeated for the intersections.
            local_intersections = []
            for subitem in item[2]:
                local_intersections.append(self.intersections[subitem])
                self.intersections[subitem].terrains.append(self.terrains[item[0]])
            # The local lists are converted to tuples and passed to the terrain.
            self.terrains[item[0]].edges = (tuple(local_egdes))
            self.terrains[item[0]].intersections = (tuple(local_intersections))
            # Assign the last landscape and resource number. (The lists
            # were shuffled, so it's random.) I deduct 1 from the list index,
            # since the dictionary uses keys starting at 1, and lists start at 0.
            self.terrains[item[0]].resource = self.board_resources[item[0]-1]
            self.terrains[item[0]].resource_num = self.roll_numbers[item[0]-1]
        # Using the next list from the board_specs file, the intersections and
        # edges will reference each other. Additionally, the ports will be added.
        for item in intersections_specs:
            # It uses the same method as above: loops through the intersections
            # to add a list of edges, and adds self to the edge being processed.
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].intersections.append(self.intersections[item[0]])
            self.intersections[item[0]].edges = local_egdes
            # If that item contains a port, assign it here.
            if len(item) == 3:
                self.intersections[item[0]].port = self.ports[item[2]]
    """
    Cards are initialized and tracked in catan.py
    def buy_dev_card(self,current_player):
        # pop the card from the dev card and add it to the players dev cards
        #TODO need to see if you can purchase not sure how to use that method
        self.card=dev_cards.pop()
        player(current_player).development_cards.insert(card)
        player(current_player).resource_cards.remove('sheep')
        player(current_player).resource_cards.remove('wheat')
        player(current_player).resource_cards.remove('ore')
    """
# Create and display the board object.
def main():
    """Script entry point: build one random Board and print it."""
    b = Board()
    print(b)
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "ee22d6226f734c67be91a3ccf1c8c0024bb7dc08",
"index": 5818,
"step-1": "<mask token>\n\n\nclass Board:\n\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n self.board_resources = np.array([res_dict['desert']] + [res_dict[\n 'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +\n [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)\n np.random.shuffle(self.board_resources)\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,\n 9, 10, 10, 11, 11, 12])\n np.random.shuffle(self.roll_numbers)\n self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[\n '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +\n [port_dict['2wood:1']] + [port_dict['2sheep:1']])\n np.random.shuffle(self.ports)\n zero_tile_nr = np.where(self.roll_numbers == 0)\n desert_tile_nr = np.where(self.board_resources == res_dict['desert'])\n self.robber = desert_tile_nr[0][0] + 1\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr\n ] = self.board_resources[desert_tile_nr], self.board_resources[\n zero_tile_nr]\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n self.assign_specs()\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ 
\\\\/ \\\\/ \\n'\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n def assign_specs(self) ->None:\n for item in terrains_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[\n item[0]])\n self.terrains[item[0]].edges = tuple(local_egdes)\n self.terrains[item[0]].intersections = tuple(local_intersections)\n self.terrains[item[0]].resource = self.board_resources[item[0] - 1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1\n ]\n for item in intersections_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections\n [item[0]])\n self.intersections[item[0]].edges = local_egdes\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Board:\n\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n self.board_resources = np.array([res_dict['desert']] + [res_dict[\n 'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +\n [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)\n np.random.shuffle(self.board_resources)\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,\n 9, 10, 10, 11, 11, 12])\n np.random.shuffle(self.roll_numbers)\n self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[\n '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +\n [port_dict['2wood:1']] + [port_dict['2sheep:1']])\n np.random.shuffle(self.ports)\n zero_tile_nr = np.where(self.roll_numbers == 0)\n desert_tile_nr = np.where(self.board_resources == res_dict['desert'])\n self.robber = desert_tile_nr[0][0] + 1\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr\n ] = self.board_resources[desert_tile_nr], self.board_resources[\n zero_tile_nr]\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n self.assign_specs()\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ 
\\\\/ \\\\/ \\n'\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n def assign_specs(self) ->None:\n for item in terrains_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[\n item[0]])\n self.terrains[item[0]].edges = tuple(local_egdes)\n self.terrains[item[0]].intersections = tuple(local_intersections)\n self.terrains[item[0]].resource = self.board_resources[item[0] - 1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1\n ]\n for item in intersections_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections\n [item[0]])\n self.intersections[item[0]].edges = local_egdes\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n \"\"\"\n Cards are initialized and tracked in catan.py\n def buy_dev_card(self,current_player):\n # pop the card from the dev card and add it to the players dev cards\n #TODO need to see if you can purchase not sure how to use that method\n self.card=dev_cards.pop()\n 
player(current_player).development_cards.insert(card)\n player(current_player).resource_cards.remove('sheep')\n player(current_player).resource_cards.remove('wheat')\n player(current_player).resource_cards.remove('ore')\n \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Board:\n\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n self.board_resources = np.array([res_dict['desert']] + [res_dict[\n 'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +\n [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)\n np.random.shuffle(self.board_resources)\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,\n 9, 10, 10, 11, 11, 12])\n np.random.shuffle(self.roll_numbers)\n self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[\n '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +\n [port_dict['2wood:1']] + [port_dict['2sheep:1']])\n np.random.shuffle(self.ports)\n zero_tile_nr = np.where(self.roll_numbers == 0)\n desert_tile_nr = np.where(self.board_resources == res_dict['desert'])\n self.robber = desert_tile_nr[0][0] + 1\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr\n ] = self.board_resources[desert_tile_nr], self.board_resources[\n zero_tile_nr]\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n self.assign_specs()\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ 
\\\\/ \\\\/ \\n'\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n def assign_specs(self) ->None:\n for item in terrains_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[\n item[0]])\n self.terrains[item[0]].edges = tuple(local_egdes)\n self.terrains[item[0]].intersections = tuple(local_intersections)\n self.terrains[item[0]].resource = self.board_resources[item[0] - 1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1\n ]\n for item in intersections_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections\n [item[0]])\n self.intersections[item[0]].edges = local_egdes\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n \"\"\"\n Cards are initialized and tracked in catan.py\n def buy_dev_card(self,current_player):\n # pop the card from the dev card and add it to the players dev cards\n #TODO need to see if you can purchase not sure how to use that method\n self.card=dev_cards.pop()\n 
player(current_player).development_cards.insert(card)\n player(current_player).resource_cards.remove('sheep')\n player(current_player).resource_cards.remove('wheat')\n player(current_player).resource_cards.remove('ore')\n \"\"\"\n\n\ndef main():\n b = Board()\n print(b)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import numpy as np\nfrom board_specs import *\nfrom board_components import *\nimport constants\nimport board_test\nRESOURCE_NAMES = constants.RESOURCE_NAMES\nres_dict = dict(zip(RESOURCE_NAMES, np.arange(0, len(RESOURCE_NAMES))))\nPORTS_NAMES = constants.PORTS_NAMES\nport_dict = dict(zip(PORTS_NAMES, np.arange(0, len(PORTS_NAMES))))\n\n\nclass Board:\n\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n self.board_resources = np.array([res_dict['desert']] + [res_dict[\n 'brick']] * 3 + [res_dict['ore']] * 3 + [res_dict['hay']] * 4 +\n [res_dict['wood']] * 4 + [res_dict['sheep']] * 4)\n np.random.shuffle(self.board_resources)\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9,\n 9, 10, 10, 11, 11, 12])\n np.random.shuffle(self.roll_numbers)\n self.ports = np.array([port_dict['3:1']] * 4 + [port_dict[\n '2brick:1']] + [port_dict['2ore:1']] + [port_dict['2hay:1']] +\n [port_dict['2wood:1']] + [port_dict['2sheep:1']])\n np.random.shuffle(self.ports)\n zero_tile_nr = np.where(self.roll_numbers == 0)\n desert_tile_nr = np.where(self.board_resources == res_dict['desert'])\n self.robber = desert_tile_nr[0][0] + 1\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr\n ] = self.board_resources[desert_tile_nr], self.board_resources[\n zero_tile_nr]\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n self.assign_specs()\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' 
\\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n def assign_specs(self) ->None:\n for item in terrains_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[\n item[0]])\n self.terrains[item[0]].edges = tuple(local_egdes)\n self.terrains[item[0]].intersections = tuple(local_intersections)\n self.terrains[item[0]].resource = self.board_resources[item[0] - 1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0] - 1\n ]\n for item in intersections_specs:\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections\n [item[0]])\n self.intersections[item[0]].edges = local_egdes\n if len(item) == 3:\n self.intersections[item[0]].port = 
self.ports[item[2]]\n \"\"\"\n Cards are initialized and tracked in catan.py\n def buy_dev_card(self,current_player):\n # pop the card from the dev card and add it to the players dev cards\n #TODO need to see if you can purchase not sure how to use that method\n self.card=dev_cards.pop()\n player(current_player).development_cards.insert(card)\n player(current_player).resource_cards.remove('sheep')\n player(current_player).resource_cards.remove('wheat')\n player(current_player).resource_cards.remove('ore')\n \"\"\"\n\n\ndef main():\n b = Board()\n print(b)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import numpy as np\nfrom board_specs import *\nfrom board_components import *\nimport constants\nimport board_test\n\n\n# List of resources available to be distributed on the board\nRESOURCE_NAMES = constants.RESOURCE_NAMES\n# Create a dictionary of each resource and a corresponding number id\nres_dict = dict(zip(RESOURCE_NAMES, np.arange(0, len(RESOURCE_NAMES))))\n# List of available ports that can be distributed around the board\nPORTS_NAMES = constants.PORTS_NAMES\n# Create a dictionary of each port and a corresponding number id\nport_dict = dict(zip(PORTS_NAMES, np.arange(0, len(PORTS_NAMES))))\n\n\nclass Board:\n def __init__(self):\n \"\"\"\n Do not forget to ensure 6 and 8 are not next to each other:\n no 6-6 no 6-8 no 8-8\n \"\"\"\n # Array of each resource id number repeated the amount of times that\n # the resource is available on the board.\n # This will be used to distribute the resources into slots on the board\n self.board_resources = np.array(\n [res_dict[\"desert\"]]\n + [res_dict[\"brick\"]] * 3\n + [res_dict[\"ore\"]] * 3\n + [res_dict[\"hay\"]] * 4\n + [res_dict[\"wood\"]] * 4\n + [res_dict[\"sheep\"]] * 4\n )\n # Shuffle the resource array for randomized distribution\n np.random.shuffle(self.board_resources)\n \n # replace lines #42 and #44 with the following:\n # self.roll_numbers = board_test.roll_numbers\n \n # number associated with the desert and 0 can not actually be rolled\n self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9, 9, 10, 10, 11, 11, 12])\n # shuffle number options\n np.random.shuffle(self.roll_numbers)\n \n # Array of the port ids, amount of times each port is available -\n self.ports = np.array(\n [port_dict[\"3:1\"]] * 4\n + [port_dict[\"2brick:1\"]]\n + [port_dict[\"2ore:1\"]]\n + [port_dict[\"2hay:1\"]]\n + [port_dict[\"2wood:1\"]]\n + [port_dict[\"2sheep:1\"]]\n )\n # shuffle the ports for randomized distribution\n np.random.shuffle(self.ports)\n # Zero_tile_nr will represent where the 0 number 
exists\n zero_tile_nr = np.where(self.roll_numbers == 0)\n # Desert_tile_nr will represent where the desert resource exists\n desert_tile_nr = np.where(self.board_resources == res_dict[\"desert\"])\n # Robber will keep track of where the robber is and it starts in\n # the desert. Robber will be an integer.\n # Numpy returns a tuple of which the first is a list with the index.\n # We'll extract it, and add 1 since terrain keys start at 1, not 0.\n self.robber = desert_tile_nr[0][0] + 1\n # as the desert tile and replace whatever was already in the desert\n # tile into the empty zero tile\n self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr] =\\\n (self.board_resources[desert_tile_nr], self.board_resources[zero_tile_nr])\n\n # The following code create the board objects: terrains, edges, intersections.\n\n # Initialize a list for each attribute type.\n self.edges = self.initialize_edges()\n self.intersections = self.initialize_intersections()\n self.terrains = self.initialize_terrains()\n # Assign the correct attributes for each attribute.\n self.assign_specs()\n\n \"\"\" \n Cards are initialized and tracked in catan.py\n self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)\n self.dev_cards=random.shuffle(dev_cards)\n \"\"\"\n\n def __str__(self):\n # A message, of how the board is displayed.\n s = '\\nThe board is arranged as follows:\\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |01|02|03| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |04|05|06|07| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += '|08|09|10|11|12| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ /\\\\ \\n'\n s += ' |13|14|15|16| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\\\/ \\n'\n s += ' /\\\\ /\\\\ /\\\\ \\n'\n s += ' |17|18|19| \\n'\n s += ' \\\\/ \\\\/ \\\\/ \\n'\n # Display each terrains; the identifying numbers correspond 
to\n # the above diagram.\n s += 'Following is the content of each terrain:\\n\\n'\n for item in self.terrains:\n if self.robber == item:\n s += '\\nRobber is on the following tile (number {0})'.format(\n self.terrains[item].identifier)\n s += str(self.terrains[item])\n return s\n\n # The following methods will initialize all objects with default\n # arguments; their attribute objects will be reassigned later. This\n # is because the objects refer each other as attributes, and they\n # must exist before being assigned. The objects will be stored in a\n # dictionary, with reference numbers as keys.\n def initialize_edges(self):\n edges = {}\n for x in range(1, 73):\n edges[x] = Edge(x, intersections=[], terrains=[])\n return edges\n\n def initialize_intersections(self):\n intersections = {}\n for x in range(1, 55):\n intersections[x] = Intersection(x, edges=[], terrains=[])\n return intersections\n\n def initialize_terrains(self):\n terrains = {}\n for x in range(1, 20):\n terrains[x] = Terrain(x, x, 0)\n return terrains\n\n # The following method will assign the correct attributes for each\n # object. It does not matter if the object that's assigned already\n # has it's own attributes referred to properly, or if it will be\n # assigned later. The pointers remain unchanged, and all objects\n # will have their proper attributes. This circular relationship is\n # interesting. An object's attribute's attribute can be the initial\n # object.\n def assign_specs(self) -> None:\n # First, it loops through the list of terrains from the board_specs\n # file. The first item is the key/identifier. Then there are two\n # tuples: the intersections, and the edges.\n for item in terrains_specs:\n # Create a local variable to hold the edges for this terrain.\n local_egdes = []\n for subitem in item[1]:\n # Each integer in the tuple refers to a key in the edges\n # dictionary. 
This edge will be added to the list.\n # Additionally, this edge's terrains attribute will be updated\n # to hold the terrain we're working on now.\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].terrains.append(self.terrains[item[0]])\n\n # The same process is repeated for the intersections.\n local_intersections = []\n for subitem in item[2]:\n local_intersections.append(self.intersections[subitem])\n self.intersections[subitem].terrains.append(self.terrains[item[0]])\n\n # The local lists are converted to tuples and passed to the terrain.\n self.terrains[item[0]].edges = (tuple(local_egdes))\n self.terrains[item[0]].intersections = (tuple(local_intersections))\n\n # Assign the last landscape and resource number. (The lists\n # were shuffled, so it's random.) I deduct 1 from the list index,\n # since the dictionary uses keys starting at 1, and lists start at 0.\n self.terrains[item[0]].resource = self.board_resources[item[0]-1]\n self.terrains[item[0]].resource_num = self.roll_numbers[item[0]-1]\n\n # Using the next list from the board_specs file, the intersections and\n # edges will reference each other. 
Additionally, the ports will be added.\n for item in intersections_specs:\n # It uses the same method as above: loops throught he intersections\n # to add a list of edges, and adds self to the edge being processed.\n local_egdes = []\n for subitem in item[1]:\n local_egdes.append(self.edges[subitem])\n self.edges[subitem].intersections.append(self.intersections[item[0]])\n\n self.intersections[item[0]].edges = local_egdes\n # If that item contains a port, assign it here.\n if len(item) == 3:\n self.intersections[item[0]].port = self.ports[item[2]]\n\n \"\"\"\n Cards are initialized and tracked in catan.py\n def buy_dev_card(self,current_player):\n # pop the card from the dev card and add it to the players dev cards\n #TODO need to see if you can purchase not sure how to use that method\n self.card=dev_cards.pop()\n player(current_player).development_cards.insert(card)\n player(current_player).resource_cards.remove('sheep')\n player(current_player).resource_cards.remove('wheat')\n player(current_player).resource_cards.remove('ore')\n \"\"\"\n\n\n# Create and display the board object.\ndef main():\n b = Board()\n print(b)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
8,
10,
12,
13
]
}
|
[
7,
8,
10,
12,
13
] |
# Echo one line read from standard input back to standard output.
echoed = input()
print(echoed)
|
normal
|
{
"blob_id": "e12c411814efd7cc7417174b51f0f756589ca40b",
"index": 3325,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(was)\n",
"step-3": "was = input()\nprint(was)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Generator(Model):
def __init__(self, name):
super(Generator, self).__init__(name=name)
self.dense = layers.Dense(7 * 7 * 128)
self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')
self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,
padding='same')
self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,
padding='same')
self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=
'tanh', padding='same')
self.relu = layers.ReLU()
self.bn1 = layers.BatchNormalization()
self.bn2 = layers.BatchNormalization()
self.bn3 = layers.BatchNormalization()
self.bn4 = layers.BatchNormalization()
<|reserved_special_token_0|>
def get_config(self):
return {'name': self.name}
class Discriminator(Model):
def __init__(self, name, img_shape=(28, 28, 1)):
super(Discriminator, self).__init__(name=name)
self.img_shape = img_shape
self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)
self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)
self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=
'same')
self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')
self.leaky_relu = layers.LeakyReLU(alpha=0.2)
self.flatten = layers.Flatten()
self.dense_final = layers.Dense(1, activation='sigmoid')
self.dense = layers.Dense(7 * 7 * 16)
def call(self, inputs, training=None, mask=None):
image, label = inputs
lb = self.dense(label)
lb = layers.Reshape(target_shape=(28, 28, 1))(lb)
x = layers.Concatenate()([image, lb])
x = self.leaky_relu(x)
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.conv4(x)
x = self.flatten(x)
x = self.dense_final(x)
return x
def get_config(self):
return {'img_shape': self.img_shape, 'name': self.name}
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def preprocess(img, lbl):
img = (img - 127.5) / 127.5
img = tf.convert_to_tensor(img, dtype=tf.float32)
return img, lbl
class Generator(Model):
def __init__(self, name):
super(Generator, self).__init__(name=name)
self.dense = layers.Dense(7 * 7 * 128)
self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')
self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,
padding='same')
self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,
padding='same')
self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=
'tanh', padding='same')
self.relu = layers.ReLU()
self.bn1 = layers.BatchNormalization()
self.bn2 = layers.BatchNormalization()
self.bn3 = layers.BatchNormalization()
self.bn4 = layers.BatchNormalization()
def call(self, inputs, training=None, mask=None):
noise, label = inputs
x = layers.Concatenate()([noise, label])
x = self.dense(x)
x = layers.Reshape(target_shape=(7, 7, 128))(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv1(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv4(x)
return x
def get_config(self):
return {'name': self.name}
class Discriminator(Model):
def __init__(self, name, img_shape=(28, 28, 1)):
super(Discriminator, self).__init__(name=name)
self.img_shape = img_shape
self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)
self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)
self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=
'same')
self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')
self.leaky_relu = layers.LeakyReLU(alpha=0.2)
self.flatten = layers.Flatten()
self.dense_final = layers.Dense(1, activation='sigmoid')
self.dense = layers.Dense(7 * 7 * 16)
def call(self, inputs, training=None, mask=None):
image, label = inputs
lb = self.dense(label)
lb = layers.Reshape(target_shape=(28, 28, 1))(lb)
x = layers.Concatenate()([image, lb])
x = self.leaky_relu(x)
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.conv4(x)
x = self.flatten(x)
x = self.dense_final(x)
return x
def get_config(self):
return {'img_shape': self.img_shape, 'name': self.name}
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
<|reserved_special_token_0|>
@tf.function(input_signature=signature)
def train_step(image_batch, label_batch, epoch):
noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))
with tf.GradientTape(persistent=True) as tape:
fake_img_batch = gen([noise, label_batch], training=True)
fake_logits = disc([fake_img_batch, label_batch], training=True)
real_logits = disc([image_batch, label_batch], training=True)
d_loss = disc_loss(real_logits, fake_logits)
g_loss = gen_loss(fake_logits)
gen_grads = tape.gradient(g_loss, gen.trainable_variables)
disc_grads = tape.gradient(d_loss, disc.trainable_variables)
gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))
disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))
with summary_writer.as_default():
tf.summary.scalar('generator_loss', g_loss, step=epoch)
tf.summary.scalar('discriminator_loss', d_loss, step=epoch)
<|reserved_special_token_0|>
def generate():
z = tf.random.normal((10, config.NOISE_DIM))
indices = np.arange(0, 10)
labels = tf.one_hot(indices, depth=10)
print(labels)
out = gen([z, labels])
out = out.numpy() * 127.5 + 127.5
for i in range(10):
plt.subplot(1, 10, i + 1)
plt.axis('off')
plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def preprocess(img, lbl):
img = (img - 127.5) / 127.5
img = tf.convert_to_tensor(img, dtype=tf.float32)
return img, lbl
class Generator(Model):
    """Conditional DCGAN generator: (noise, one-hot label) -> 28x28x1 image.

    The label is concatenated onto the noise vector, projected to a 7x7x128
    feature map, and upsampled 7 -> 14 -> 28 by strided transposed
    convolutions; tanh on the last layer keeps outputs in [-1, 1].
    """

    def __init__(self, name):
        super(Generator, self).__init__(name=name)
        self.dense = layers.Dense(7 * 7 * 128)
        # stride-1 layer keeps 7x7; the next two each double spatial size
        self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')
        self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,
            padding='same')
        self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,
            padding='same')
        self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=
            'tanh', padding='same')
        self.relu = layers.ReLU()
        # one BatchNormalization instance per use site (each holds state)
        self.bn1 = layers.BatchNormalization()
        self.bn2 = layers.BatchNormalization()
        self.bn3 = layers.BatchNormalization()
        self.bn4 = layers.BatchNormalization()

    def call(self, inputs, training=None, mask=None):
        # inputs is a (noise, one-hot label) pair
        noise, label = inputs
        x = layers.Concatenate()([noise, label])
        x = self.dense(x)
        x = layers.Reshape(target_shape=(7, 7, 128))(x)
        # BN -> ReLU -> transposed conv, repeated four times
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv4(x)
        return x

    def get_config(self):
        # minimal config; layer hyperparameters are fixed in __init__
        return {'name': self.name}
class Discriminator(Model):
    """Conditional DCGAN discriminator: scores (image, one-hot label) pairs.

    The label vector is projected to 28x28x1, concatenated with the image
    as an extra channel, then downsampled by strided convolutions to one
    sigmoid probability that the image is real.
    """

    def __init__(self, name, img_shape=(28, 28, 1)):
        super(Discriminator, self).__init__(name=name)
        self.img_shape = img_shape  # kept only so get_config can round-trip
        self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)
        self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)
        self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=
            'same')
        self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')
        self.leaky_relu = layers.LeakyReLU(alpha=0.2)
        self.flatten = layers.Flatten()
        self.dense_final = layers.Dense(1, activation='sigmoid')
        # 7 * 7 * 16 = 784 = 28 * 28, so the label projects to one image plane
        self.dense = layers.Dense(7 * 7 * 16)

    def call(self, inputs, training=None, mask=None):
        # inputs is an (image, one-hot label) pair
        image, label = inputs
        lb = self.dense(label)
        lb = layers.Reshape(target_shape=(28, 28, 1))(lb)
        # the projected label becomes a second input channel
        x = layers.Concatenate()([image, lb])
        x = self.leaky_relu(x)
        x = self.conv1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.conv4(x)
        x = self.flatten(x)
        x = self.dense_final(x)
        return x

    def get_config(self):
        return {'img_shape': self.img_shape, 'name': self.name}

    @classmethod
    def from_config(cls, config, custom_objects=None):
        # rebuild directly from the saved keyword arguments
        return cls(**config)
<|reserved_special_token_0|>
def gen_loss(fake_logits):
    """Generator loss: BCE pushing the discriminator's fake scores toward 1."""
    targets = tf.ones_like(fake_logits)
    return tf.losses.BinaryCrossentropy()(targets, fake_logits)
<|reserved_special_token_0|>
@tf.function(input_signature=signature)
def train_step(image_batch, label_batch, epoch):
    """Run one adversarial update of both networks on a single batch.

    Generates fakes, scores real and fake batches with the discriminator,
    applies one optimizer step to each network, and logs both losses to
    TensorBoard at step *epoch*.
    """
    # Size the noise to the actual batch: the final batch of an epoch can be
    # smaller than config.BATCH_SIZE, which previously made the fake batch
    # disagree with label_batch inside the models.
    batch_size = tf.shape(label_batch)[0]
    noise = tf.random.normal((batch_size, config.NOISE_DIM))
    # persistent tape: gradients of two different losses are taken below
    with tf.GradientTape(persistent=True) as tape:
        fake_img_batch = gen([noise, label_batch], training=True)
        fake_logits = disc([fake_img_batch, label_batch], training=True)
        real_logits = disc([image_batch, label_batch], training=True)
        d_loss = disc_loss(real_logits, fake_logits)
        g_loss = gen_loss(fake_logits)
    gen_grads = tape.gradient(g_loss, gen.trainable_variables)
    disc_grads = tape.gradient(d_loss, disc.trainable_variables)
    gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))
    disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))
    with summary_writer.as_default():
        tf.summary.scalar('generator_loss', g_loss, step=epoch)
        tf.summary.scalar('discriminator_loss', d_loss, step=epoch)
<|reserved_special_token_0|>
def generate():
    """Sample one image per digit class (0-9) and display them in one row."""
    z = tf.random.normal((10, config.NOISE_DIM))
    indices = np.arange(0, 10)
    labels = tf.one_hot(indices, depth=10)
    print(labels)
    out = gen([z, labels])
    # undo the [-1, 1] preprocessing back to [0, 255] pixel values
    out = out.numpy() * 127.5 + 127.5
    for i in range(10):
        plt.subplot(1, 10, i + 1)
        plt.axis('off')
        plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')
    plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def preprocess(img, lbl):
    """Scale pixel values from [0, 255] to [-1, 1] and cast to float32."""
    img = (img - 127.5) / 127.5
    img = tf.convert_to_tensor(img, dtype=tf.float32)
    return img, lbl
class Generator(Model):
    """Conditional DCGAN generator: (noise, one-hot label) -> 28x28x1 image.

    The label is concatenated onto the noise vector, projected to a 7x7x128
    feature map, and upsampled 7 -> 14 -> 28 by strided transposed
    convolutions; tanh on the last layer keeps outputs in [-1, 1].
    """

    def __init__(self, name):
        super(Generator, self).__init__(name=name)
        self.dense = layers.Dense(7 * 7 * 128)
        # stride-1 layer keeps 7x7; the next two each double spatial size
        self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')
        self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,
            padding='same')
        self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,
            padding='same')
        self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=
            'tanh', padding='same')
        self.relu = layers.ReLU()
        # one BatchNormalization instance per use site (each holds state)
        self.bn1 = layers.BatchNormalization()
        self.bn2 = layers.BatchNormalization()
        self.bn3 = layers.BatchNormalization()
        self.bn4 = layers.BatchNormalization()

    def call(self, inputs, training=None, mask=None):
        # inputs is a (noise, one-hot label) pair
        noise, label = inputs
        x = layers.Concatenate()([noise, label])
        x = self.dense(x)
        x = layers.Reshape(target_shape=(7, 7, 128))(x)
        # BN -> ReLU -> transposed conv, repeated four times
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv4(x)
        return x

    def get_config(self):
        # minimal config; layer hyperparameters are fixed in __init__
        return {'name': self.name}
class Discriminator(Model):
    """Conditional DCGAN discriminator: scores (image, one-hot label) pairs.

    The label vector is projected to 28x28x1, concatenated with the image
    as an extra channel, then downsampled by strided convolutions to one
    sigmoid probability that the image is real.
    """

    def __init__(self, name, img_shape=(28, 28, 1)):
        super(Discriminator, self).__init__(name=name)
        self.img_shape = img_shape  # kept only so get_config can round-trip
        self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)
        self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)
        self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=
            'same')
        self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')
        self.leaky_relu = layers.LeakyReLU(alpha=0.2)
        self.flatten = layers.Flatten()
        self.dense_final = layers.Dense(1, activation='sigmoid')
        # 7 * 7 * 16 = 784 = 28 * 28, so the label projects to one image plane
        self.dense = layers.Dense(7 * 7 * 16)

    def call(self, inputs, training=None, mask=None):
        # inputs is an (image, one-hot label) pair
        image, label = inputs
        lb = self.dense(label)
        lb = layers.Reshape(target_shape=(28, 28, 1))(lb)
        # the projected label becomes a second input channel
        x = layers.Concatenate()([image, lb])
        x = self.leaky_relu(x)
        x = self.conv1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.conv4(x)
        x = self.flatten(x)
        x = self.dense_final(x)
        return x

    def get_config(self):
        return {'img_shape': self.img_shape, 'name': self.name}

    @classmethod
    def from_config(cls, config, custom_objects=None):
        # rebuild directly from the saved keyword arguments
        return cls(**config)
<|reserved_special_token_0|>
def disc_loss(real_logits, fake_logits):
    """Discriminator loss: mean BCE of real->1 and fake->0 predictions."""
    bce = tf.losses.BinaryCrossentropy()
    real_loss = bce(tf.ones_like(real_logits), real_logits)
    fake_loss = bce(tf.zeros_like(fake_logits), fake_logits)
    return 0.5 * (real_loss + fake_loss)
def gen_loss(fake_logits):
    """Generator loss: BCE pushing the discriminator's fake scores toward 1."""
    loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits),
        fake_logits)
    return loss
<|reserved_special_token_0|>
@tf.function(input_signature=signature)
def train_step(image_batch, label_batch, epoch):
    """Run one adversarial update of both networks on a single batch.

    Generates fakes, scores real and fake batches with the discriminator,
    applies one optimizer step to each network, and logs both losses to
    TensorBoard at step *epoch*.
    """
    # Size the noise to the actual batch: the final batch of an epoch can be
    # smaller than config.BATCH_SIZE, which previously made the fake batch
    # disagree with label_batch inside the models.
    batch_size = tf.shape(label_batch)[0]
    noise = tf.random.normal((batch_size, config.NOISE_DIM))
    # persistent tape: gradients of two different losses are taken below
    with tf.GradientTape(persistent=True) as tape:
        fake_img_batch = gen([noise, label_batch], training=True)
        fake_logits = disc([fake_img_batch, label_batch], training=True)
        real_logits = disc([image_batch, label_batch], training=True)
        d_loss = disc_loss(real_logits, fake_logits)
        g_loss = gen_loss(fake_logits)
    gen_grads = tape.gradient(g_loss, gen.trainable_variables)
    disc_grads = tape.gradient(d_loss, disc.trainable_variables)
    gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))
    disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))
    with summary_writer.as_default():
        tf.summary.scalar('generator_loss', g_loss, step=epoch)
        tf.summary.scalar('discriminator_loss', d_loss, step=epoch)
<|reserved_special_token_0|>
def train():
    """Run the full training loop, saving a checkpoint every fifth epoch."""
    for epoch in range(config.EPOCHS):
        print(f'\nEpoch {epoch + 1}/{config.EPOCHS} :')
        for n, (image, label) in enumerate(train_dataset):
            # epoch + 1 doubles as the TensorBoard summary step
            train_step(image, label, epoch + 1)
            prog_bar.update(n)
        if (epoch + 1) % 5 == 0:
            ckpt_manager.save()
def generate():
    """Sample one image per digit class (0-9) and display them in one row."""
    z = tf.random.normal((10, config.NOISE_DIM))
    indices = np.arange(0, 10)
    labels = tf.one_hot(indices, depth=10)
    print(labels)
    out = gen([z, labels])
    # undo the [-1, 1] preprocessing back to [0, 255] pixel values
    out = out.numpy() * 127.5 + 127.5
    for i in range(10):
        plt.subplot(1, 10, i + 1)
        plt.axis('off')
        plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')
    plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import tensorflow as tf
from keras import layers, Model, Input
from keras.utils import Progbar, to_categorical
from keras.datasets.mnist import load_data
import numpy as np
import matplotlib.pyplot as plt
import config
import datetime
img_height, img_width, _ = config.IMAGE_SHAPE

# MNIST: keep only the training split; add a channel axis and one-hot labels
(X, Y), (_, _) = load_data()
X = X.reshape((-1, img_height, img_width, 1))
X = X.astype("float32")
Y = to_categorical(Y, num_classes=10, dtype="float32")
def preprocess(img, lbl):
    """Scale pixel values from [0, 255] to [-1, 1] and cast to float32."""
    img = (img - 127.5) / 127.5
    img = tf.convert_to_tensor(img, dtype=tf.float32)
    return img, lbl
class Generator(Model):
    """Conditional DCGAN generator: (noise, one-hot label) -> 28x28x1 image.

    The label is concatenated onto the noise vector, projected to a 7x7x128
    feature map, and upsampled 7 -> 14 -> 28 by strided transposed
    convolutions; tanh on the last layer keeps outputs in [-1, 1].
    """

    def __init__(self, name):
        super(Generator, self).__init__(name=name)
        self.dense = layers.Dense(7*7*128)
        # stride-1 layer keeps 7x7; the next two each double spatial size
        self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding="same")
        self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding="same")
        self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2, padding="same")
        self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation="tanh", padding="same")
        self.relu = layers.ReLU()
        # one BatchNormalization instance per use site (each holds state)
        self.bn1 = layers.BatchNormalization()
        self.bn2 = layers.BatchNormalization()
        self.bn3 = layers.BatchNormalization()
        self.bn4 = layers.BatchNormalization()

    def call(self, inputs, training=None, mask=None):
        # inputs is a (noise, one-hot label) pair
        noise, label = inputs
        x = layers.Concatenate()([noise, label])
        x = self.dense(x)
        x = layers.Reshape(target_shape=(7, 7, 128))(x)
        # BN -> ReLU -> transposed conv, repeated four times
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv4(x)

        return x

    def get_config(self):
        # minimal config; layer hyperparameters are fixed in __init__
        return {'name': self.name}
class Discriminator(Model):
    """Conditional DCGAN discriminator: scores (image, one-hot label) pairs.

    The label vector is projected to 28x28x1, concatenated with the image
    as an extra channel, then downsampled by strided convolutions to one
    sigmoid probability that the image is real.
    """

    def __init__(self, name, img_shape=(28, 28, 1)):
        super(Discriminator, self).__init__(name=name)
        self.img_shape = img_shape  # kept only so get_config can round-trip
        self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)
        self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)
        self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding="same")
        self.conv4 = layers.Conv2D(256, kernel_size=5, padding="same")
        self.leaky_relu = layers.LeakyReLU(alpha=0.2)
        self.flatten = layers.Flatten()
        self.dense_final = layers.Dense(1, activation='sigmoid')
        # 7*7*16 = 784 = 28*28, so the label projects to one image plane
        self.dense = layers.Dense(7*7*16)

    def call(self, inputs, training=None, mask=None):
        # inputs is an (image, one-hot label) pair
        image, label = inputs
        lb = self.dense(label)
        lb = layers.Reshape(target_shape=(28, 28, 1))(lb)
        # the projected label becomes a second input channel
        x = layers.Concatenate()([image, lb])
        x = self.leaky_relu(x)
        x = self.conv1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.conv4(x)
        x = self.flatten(x)
        x = self.dense_final(x)

        return x

    def get_config(self):
        return {"img_shape": self.img_shape, "name": self.name}

    @classmethod
    def from_config(cls, config, custom_objects=None):
        # rebuild directly from the saved keyword arguments
        return cls(**config)
gen = Generator(name="generator")
disc = Discriminator(name="discriminator", img_shape=config.IMAGE_SHAPE)

# independent Adam optimizers so the two networks' update states don't mix
gen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)
disc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)

# 80/20 train/validation split of the in-memory MNIST tensors
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
train_dataset = dataset.take(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)
val_dataset = dataset.skip(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)

# checkpoint both networks and their optimizers; keep the three most recent
checkpoint = tf.train.Checkpoint(generator=gen,
                                 gen_optimizer=gen_optimizer,
                                 discriminator=disc,
                                 disc_optimizer=disc_optimizer)
ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=config.CKPT_DIR, max_to_keep=3)

# creates a summary writer, writes a summary in a file to access on tensorboard later
summary_writer = tf.summary.create_file_writer(
    logdir=config.LOG_DIR + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

'''LOSSES'''
def disc_loss(real_logits, fake_logits):
    """Discriminator loss: mean BCE of real->1 and fake->0 predictions."""
    real_loss = tf.losses.BinaryCrossentropy()(tf.ones_like(real_logits), real_logits)
    fake_loss = tf.losses.BinaryCrossentropy()(tf.zeros_like(fake_logits), fake_logits)
    loss = 0.5*(real_loss + fake_loss)
    return loss
def gen_loss(fake_logits):
    """Generator loss: BCE pushing the discriminator's fake scores toward 1."""
    loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits), fake_logits)
    return loss
# give signature to avoid retracing
# (image batch, one-hot label batch, epoch number); batch dimension is dynamic
signature = [
    tf.TensorSpec(shape=(None, 28, 28, 1), dtype=tf.float32),
    tf.TensorSpec(shape=(None, 10), dtype=tf.float32),
    tf.TensorSpec(shape=(), dtype=tf.int64)
]
@tf.function(input_signature=signature)
def train_step(image_batch, label_batch, epoch):
    """Run one adversarial update of both networks on a single batch.

    Generates fakes, scores real and fake batches with the discriminator,
    applies one optimizer step to each network, and logs both losses to
    TensorBoard at step *epoch*.
    """
    # Size the noise to the actual batch: the final batch of an epoch can be
    # smaller than config.BATCH_SIZE, which previously made the fake batch
    # disagree with label_batch inside the models.
    batch_size = tf.shape(label_batch)[0]
    noise = tf.random.normal((batch_size, config.NOISE_DIM))
    with tf.GradientTape(persistent=True) as tape:
        # persistent tape: gradients of two different losses are taken below
        fake_img_batch = gen([noise, label_batch], training=True)
        fake_logits = disc([fake_img_batch, label_batch], training=True)
        real_logits = disc([image_batch, label_batch], training=True)

        d_loss = disc_loss(real_logits, fake_logits)
        g_loss = gen_loss(fake_logits)

    gen_grads = tape.gradient(g_loss, gen.trainable_variables)
    disc_grads = tape.gradient(d_loss, disc.trainable_variables)
    gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))
    disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))

    # writes a tensorboard summary (creates graph if scalar)
    with summary_writer.as_default():
        tf.summary.scalar("generator_loss", g_loss, step=epoch)
        tf.summary.scalar("discriminator_loss", d_loss, step=epoch)
g_loss = tf.metrics.Mean()
d_loss = tf.metrics.Mean()
# Progbar's stateful_metrics parameter expects metric *names* (strings);
# passing the Mean objects themselves was silently ignored by the bar.
prog_bar = Progbar(1500, stateful_metrics=["g_loss", "d_loss"])

if ckpt_manager.latest_checkpoint:
    # resume from the newest checkpoint when one exists
    checkpoint.restore(ckpt_manager.latest_checkpoint).expect_partial()
    print(f"Restored the training checkpoint...{ckpt_manager.latest_checkpoint}")
def train():
    """Run the full training loop, saving a checkpoint every fifth epoch."""
    for epoch in range(config.EPOCHS):
        step = epoch + 1  # also used as the TensorBoard summary step
        print(f"\nEpoch {step}/{config.EPOCHS} :")
        for batch_idx, (image, label) in enumerate(train_dataset):
            train_step(image, label, step)
            prog_bar.update(batch_idx)
        if step % 5 == 0:
            ckpt_manager.save()
def generate():
    """Sample one image per digit class (0-9) and display them in one row."""
    noise = tf.random.normal((10, config.NOISE_DIM))
    labels = tf.one_hot(np.arange(0, 10), depth=10)
    print(labels)

    images = gen([noise, labels])
    images = images.numpy() * 127.5 + 127.5  # de-process back to [0, 255]
    for idx in range(10):
        plt.subplot(1, 10, idx + 1)
        plt.axis("off")
        plt.imshow(images[idx].reshape((img_height, img_width)), cmap='gray')
    plt.show()
# Entry point: run the full training loop when executed as a script.
if __name__ == "__main__":
    train()  # train loop

    '''Test Code'''

    # gen_out = gen([tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM)),
    #                tf.ones((config.BATCH_SIZE, 10))])
    # disc_out = disc([tf.random.normal((config.BATCH_SIZE,) + config.IMAGE_SHAPE),
    #                  tf.ones((config.BATCH_SIZE, 10))])
    #
    # assert gen_out.shape == (32, 28, 28, 1)
|
flexible
|
{
"blob_id": "e265b2b2ccc0841ccb8b766de4ae2a869f2d280d",
"index": 8326,
"step-1": "<mask token>\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n <mask token>\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask 
token>\n",
"step-2": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = 
inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\n@tf.function(input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = 
inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits),\n fake_logits)\n return loss\n\n\n<mask token>\n\n\n@tf.function(input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n 
plt.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = 
inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\ndef disc_loss(real_logits, fake_logits):\n real_loss = tf.losses.BinaryCrossentropy()(tf.ones_like(real_logits),\n real_logits)\n fake_loss = tf.losses.BinaryCrossentropy()(tf.zeros_like(fake_logits),\n fake_logits)\n loss = 0.5 * (real_loss + fake_loss)\n return loss\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits),\n fake_logits)\n return loss\n\n\n<mask token>\n\n\n@tf.function(input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef train():\n for epoch in range(config.EPOCHS):\n print(f'\\nEpoch {epoch + 
1}/{config.EPOCHS} :')\n for n, (image, label) in enumerate(train_dataset):\n train_step(image, label, epoch + 1)\n prog_bar.update(n)\n if (epoch + 1) % 5 == 0:\n ckpt_manager.save()\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\n<mask token>\n",
"step-5": "import tensorflow as tf\nfrom keras import layers, Model, Input\nfrom keras.utils import Progbar, to_categorical\nfrom keras.datasets.mnist import load_data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport config\nimport datetime\n\nimg_height, img_width, _ = config.IMAGE_SHAPE\n\n(X, Y), (_, _) = load_data()\nX = X.reshape((-1, img_height, img_width, 1))\nX = X.astype(\"float32\")\nY = to_categorical(Y, num_classes=10, dtype=\"float32\")\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7*7*128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding=\"same\")\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding=\"same\")\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2, padding=\"same\")\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\"tanh\", padding=\"same\")\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = 
layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding=\"same\")\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7*7*16)\n\n def call(self, inputs, training=None, mask=None):\n\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n\n return x\n\n def get_config(self):\n return {\"img_shape\": self.img_shape, \"name\": self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\ngen = Generator(name=\"generator\")\ndisc = Discriminator(name=\"discriminator\", img_shape=config.IMAGE_SHAPE)\n\ngen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)\ndisc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)\n\ndataset = tf.data.Dataset.from_tensor_slices((X, Y))\ntrain_dataset = dataset.take(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)\nval_dataset = dataset.skip(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)\n\ncheckpoint = tf.train.Checkpoint(generator=gen,\n gen_optimizer=gen_optimizer,\n discriminator=disc,\n disc_optimizer=disc_optimizer)\nckpt_manager = tf.train.CheckpointManager(checkpoint, directory=config.CKPT_DIR, max_to_keep=3)\n\n# creates a summary writer, writes a summary in a file to access on tensorboard later\nsummary_writer = tf.summary.create_file_writer(\n logdir=config.LOG_DIR + \"fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n\n'''LOSSES'''\n\n\ndef 
disc_loss(real_logits, fake_logits):\n real_loss = tf.losses.BinaryCrossentropy()(tf.ones_like(real_logits), real_logits)\n fake_loss = tf.losses.BinaryCrossentropy()(tf.zeros_like(fake_logits), fake_logits)\n loss = 0.5*(real_loss + fake_loss)\n return loss\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits), fake_logits)\n return loss\n\n\n# give signature to avoid retracing\n\nsignature = [\n tf.TensorSpec(shape=(None, 28, 28, 1), dtype=tf.float32),\n tf.TensorSpec(shape=(None, 10), dtype=tf.float32),\n tf.TensorSpec(shape=(), dtype=tf.int64)\n]\n\n\n@tf.function(input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n\n # writes a tensorboard summary (creates graph if scalar)\n with summary_writer.as_default():\n tf.summary.scalar(\"generator_loss\", g_loss, step=epoch)\n tf.summary.scalar(\"discriminator_loss\", d_loss, step=epoch)\n\n\ng_loss = tf.metrics.Mean()\nd_loss = tf.metrics.Mean()\nprog_bar = Progbar(1500, stateful_metrics=[g_loss, d_loss])\n\nif ckpt_manager.latest_checkpoint:\n checkpoint.restore(ckpt_manager.latest_checkpoint).expect_partial()\n print(f\"Restored the training checkpoint...{ckpt_manager.latest_checkpoint}\")\n\n\ndef train():\n for epoch in range(config.EPOCHS):\n print(f\"\\nEpoch {epoch+1}/{config.EPOCHS} 
:\")\n for n, (image, label) in enumerate(train_dataset):\n train_step(image, label, epoch+1)\n prog_bar.update(n)\n\n if (epoch+1) % 5 == 0:\n ckpt_manager.save()\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n\n out = gen([z, labels])\n out = (out.numpy() * 127.5) + 127.5 # de-process\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis(\"off\")\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\nif __name__ == \"__main__\":\n train() # train loop\n\n '''Test Code'''\n\n # gen_out = gen([tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM)),\n # tf.ones((config.BATCH_SIZE, 10))])\n # disc_out = disc([tf.random.normal((config.BATCH_SIZE,) + config.IMAGE_SHAPE),\n # tf.ones((config.BATCH_SIZE, 10))])\n #\n # assert gen_out.shape == (32, 28, 28, 1)\n\n\n\n\n\n\n\n",
"step-ids": [
8,
12,
13,
15,
19
]
}
|
[
8,
12,
13,
15,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Cardid(name):
    """Return 1 if a visible card named *name* exists on the Trello board, else 0.

    Queries the board URL from ``vars`` with the stored API key/token and scans
    the visible cards for an exact name match.
    """
    query = {'key': vars.Key, 'token': vars.Token, 'cards': 'visible'}
    execute = requests.request('GET', vars.BoardGetUrl, params=query).json()
    # BUG FIX: start from 0 so an empty 'cards' list no longer raises
    # UnboundLocalError when we hit the return below.
    cardID = 0
    for row in execute['cards']:
        if row['name'] == name:
            cardID = 1
            break
    return cardID
<|reserved_special_token_1|>
import requests, vars
def Cardid(name):
    """Return 1 if a visible card named *name* exists on the Trello board, else 0.

    Queries the board URL from ``vars`` with the stored API key/token and scans
    the visible cards for an exact name match.
    """
    query = {'key': vars.Key, 'token': vars.Token, 'cards': 'visible'}
    execute = requests.request('GET', vars.BoardGetUrl, params=query).json()
    # BUG FIX: start from 0 so an empty 'cards' list no longer raises
    # UnboundLocalError when we hit the return below.
    cardID = 0
    for row in execute['cards']:
        if row['name'] == name:
            cardID = 1
            break
    return cardID
<|reserved_special_token_1|>
import requests, vars
def Cardid(name):
    """Return 1 if a visible card named *name* exists on the Trello board, else 0.

    Queries the board URL from ``vars`` with the stored API key/token and scans
    the visible cards for an exact name match.
    """
    query = {"key":vars.Key, "token":vars.Token, "cards":"visible"}
    execute = requests.request("GET", vars.BoardGetUrl, params=query).json()
    # BUG FIX: start from 0 so an empty 'cards' list no longer raises
    # UnboundLocalError when we hit the return below.
    cardID = 0
    for row in execute['cards']:
        if row['name'] == name:
            cardID = 1
            break
    return cardID
|
flexible
|
{
"blob_id": "68493acce71060799da8c6cb03f2ddffce64aa92",
"index": 8970,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Cardid(name):\n query = {'key': vars.Key, 'token': vars.Token, 'cards': 'visible'}\n execute = requests.request('GET', vars.BoardGetUrl, params=query).json()\n for row in execute['cards']:\n if row['name'] == name:\n cardID = 1\n break\n else:\n cardID = 0\n return cardID\n",
"step-3": "import requests, vars\n\n\ndef Cardid(name):\n query = {'key': vars.Key, 'token': vars.Token, 'cards': 'visible'}\n execute = requests.request('GET', vars.BoardGetUrl, params=query).json()\n for row in execute['cards']:\n if row['name'] == name:\n cardID = 1\n break\n else:\n cardID = 0\n return cardID\n",
"step-4": "import requests, vars\n\ndef Cardid(name):\n query = {\"key\":vars.Key, \"token\":vars.Token, \"cards\":\"visible\"}\n execute = requests.request(\"GET\", vars.BoardGetUrl, params=query).json()\n for row in execute['cards']:\n if row['name'] == name:\n cardID = 1\n break\n else:\n cardID = 0\n return cardID\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import random
import datetime
import userval
import file
from getpass import getpass
#SORRY FOR THE REDUNDANT CODE, I RAN OUT OF OPTIONS
def register():
    """Prompt for the new account holder's details and stash them in globals.

    Populates first/last/email/pin/password and the derived accountName
    ("last first") for welcome() to persist via file.create().
    """
    global first, last, email, pin, password, accountName
    first = input("input firstname:")
    last = input("input lastname:")
    email = input("input email:")
    pin = input("input a four digit pin:")
    password = input("Input Password:")
    accountName = f"{last} {first}"
#---------------------Account number generator-------------------------
def genAcc():
    """Generate a 10-digit account number string with the fixed '30' prefix.

    The leading "3" and "0" mark locally issued accounts; the remaining eight
    digits are drawn uniformly at random.
    """
    digits = [3, 0]
    for _ in range(8):
        digits.append(random.randint(0, 9))
    return ''.join(str(d) for d in digits)
#-----------------Transfer function---------------------
def transfer(tName, tNo, amount, tBankName):
    """Debit *amount* from the logged-in user and report the outgoing transfer.

    BUG FIX: the original *added* the amount to the sender's balance; an
    outgoing transfer must debit the sender (mirroring withdraw()'s check).
    Also repairs the receipt string, which used "\\A" (a literal backslash,
    not a newline) and misspelled "Transfer".
    """
    user[-1] = int(user[-1])
    if user[-1] < amount:
        print("Sorry, not enough funds!")
        return False
    user[-1] -= amount
    newval = str(user[-1])  # siblings persist the balance as a string
    try:
        file.update(user_acc_no, -1, newval)
    except FileNotFoundError:
        print("an issues occured due to network, try again later")
        return False
    print("Transfer successful! \nAccount name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}".format(tName, tNo, amount, tBankName))
    print("Balance : ${}".format(user[-1]))
    tym = datetime.datetime.now()
    print(tym)
#-----------------deposit function-----------------------
def deposit(amount):
    """Credit *amount* to the logged-in user's balance and persist it."""
    balance = int(user[-1]) + amount
    user[-1] = balance
    newval = str(balance)
    try:
        file.update(user_acc_no, -1, newval)
    except FileNotFoundError:
        print("an issues occured due to network, try again later")
        return False
    print("{} successful deposited".format(amount))
    print("your balance is ${}".format(user[-1]))
    print(datetime.datetime.now())
#------------------withdraw function---------------------------
def withdraw(amount):
    """Debit *amount* from the logged-in user's balance if funds allow.

    Note the original (preserved) rule: the balance must be strictly greater
    than *amount*, so withdrawing the exact balance is refused.
    """
    user[-1] = int(user[-1])
    if user[-1] > amount:
        user[-1] -= amount
        print("successful")
        print("your balance is ${}".format(user[-1]))
        # BUG FIX: the original did `str(newval)` and discarded the result,
        # persisting an int while every sibling persists a string.  Also only
        # persist when the balance actually changed.
        newval = str(user[-1])
        try:
            file.update(user_acc_no, -1, newval)
        except FileNotFoundError:
            print("an issues occured due to network, try again later")
            return False
    else:
        print("Sorry, not enough funds!")
    tym = datetime.datetime.now()
    print(tym)
#---------------------balance check function------------------------
def statement():
    """Print the logged-in user's first name and current balance."""
    print(f"hi {user[1]} your balance is ${user[-1]}.")
#---------------------pin validation function------------------------
def pinval(val):
    """Return True when *val* matches the stored transaction pin (user[-3])."""
    return val == user[-3]
#---------------------pin reset function---------------------------
def pinReset(val, val2):
    """Set a new transaction pin when *val* and *val2* agree, then persist it.

    Prints a timestamp on both outcomes; returns False if persisting fails.
    """
    if val != val2:
        print("oops!! The two pin are not the same")
    else:
        user[-3] = val
        print("Pin change successful")
        try:
            file.update(user_acc_no, -3, user[-3])
        except FileNotFoundError:
            print("an issues occured due to network, try again later")
            return False
    print(datetime.datetime.now())
#-----------------password reset function-------------------------
def passReset(val, val2):
    """Set a new login password when both entries agree, then persist it.

    Prints a timestamp on both outcomes; returns False if persisting fails.
    """
    if val != val2:
        print("Passwords not Matched")
    else:
        user[-2] = val
        print("Password change successful")
        try:
            file.update(user_acc_no, -2, user[-2])
        except FileNotFoundError:
            print("an issues occured due to network, try again later")
            return False
    print(datetime.datetime.now())
#----------------------login function---------------------
def login():
    """Prompt for credentials until authentication succeeds, then open the menu.

    Sets the module globals used by the transaction helpers.  (Loop form of
    the original retry-by-recursion.)
    """
    global user_acc_no, user_password, user
    while True:
        print("===================LOGIN PAGE=================")
        print("Enter your login details")
        user_acc_no = int(input("Enter username:"))
        user_password = getpass("Enter password:")
        user = file.authentication(user_acc_no, user_password)
        if user:
            operation(user)
            return
        print("invalid account and password")
def welcome():
    """Top-level menu: register a new account or log in to an existing one.

    BUG FIX: an invalid choice now re-prompts instead of silently exiting the
    program.  Also drops the dead `accountNo = ""` pre-assignment.
    """
    opt = input("Hello!, Welcome to Zuri Bank \n1. Register\n2.Login \n==>")
    if opt == '1':
        print("============================ZURI BANK========================")
        print("Welcome please carefully follow the prompt and register your details\n Note please only input 1 or 2 ")
        register()
        accountNo = genAcc()
        is_user_created = file.create(accountNo, first, last, email, pin, password)
        # NOTE(review): a falsy is_user_created is silently ignored here, as in
        # the original — confirm file.create reports its own errors.
        if is_user_created:
            try:
                print("Registration Successful!!!\n your details are:\n Account name is {} \n Account number is {}".format(accountName, accountNo))
                login()
                tym = datetime.datetime.now()
                print(tym)
            except (FileExistsError, ValueError):
                print("sorry there was a issue in network connection, please try again")
                register()
    elif opt == '2':
        login()
    else:
        print("Wrong input. Note: enter 1 or 2 to select")
        welcome()  # re-prompt on bad input instead of falling off the end
def operation(user):
    """Main account menu; dispatches on the chosen option and re-enters itself.

    *user* is the authenticated record list; as used across this file,
    user[1] is the first name, user[3] the email, and the last three slots
    are pin, password, balance (indices -3, -2, -1).  Completed actions
    recursively re-open the menu; option 8 returns to login, anything
    unrecognised (including '0') exits the program.
    """
    print("==========================ZURI BANK===================")
    print("welcome {}".format(user[1] + ' ' + user[0]))
    print("Balance : ${}".format(user[-1]))
    print("Please input only 1,2,3,4,5,6, or 7")
    mainOpt=input("select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>")
    if mainOpt == '1':
        # Transfer: requires a correct pin, then a 10-digit destination account.
        print("Balance = ${}".format(user[-1]))
        amount=int(input("Enter amount:"))
        tName=input("Enter account name:")
        tNo=input("Enter account Number:")
        tBankName=input("Enter Bank:")
        val=input("Enter PIN")
        if (pinval(val) == True):
            if len(tNo) != 10:
                print("wrong account number, Note Account number must be 10 digit")
            else:
                transfer(tName,tNo,amount,tBankName)
                operation(user)
        else:
            print("wrong pin")
    elif mainOpt == '2':
        # Withdrawal.
        # NOTE(review): val is converted to int here, but pinval() compares it
        # against the stored pin, which register() reads as a string — this
        # comparison likely always fails; confirm against file storage.
        print("Balance = ${}".format(user[-1]))
        amount=int(input("Enter Amount:"))
        val=int(input("Enter transaction Pin:"))
        pinval(val)
        if pinval(val) == True:
            withdraw(amount)
            operation(user)
        else:
            print("oop!! wrong pin")
    elif mainOpt == '3':
        # Deposit: no pin check required.
        print("Balance = ${}".format(user[-1]))
        amount=int(input("Enter Amount:"))
        deposit(amount)
        operation(user)
    elif mainOpt == '4':
        # Change transaction pin (pinReset validates that the entries match).
        val=input("Enter new pin:")
        val2=input("Confirm new pin:")
        pinReset(val,val2)
        operation(user)
    elif mainOpt == '5':
        # Change login password.
        val=input("Enter new password:")
        val2=input("Confirm new password:")
        passReset(val,val2)
        operation(user)
    elif mainOpt == '6':
        # Balance statement.
        statement()
        operation(user)
    elif mainOpt == '7':
        # Complaint: acknowledged via the email on record (user[3]).
        comp=input("Enter complaint:")
        print("Thanks {} for reaching to us, we will get back to you shortly via your email:{}".format(user[1],user[3]))
        operation(user)
    elif mainOpt == '8':
        # Logout: return to the login prompt.
        login()
    else:
        # Any other input (including '0') ends the session.
        print("Thank you for banking with us!!!")
        exit()
welcome()
|
normal
|
{
"blob_id": "a8106c8f14e15706b12e6d157b889288b85bc277",
"index": 6789,
"step-1": "<mask token>\n\n\ndef genAcc():\n num = 1\n y = [3, 0]\n while num <= 8:\n x = random.randint(0, 9)\n y.append(x)\n num = num + 1\n accountNo = ''.join([str(i) for i in y])\n return accountNo\n\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print(\n \"\"\"Tranfer successful! \\\\Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}\"\"\"\n .format(tName, tNo, amount, tBankName))\n print('Balance : ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef statement():\n print('hi {} your balance is ${}.'.format(user[1], user[-1]))\n\n\ndef pinval(val):\n if val == user[-3]:\n return True\n else:\n return False\n\n\ndef pinReset(val, val2):\n if val == val2:\n user[-3] = val\n print('Pin change successful')\n newval = user[-3]\n try:\n file.update(user_acc_no, -3, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('oops!! The two pin are not the same')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef passReset(val, val2):\n if val == val2:\n user[-2] = val\n print('Password change successful')\n newval = user[-2]\n try:\n file.update(user_acc_no, -2, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('Passwords not Matched')\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef operation(user):\n print('==========================ZURI BANK===================')\n print('welcome {}'.format(user[1] + ' ' + user[0]))\n print('Balance : ${}'.format(user[-1]))\n print('Please input only 1,2,3,4,5,6, or 7')\n mainOpt = input(\n \"\"\"select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. 
Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>\"\"\"\n )\n if mainOpt == '1':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter amount:'))\n tName = input('Enter account name:')\n tNo = input('Enter account Number:')\n tBankName = input('Enter Bank:')\n val = input('Enter PIN')\n if pinval(val) == True:\n if len(tNo) != 10:\n print(\n 'wrong account number, Note Account number must be 10 digit'\n )\n else:\n transfer(tName, tNo, amount, tBankName)\n operation(user)\n else:\n print('wrong pin')\n elif mainOpt == '2':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n val = int(input('Enter transaction Pin:'))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print('oop!! wrong pin')\n elif mainOpt == '3':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n deposit(amount)\n operation(user)\n elif mainOpt == '4':\n val = input('Enter new pin:')\n val2 = input('Confirm new pin:')\n pinReset(val, val2)\n operation(user)\n elif mainOpt == '5':\n val = input('Enter new password:')\n val2 = input('Confirm new password:')\n passReset(val, val2)\n operation(user)\n elif mainOpt == '6':\n statement()\n operation(user)\n elif mainOpt == '7':\n comp = input('Enter complaint:')\n print(\n 'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'\n .format(user[1], user[3]))\n operation(user)\n elif mainOpt == '8':\n login()\n else:\n print('Thank you for banking with us!!!')\n exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef genAcc():\n num = 1\n y = [3, 0]\n while num <= 8:\n x = random.randint(0, 9)\n y.append(x)\n num = num + 1\n accountNo = ''.join([str(i) for i in y])\n return accountNo\n\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print(\n \"\"\"Tranfer successful! \\\\Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}\"\"\"\n .format(tName, tNo, amount, tBankName))\n print('Balance : ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef deposit(amount):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print('{} successful deposited'.format(amount))\n print('your balance is ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef statement():\n print('hi {} your balance is ${}.'.format(user[1], user[-1]))\n\n\ndef pinval(val):\n if val == user[-3]:\n return True\n else:\n return False\n\n\ndef pinReset(val, val2):\n if val == val2:\n user[-3] = val\n print('Pin change successful')\n newval = user[-3]\n try:\n file.update(user_acc_no, -3, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('oops!! 
The two pin are not the same')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef passReset(val, val2):\n if val == val2:\n user[-2] = val\n print('Password change successful')\n newval = user[-2]\n try:\n file.update(user_acc_no, -2, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('Passwords not Matched')\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef operation(user):\n print('==========================ZURI BANK===================')\n print('welcome {}'.format(user[1] + ' ' + user[0]))\n print('Balance : ${}'.format(user[-1]))\n print('Please input only 1,2,3,4,5,6, or 7')\n mainOpt = input(\n \"\"\"select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>\"\"\"\n )\n if mainOpt == '1':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter amount:'))\n tName = input('Enter account name:')\n tNo = input('Enter account Number:')\n tBankName = input('Enter Bank:')\n val = input('Enter PIN')\n if pinval(val) == True:\n if len(tNo) != 10:\n print(\n 'wrong account number, Note Account number must be 10 digit'\n )\n else:\n transfer(tName, tNo, amount, tBankName)\n operation(user)\n else:\n print('wrong pin')\n elif mainOpt == '2':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n val = int(input('Enter transaction Pin:'))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print('oop!! 
wrong pin')\n elif mainOpt == '3':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n deposit(amount)\n operation(user)\n elif mainOpt == '4':\n val = input('Enter new pin:')\n val2 = input('Confirm new pin:')\n pinReset(val, val2)\n operation(user)\n elif mainOpt == '5':\n val = input('Enter new password:')\n val2 = input('Confirm new password:')\n passReset(val, val2)\n operation(user)\n elif mainOpt == '6':\n statement()\n operation(user)\n elif mainOpt == '7':\n comp = input('Enter complaint:')\n print(\n 'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'\n .format(user[1], user[3]))\n operation(user)\n elif mainOpt == '8':\n login()\n else:\n print('Thank you for banking with us!!!')\n exit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef register():\n global first, last, email, pin, password, accountName\n first = input('input firstname:')\n last = input('input lastname:')\n email = input('input email:')\n pin = input('input a four digit pin:')\n password = input('Input Password:')\n accountName = '{} {}'.format(last, first)\n\n\ndef genAcc():\n num = 1\n y = [3, 0]\n while num <= 8:\n x = random.randint(0, 9)\n y.append(x)\n num = num + 1\n accountNo = ''.join([str(i) for i in y])\n return accountNo\n\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print(\n \"\"\"Tranfer successful! \\\\Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}\"\"\"\n .format(tName, tNo, amount, tBankName))\n print('Balance : ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef deposit(amount):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print('{} successful deposited'.format(amount))\n print('your balance is ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef statement():\n print('hi {} your balance is ${}.'.format(user[1], user[-1]))\n\n\ndef pinval(val):\n if val == user[-3]:\n return True\n else:\n return False\n\n\ndef pinReset(val, val2):\n if val == val2:\n user[-3] = val\n print('Pin change successful')\n newval = user[-3]\n try:\n file.update(user_acc_no, -3, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('oops!! 
The two pin are not the same')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef passReset(val, val2):\n if val == val2:\n user[-2] = val\n print('Password change successful')\n newval = user[-2]\n try:\n file.update(user_acc_no, -2, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('Passwords not Matched')\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef operation(user):\n print('==========================ZURI BANK===================')\n print('welcome {}'.format(user[1] + ' ' + user[0]))\n print('Balance : ${}'.format(user[-1]))\n print('Please input only 1,2,3,4,5,6, or 7')\n mainOpt = input(\n \"\"\"select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>\"\"\"\n )\n if mainOpt == '1':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter amount:'))\n tName = input('Enter account name:')\n tNo = input('Enter account Number:')\n tBankName = input('Enter Bank:')\n val = input('Enter PIN')\n if pinval(val) == True:\n if len(tNo) != 10:\n print(\n 'wrong account number, Note Account number must be 10 digit'\n )\n else:\n transfer(tName, tNo, amount, tBankName)\n operation(user)\n else:\n print('wrong pin')\n elif mainOpt == '2':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n val = int(input('Enter transaction Pin:'))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print('oop!! 
wrong pin')\n elif mainOpt == '3':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n deposit(amount)\n operation(user)\n elif mainOpt == '4':\n val = input('Enter new pin:')\n val2 = input('Confirm new pin:')\n pinReset(val, val2)\n operation(user)\n elif mainOpt == '5':\n val = input('Enter new password:')\n val2 = input('Confirm new password:')\n passReset(val, val2)\n operation(user)\n elif mainOpt == '6':\n statement()\n operation(user)\n elif mainOpt == '7':\n comp = input('Enter complaint:')\n print(\n 'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'\n .format(user[1], user[3]))\n operation(user)\n elif mainOpt == '8':\n login()\n else:\n print('Thank you for banking with us!!!')\n exit()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef register():\n global first, last, email, pin, password, accountName\n first = input('input firstname:')\n last = input('input lastname:')\n email = input('input email:')\n pin = input('input a four digit pin:')\n password = input('Input Password:')\n accountName = '{} {}'.format(last, first)\n\n\ndef genAcc():\n num = 1\n y = [3, 0]\n while num <= 8:\n x = random.randint(0, 9)\n y.append(x)\n num = num + 1\n accountNo = ''.join([str(i) for i in y])\n return accountNo\n\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print(\n \"\"\"Tranfer successful! \\\\Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}\"\"\"\n .format(tName, tNo, amount, tBankName))\n print('Balance : ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef deposit(amount):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print('{} successful deposited'.format(amount))\n print('your balance is ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef withdraw(amount):\n user[-1] = int(user[-1])\n if user[-1] > amount:\n user[-1] -= amount\n print('successful')\n print('your balance is ${}'.format(user[-1]))\n else:\n print('Sorry, not enough funds!')\n newval = user[-1]\n str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef statement():\n print('hi {} your balance is ${}.'.format(user[1], user[-1]))\n\n\ndef pinval(val):\n if val == 
user[-3]:\n return True\n else:\n return False\n\n\ndef pinReset(val, val2):\n if val == val2:\n user[-3] = val\n print('Pin change successful')\n newval = user[-3]\n try:\n file.update(user_acc_no, -3, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('oops!! The two pin are not the same')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef passReset(val, val2):\n if val == val2:\n user[-2] = val\n print('Password change successful')\n newval = user[-2]\n try:\n file.update(user_acc_no, -2, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('Passwords not Matched')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef login():\n global user_acc_no, user_password, user\n print('===================LOGIN PAGE=================')\n print('Enter your login details')\n user_acc_no = int(input('Enter username:'))\n user_password = getpass('Enter password:')\n user = file.authentication(user_acc_no, user_password)\n if user:\n operation(user)\n else:\n print('invalid account and password')\n login()\n\n\ndef welcome():\n opt = input('Hello!, Welcome to Zuri Bank \\n1. 
Register\\n2.Login \\n==>')\n if opt == '1':\n print('============================ZURI BANK========================')\n print(\n \"\"\"Welcome please carefully follow the prompt and register your details\n Note please only input 1 or 2 \"\"\"\n )\n register()\n accountNo = ''\n accountNo = genAcc()\n is_user_created = file.create(accountNo, first, last, email, pin,\n password)\n if is_user_created:\n try:\n print(\n \"\"\"Registration Successful!!!\n your details are:\n Account name is {} \n Account number is {}\"\"\"\n .format(accountName, accountNo))\n login()\n tym = datetime.datetime.now()\n print(tym)\n except FileExistsError:\n print(\n 'sorry there was a issue in network connection, please try again'\n )\n register()\n except ValueError:\n print(\n 'sorry there was a issue in network connection, please try again'\n )\n register()\n elif opt == '2':\n login()\n else:\n print('Wrong input. Note: enter 1 or 2 to select')\n\n\ndef operation(user):\n print('==========================ZURI BANK===================')\n print('welcome {}'.format(user[1] + ' ' + user[0]))\n print('Balance : ${}'.format(user[-1]))\n print('Please input only 1,2,3,4,5,6, or 7')\n mainOpt = input(\n \"\"\"select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. 
Exit \n==>\"\"\"\n )\n if mainOpt == '1':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter amount:'))\n tName = input('Enter account name:')\n tNo = input('Enter account Number:')\n tBankName = input('Enter Bank:')\n val = input('Enter PIN')\n if pinval(val) == True:\n if len(tNo) != 10:\n print(\n 'wrong account number, Note Account number must be 10 digit'\n )\n else:\n transfer(tName, tNo, amount, tBankName)\n operation(user)\n else:\n print('wrong pin')\n elif mainOpt == '2':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n val = int(input('Enter transaction Pin:'))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print('oop!! wrong pin')\n elif mainOpt == '3':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n deposit(amount)\n operation(user)\n elif mainOpt == '4':\n val = input('Enter new pin:')\n val2 = input('Confirm new pin:')\n pinReset(val, val2)\n operation(user)\n elif mainOpt == '5':\n val = input('Enter new password:')\n val2 = input('Confirm new password:')\n passReset(val, val2)\n operation(user)\n elif mainOpt == '6':\n statement()\n operation(user)\n elif mainOpt == '7':\n comp = input('Enter complaint:')\n print(\n 'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'\n .format(user[1], user[3]))\n operation(user)\n elif mainOpt == '8':\n login()\n else:\n print('Thank you for banking with us!!!')\n exit()\n\n\nwelcome()\n",
"step-5": "import random\nimport datetime \nimport userval\nimport file\nfrom getpass import getpass\n#SORRY FOR THE REDUNDANT CODE, I RAN OUT OF OPTIONS\n\n\ndef register():\n global first,last,email,pin,password,accountName #prepared_user_details\n first=input(\"input firstname:\")\n last=input(\"input lastname:\")\n email=input(\"input email:\")\n pin=input(\"input a four digit pin:\")\n password=input(\"Input Password:\")\n accountName = \"{} {}\".format(last,first)\n #prepared_user_details= first + \",\" + last + \",\" + email + \",\" + str(pin) + \",\" + password + \",\" + str(0)\n \n #---------------------Account number generator-------------------------\n\ndef genAcc():\n num= 1\n y=[3,0] #all account numbers generated must start with three zero to make it unique\n while num <= 8:\n x = random.randint(0,9)\n y.append(x)\n num = num +1\n accountNo=''.join([str(i)for i in y])\n return accountNo\n \n #-----------------Transfer function---------------------\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1]= int(user[-1]) + amount\n newval=user[-1]\n newval=str(newval)\n try:\n file.update(user_acc_no,-1,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n print(\"Tranfer successful! 
\\Account name {} \\nAccount number : {} \\nAmount transferred : {} \\nBank : {}\".format(tName, tNo, amount, tBankName))\n print(\"Balance : ${}\".format(user[-1]))\n tym =datetime.datetime.now()\n print(tym)\n \n #-----------------deposit function-----------------------\n\ndef deposit(amount):\n user[-1] = int(user[-1]) + amount\n newval=user[-1]\n newval=str(newval)\n try:\n file.update(user_acc_no,-1,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n print(\"{} successful deposited\".format(amount))\n print(\"your balance is ${}\".format(user[-1]))\n tym =datetime.datetime.now()\n print(tym)\n #------------------withdraw function---------------------------\n\ndef withdraw(amount):\n user[-1]=int(user[-1])\n if user[-1] > amount:\n user[-1] -= amount\n print(\"successful\")\n print(\"your balance is ${}\".format(user[-1]))\n else:\n print(\"Sorry, not enough funds!\")\n newval = user[-1]\n str(newval)\n try:\n file.update(user_acc_no,-1,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n tym =datetime.datetime.now()\n print(tym)\n \n \n #---------------------balance check function------------------------\n\n\ndef statement():\n print(\"hi {} your balance is ${}.\".format(user[1],user[-1]))\n \n \n #---------------------pin validation function------------------------\n\n\ndef pinval(val):\n if val == user[-3]:\n return True\n else:\n return False\n \n \n #---------------------pin reset function---------------------------\ndef pinReset(val,val2):\n if val == val2:\n user[-3] = val\n print(\"Pin change successful\")\n newval = user[-3]\n try:\n file.update(user_acc_no,-3,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n else:\n print(\"oops!! 
The two pin are not the same\")\n tym =datetime.datetime.now()\n print(tym)\n \n \n #-----------------password reset function------------------------- \ndef passReset(val, val2):\n if val == val2:\n user[-2]= val\n print(\"Password change successful\")\n newval = user[-2]\n try:\n file.update(user_acc_no,-2,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n else:\n print(\"Passwords not Matched\")\n tym =datetime.datetime.now()\n print(tym)\n \n \n #----------------------login function---------------------\n\ndef login():\n global user_acc_no, user_password,user\n print(\"===================LOGIN PAGE=================\") \n print(\"Enter your login details\")\n user_acc_no = int(input(\"Enter username:\"))\n user_password = getpass(\"Enter password:\")\n\n user= file.authentication(user_acc_no, user_password)\n \n if user:\n operation(user)\n else:\n print(\"invalid account and password\")\n login()\n \n \n\n\n\ndef welcome(): \n #---------------------------------main prompt---------------\n opt= input(\"Hello!, Welcome to Zuri Bank \\n1. 
Register\\n2.Login \\n==>\")\n #-----------------------------Registration Prompt--------------------------\n if opt == '1':\n print(\"============================ZURI BANK========================\")\n print(\"Welcome please carefully follow the prompt and register your details\\n Note please only input 1 or 2 \")\n \n register()\n accountNo = \"\"\n accountNo=genAcc()\n is_user_created = file.create(accountNo,first,last,email,pin,password)\n if is_user_created:\n try:\n print(\"Registration Successful!!!\\n your details are:\\n Account name is {} \\n Account number is {}\".format(accountName,accountNo))\n login()\n \n tym =datetime.datetime.now()\n print(tym)\n except FileExistsError:\n print(\"sorry there was a issue in network connection, please try again\")\n register()\n\n except ValueError:\n print(\"sorry there was a issue in network connection, please try again\")\n register()\n \n\n\n elif opt == '2':\n \n login()\n \n\n\n\n else:\n print(\"Wrong input. Note: enter 1 or 2 to select\")\n\n \ndef operation(user): \n \n print(\"==========================ZURI BANK===================\")\n print(\"welcome {}\".format(user[1] + ' ' + user[0]))\n print(\"Balance : ${}\".format(user[-1]))\n print(\"Please input only 1,2,3,4,5,6, or 7\")\n mainOpt=input(\"select an option: \\n1. Transfer \\n2. Withdrawal \\n3. Deposit \\n4. Change Pin \\n5. Reset Password \\n6. Account Statment\\n7. Complaint\\n8. Logout\\n0. 
Exit \\n==>\")\n \n \n if mainOpt == '1':\n print(\"Balance = ${}\".format(user[-1]))\n amount=int(input(\"Enter amount:\"))\n tName=input(\"Enter account name:\")\n tNo=input(\"Enter account Number:\")\n tBankName=input(\"Enter Bank:\")\n val=input(\"Enter PIN\")\n if (pinval(val) == True):\n if len(tNo) != 10:\n print(\"wrong account number, Note Account number must be 10 digit\")\n else:\n transfer(tName,tNo,amount,tBankName)\n operation(user)\n else:\n print(\"wrong pin\")\n \n elif mainOpt == '2':\n print(\"Balance = ${}\".format(user[-1]))\n amount=int(input(\"Enter Amount:\"))\n val=int(input(\"Enter transaction Pin:\"))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print(\"oop!! wrong pin\")\n \n elif mainOpt == '3':\n print(\"Balance = ${}\".format(user[-1]))\n amount=int(input(\"Enter Amount:\"))\n deposit(amount)\n operation(user)\n \n \n elif mainOpt == '4':\n val=input(\"Enter new pin:\")\n val2=input(\"Confirm new pin:\")\n pinReset(val,val2)\n operation(user)\n \n elif mainOpt == '5':\n val=input(\"Enter new password:\")\n val2=input(\"Confirm new password:\")\n passReset(val,val2)\n operation(user)\n \n elif mainOpt == '6':\n statement()\n operation(user)\n \n elif mainOpt == '7':\n comp=input(\"Enter complaint:\")\n print(\"Thanks {} for reaching to us, we will get back to you shortly via your email:{}\".format(user[1],user[3]))\n operation(user)\n \n elif mainOpt == '8':\n login()\n \n else:\n print(\"Thank you for banking with us!!!\")\n exit()\n \n\n\nwelcome()",
"step-ids": [
7,
8,
9,
13,
15
]
}
|
[
7,
8,
9,
13,
15
] |
import sys
import os
import random
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
def file_len(file):
    """Return the size of *file* (its tell() position at end-of-file),
    restoring the caller's current read position afterwards."""
    saved = file.tell()
    file.seek(0, os.SEEK_END)
    try:
        return file.tell()
    finally:
        # Always rewind to where the caller left off.
        file.seek(saved)
def run():
    """Replay captured stdout/stderr files to this process's own streams.

    Command line: ``prog [stdout_file [stderr_file [rand_seed]]]``.  A missing
    file argument is replaced by an empty in-memory stream.  Without a seed,
    each capture is echoed verbatim; with a seed, the two captures are
    interleaved in randomly sized chunks (deterministic for a given seed) to
    simulate the arbitrary interleaving a real child process would produce.
    """
    rand_seed = None
    stderr_filename = None
    stdout_filename = None
    if len(sys.argv) >= 4:
        rand_seed = int(sys.argv[3])
    if len(sys.argv) >= 3:
        stderr_filename = sys.argv[2]
    if len(sys.argv) >= 2:
        stdout_filename = sys.argv[1]

    stdout_file = open(stdout_filename, 'r') if stdout_filename else StringIO()
    stderr_file = open(stderr_filename, 'r') if stderr_filename else StringIO()
    try:
        if not rand_seed:
            sys.stdout.write(stdout_file.read())
            sys.stderr.write(stderr_file.read())
        else:
            random.seed(rand_seed)
            # Upper bound for each random chunk; at least 2 so randrange can
            # return a positive size and the loop always makes progress.
            # BUG FIX: the original passed the float `len / 4` to randrange
            # (an error on Python 3, where randrange requires integers) and
            # sized the *stderr* chunks from the stdout length.
            stdout_bound = max(2, file_len(stdout_file) // 4)
            stderr_bound = max(2, file_len(stderr_file) // 4)
            stdout_eof = False
            stderr_eof = False
            while not stdout_eof or not stderr_eof:
                if not stdout_eof:
                    r = random.randrange(1, stdout_bound)
                    data = stdout_file.read(r)
                    if len(data) < r:
                        stdout_eof = True
                    sys.stdout.write(data)
                if not stderr_eof:
                    r = random.randrange(1, stderr_bound)
                    data = stderr_file.read(r)
                    if len(data) < r:
                        stderr_eof = True
                    sys.stderr.write(data)
    finally:
        # BUG FIX: disk-backed files were never closed; closing the
        # in-memory fallbacks is harmless.
        stdout_file.close()
        stderr_file.close()
# Entry point: replay the capture files named on the command line.
if __name__ == '__main__':
    run()
|
normal
|
{
"blob_id": "b7db0d2f4bbbc2c7763b9d2e6bede74979b65161",
"index": 4283,
"step-1": "<mask token>\n\n\ndef run():\n rand_seed = None\n stderr_filename = None\n stdout_filename = None\n if len(sys.argv) >= 4:\n rand_seed = int(sys.argv[3])\n if len(sys.argv) >= 3:\n stderr_filename = sys.argv[2]\n if len(sys.argv) >= 2:\n stdout_filename = sys.argv[1]\n stdout_file = None\n stderr_file = None\n if stdout_filename:\n stdout_file = open(stdout_filename, 'r')\n else:\n stdout_file = StringIO()\n if stderr_filename:\n stderr_file = open(stderr_filename, 'r')\n else:\n stderr_file = StringIO()\n if not rand_seed:\n sys.stdout.write(stdout_file.read())\n sys.stderr.write(stderr_file.read())\n else:\n random.seed(rand_seed)\n stdout_len = file_len(stdout_file)\n stdout_eof = False\n stderr_eof = False\n while not stdout_eof or not stderr_eof:\n if not stdout_eof:\n r = random.randrange(stdout_len / 4)\n data = stdout_file.read(r)\n if len(data) < r:\n stdout_eof = True\n sys.stdout.write(data)\n if not stderr_eof:\n r = random.randrange(stdout_len / 4)\n data = stderr_file.read(r)\n if len(data) < r:\n stderr_eof = True\n sys.stderr.write(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef file_len(file):\n initial = file.tell()\n file.seek(0, os.SEEK_END)\n size = file.tell()\n file.seek(initial)\n return size\n\n\ndef run():\n rand_seed = None\n stderr_filename = None\n stdout_filename = None\n if len(sys.argv) >= 4:\n rand_seed = int(sys.argv[3])\n if len(sys.argv) >= 3:\n stderr_filename = sys.argv[2]\n if len(sys.argv) >= 2:\n stdout_filename = sys.argv[1]\n stdout_file = None\n stderr_file = None\n if stdout_filename:\n stdout_file = open(stdout_filename, 'r')\n else:\n stdout_file = StringIO()\n if stderr_filename:\n stderr_file = open(stderr_filename, 'r')\n else:\n stderr_file = StringIO()\n if not rand_seed:\n sys.stdout.write(stdout_file.read())\n sys.stderr.write(stderr_file.read())\n else:\n random.seed(rand_seed)\n stdout_len = file_len(stdout_file)\n stdout_eof = False\n stderr_eof = False\n while not stdout_eof or not stderr_eof:\n if not stdout_eof:\n r = random.randrange(stdout_len / 4)\n data = stdout_file.read(r)\n if len(data) < r:\n stdout_eof = True\n sys.stdout.write(data)\n if not stderr_eof:\n r = random.randrange(stdout_len / 4)\n data = stderr_file.read(r)\n if len(data) < r:\n stderr_eof = True\n sys.stderr.write(data)\n\n\n<mask token>\n",
"step-3": "<mask token>\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\n\ndef file_len(file):\n initial = file.tell()\n file.seek(0, os.SEEK_END)\n size = file.tell()\n file.seek(initial)\n return size\n\n\ndef run():\n rand_seed = None\n stderr_filename = None\n stdout_filename = None\n if len(sys.argv) >= 4:\n rand_seed = int(sys.argv[3])\n if len(sys.argv) >= 3:\n stderr_filename = sys.argv[2]\n if len(sys.argv) >= 2:\n stdout_filename = sys.argv[1]\n stdout_file = None\n stderr_file = None\n if stdout_filename:\n stdout_file = open(stdout_filename, 'r')\n else:\n stdout_file = StringIO()\n if stderr_filename:\n stderr_file = open(stderr_filename, 'r')\n else:\n stderr_file = StringIO()\n if not rand_seed:\n sys.stdout.write(stdout_file.read())\n sys.stderr.write(stderr_file.read())\n else:\n random.seed(rand_seed)\n stdout_len = file_len(stdout_file)\n stdout_eof = False\n stderr_eof = False\n while not stdout_eof or not stderr_eof:\n if not stdout_eof:\n r = random.randrange(stdout_len / 4)\n data = stdout_file.read(r)\n if len(data) < r:\n stdout_eof = True\n sys.stdout.write(data)\n if not stderr_eof:\n r = random.randrange(stdout_len / 4)\n data = stderr_file.read(r)\n if len(data) < r:\n stderr_eof = True\n sys.stderr.write(data)\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "import sys\nimport os\nimport random\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\n\ndef file_len(file):\n initial = file.tell()\n file.seek(0, os.SEEK_END)\n size = file.tell()\n file.seek(initial)\n return size\n\n\ndef run():\n rand_seed = None\n stderr_filename = None\n stdout_filename = None\n if len(sys.argv) >= 4:\n rand_seed = int(sys.argv[3])\n if len(sys.argv) >= 3:\n stderr_filename = sys.argv[2]\n if len(sys.argv) >= 2:\n stdout_filename = sys.argv[1]\n stdout_file = None\n stderr_file = None\n if stdout_filename:\n stdout_file = open(stdout_filename, 'r')\n else:\n stdout_file = StringIO()\n if stderr_filename:\n stderr_file = open(stderr_filename, 'r')\n else:\n stderr_file = StringIO()\n if not rand_seed:\n sys.stdout.write(stdout_file.read())\n sys.stderr.write(stderr_file.read())\n else:\n random.seed(rand_seed)\n stdout_len = file_len(stdout_file)\n stdout_eof = False\n stderr_eof = False\n while not stdout_eof or not stderr_eof:\n if not stdout_eof:\n r = random.randrange(stdout_len / 4)\n data = stdout_file.read(r)\n if len(data) < r:\n stdout_eof = True\n sys.stdout.write(data)\n if not stderr_eof:\n r = random.randrange(stdout_len / 4)\n data = stderr_file.read(r)\n if len(data) < r:\n stderr_eof = True\n sys.stderr.write(data)\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class SystemInfoJabberBot(JabberBot):
@botcmd
def serverinfo(self, mess, args):
"""Displays information about the server"""
version = open('/proc/version').read().strip()
loadavg = open('/proc/loadavg').read().strip()
return '%snn%s' % (version, loadavg)
<|reserved_special_token_0|>
@botcmd
def rot13(self, mess, args):
"""Returns passed arguments rot13'ed"""
return args.encode('rot13')
@botcmd
def whoami(self, mess, args):
"""Tells you your username"""
return mess.getFrom().getStripped()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SystemInfoJabberBot(JabberBot):
@botcmd
def serverinfo(self, mess, args):
"""Displays information about the server"""
version = open('/proc/version').read().strip()
loadavg = open('/proc/loadavg').read().strip()
return '%snn%s' % (version, loadavg)
@botcmd
def time(self, mess, args):
"""Displays current server time"""
return str(datetime.datetime.now())
@botcmd
def rot13(self, mess, args):
"""Returns passed arguments rot13'ed"""
return args.encode('rot13')
@botcmd
def whoami(self, mess, args):
"""Tells you your username"""
return mess.getFrom().getStripped()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SystemInfoJabberBot(JabberBot):
@botcmd
def serverinfo(self, mess, args):
"""Displays information about the server"""
version = open('/proc/version').read().strip()
loadavg = open('/proc/loadavg').read().strip()
return '%snn%s' % (version, loadavg)
@botcmd
def time(self, mess, args):
"""Displays current server time"""
return str(datetime.datetime.now())
@botcmd
def rot13(self, mess, args):
"""Returns passed arguments rot13'ed"""
return args.encode('rot13')
@botcmd
def whoami(self, mess, args):
"""Tells you your username"""
return mess.getFrom().getStripped()
<|reserved_special_token_0|>
root.setLevel(logging.DEBUG)
<|reserved_special_token_0|>
ch.setLevel(logging.DEBUG)
<|reserved_special_token_0|>
ch.setFormatter(formatter)
root.addHandler(ch)
<|reserved_special_token_0|>
bot.join_room(chatroom, 'credilbot')
bot.send(adminuser, 'Hello Julien, je suis connecte')
bot.send(chatroom, 'Testing...', None, 'groupchat')
while 1:
bot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')
time.sleep(5)
bot.serve_forever()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SystemInfoJabberBot(JabberBot):
@botcmd
def serverinfo(self, mess, args):
"""Displays information about the server"""
version = open('/proc/version').read().strip()
loadavg = open('/proc/loadavg').read().strip()
return '%snn%s' % (version, loadavg)
@botcmd
def time(self, mess, args):
"""Displays current server time"""
return str(datetime.datetime.now())
@botcmd
def rot13(self, mess, args):
"""Returns passed arguments rot13'ed"""
return args.encode('rot13')
@botcmd
def whoami(self, mess, args):
"""Tells you your username"""
return mess.getFrom().getStripped()
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
bot = SystemInfoJabberBot(username, password)
bot.join_room(chatroom, 'credilbot')
bot.send(adminuser, 'Hello Julien, je suis connecte')
bot.send(chatroom, 'Testing...', None, 'groupchat')
while 1:
bot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')
time.sleep(5)
bot.serve_forever()
<|reserved_special_token_1|>
from jabberbot import JabberBot, botcmd
import datetime
import logging
import sys
import time;
from config import username, password, chatroom, adminuser
class SystemInfoJabberBot(JabberBot):
    # XMPP bot exposing a handful of diagnostic chat commands.  Each
    # @botcmd-decorated method becomes a chat command named after the
    # method; jabberbot surfaces the method docstring as the command's
    # help text, so the docstrings below are user-visible and unchanged.
    @botcmd
    def serverinfo( self, mess, args):
        """Displays information about the server"""
        # Linux-only: kernel banner and load averages read from procfs.
        version = open('/proc/version').read().strip()
        loadavg = open('/proc/loadavg').read().strip()
        # NOTE(review): 'nn' between the %s placeholders looks like a
        # mangled '\n\n' separator -- confirm the intended output format.
        return '%snn%s' % ( version, loadavg, )
    @botcmd
    def time( self, mess, args):
        """Displays current server time"""
        # Shadows the imported `time` module inside the class namespace only;
        # the module-level `time.sleep` below is unaffected.
        return str(datetime.datetime.now())
    @botcmd
    def rot13( self, mess, args):
        """Returns passed arguments rot13'ed"""
        # Python 2 only: the 'rot13' text codec was removed from str.encode
        # in Python 3 (use codecs.encode(args, 'rot_13') there).
        return args.encode('rot13')
    @botcmd
    def whoami(self, mess, args):
        """Tells you your username"""
        # Bare JID (user@domain) of the sender, with the resource stripped.
        return mess.getFrom().getStripped()
# --- bot bootstrap ---------------------------------------------------------
# Stream every DEBUG-and-above log record to stdout.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# Credentials and room/admin addresses come from the local config module.
bot = SystemInfoJabberBot(username,password)
bot.join_room(chatroom, 'credilbot')
bot.send(adminuser, 'Hello Julien, je suis connecte')
#print bot.muc_room_participants(chatroom);
bot.send(chatroom, 'Testing...', None, 'groupchat')
# NOTE(review): this loop never exits, so serve_forever() below is
# unreachable and incoming chat commands are never processed.
while 1:
	bot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')
	time.sleep(5)
bot.serve_forever()
|
flexible
|
{
"blob_id": "c9872fb536fd6552e2a5353566305555808747f7",
"index": 1777,
"step-1": "<mask token>\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n @botcmd\n def serverinfo(self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n return '%snn%s' % (version, loadavg)\n <mask token>\n\n @botcmd\n def rot13(self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n @botcmd\n def serverinfo(self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n return '%snn%s' % (version, loadavg)\n\n @botcmd\n def time(self, mess, args):\n \"\"\"Displays current server time\"\"\"\n return str(datetime.datetime.now())\n\n @botcmd\n def rot13(self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n @botcmd\n def serverinfo(self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n return '%snn%s' % (version, loadavg)\n\n @botcmd\n def time(self, mess, args):\n \"\"\"Displays current server time\"\"\"\n return str(datetime.datetime.now())\n\n @botcmd\n def rot13(self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\n<mask token>\nroot.setLevel(logging.DEBUG)\n<mask token>\nch.setLevel(logging.DEBUG)\n<mask token>\nch.setFormatter(formatter)\nroot.addHandler(ch)\n<mask token>\nbot.join_room(chatroom, 'credilbot')\nbot.send(adminuser, 'Hello Julien, je suis connecte')\nbot.send(chatroom, 'Testing...', None, 'groupchat')\nwhile 1:\n bot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')\n time.sleep(5)\nbot.serve_forever()\n",
"step-4": "<mask token>\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n @botcmd\n def serverinfo(self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n return '%snn%s' % (version, loadavg)\n\n @botcmd\n def time(self, mess, args):\n \"\"\"Displays current server time\"\"\"\n return str(datetime.datetime.now())\n\n @botcmd\n def rot13(self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\nroot = logging.getLogger()\nroot.setLevel(logging.DEBUG)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nroot.addHandler(ch)\nbot = SystemInfoJabberBot(username, password)\nbot.join_room(chatroom, 'credilbot')\nbot.send(adminuser, 'Hello Julien, je suis connecte')\nbot.send(chatroom, 'Testing...', None, 'groupchat')\nwhile 1:\n bot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')\n time.sleep(5)\nbot.serve_forever()\n",
"step-5": "from jabberbot import JabberBot, botcmd\nimport datetime\nimport logging\nimport sys\nimport time;\n\nfrom config import username, password, chatroom, adminuser\n\nclass SystemInfoJabberBot(JabberBot):\n @botcmd\n def serverinfo( self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%snn%s' % ( version, loadavg, )\n\n @botcmd\n def time( self, mess, args):\n \"\"\"Displays current server time\"\"\"\n return str(datetime.datetime.now())\n\n @botcmd\n def rot13( self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\nroot = logging.getLogger()\nroot.setLevel(logging.DEBUG)\n\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nroot.addHandler(ch)\n\n\nbot = SystemInfoJabberBot(username,password)\nbot.join_room(chatroom, 'credilbot')\nbot.send(adminuser, 'Hello Julien, je suis connecte')\n#print bot.muc_room_participants(chatroom);\nbot.send(chatroom, 'Testing...', None, 'groupchat')\n\nwhile 1: \n\tbot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')\n\ttime.sleep(5)\n\nbot.serve_forever()\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
class TestTelegram(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTelegram(unittest.TestCase):
def test_export_iter(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTelegram(unittest.TestCase):
def test_export_iter(self):
pass
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from nldata.corpora import Telegram
import os
class TestTelegram(unittest.TestCase):
def test_export_iter(self):
pass
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from nldata.corpora import Telegram
import os
class TestTelegram(unittest.TestCase):
    """Tests for the Telegram corpus loader (currently a placeholder)."""
    def test_export_iter(self):
        # TODO: re-enable once a data_dir fixture is available; the intended
        # check (kept below) asserts split("train", n=20) yields 20 samples.
        pass
        # telegram = Telegram(data_dir)
        # it = telegram.split("train", n=20)
        # samples = [s for s in it]
        # self.assertEqual(len(samples), 20)
        # list(map(print,samples))
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
|
flexible
|
{
"blob_id": "5c1d81c973487f1b091e58a6ccf5947c3f2a7e6d",
"index": 1058,
"step-1": "<mask token>\n\n\nclass TestTelegram(unittest.TestCase):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom nldata.corpora import Telegram\nimport os\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom nldata.corpora import Telegram\nimport os\n\n\nclass TestTelegram(unittest.TestCase):\n def test_export_iter(self):\n pass\n # telegram = Telegram(data_dir)\n # it = telegram.split(\"train\", n=20)\n # samples = [s for s in it]\n # self.assertEqual(len(samples), 20)\n # list(map(print,samples))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Copyright (c) 2020 Open Collector, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import logging
import typing
from urllib.parse import urlparse
from sqlalchemy.orm import exc as orm_exc
from starlette.exceptions import HTTPException
from starlette.responses import JSONResponse, RedirectResponse, Response
from starlette.routing import Router
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND
from ...executor import async_
from ...middlewares import WithTemplates
from ...utils import ContextualHTTPEndpoint
from ..application import POOL_KEY
from ..models import AuxiliaryIdentityAttribute, UserPool
from ..utils import build_jwt_public_key_from_private_key
# Logger for this endpoints module; handlers are configured by the app.
logger = logging.getLogger(__name__)
# Router collecting every pool-scoped route registered below.
routes = Router()
class PoolHTTPEndpoint(ContextualHTTPEndpoint):
    """Base endpoint for routes scoped to a user pool.

    Requests whose scope carries no pool are rejected with 404 before any
    handler runs (see ``dispatch``).
    """
    @property
    def templates(self):
        # Template renderer that transparently injects the current pool into
        # every template context.  The lambda's `context={}` default is only
        # read (spread into a new dict), never mutated, so sharing it is safe.
        return lambda name, context={}, *args, **kwargs: (
            typing.cast(WithTemplates, self.request).templates(
                name,
                {**context, "pool": self.request.scope.get(POOL_KEY)},
                *args,
                **kwargs,
            )
        )

    @property
    def pool(self) -> typing.Optional[UserPool]:
        # The pool resolved for this request by upstream middleware, if any.
        return typing.cast(typing.Optional[UserPool], self.request.get(POOL_KEY))

    @property
    def per_pool_session(self) -> typing.Dict[str, typing.Any]:
        # Session data namespaced by pool key, so sign-ins to different pools
        # do not interfere; falls back to the raw session when no pool is set.
        pool = self.pool
        if pool is not None:
            return self.request.scope["session"].setdefault(pool.key, {})
        else:
            return self.request.scope["session"]

    async def dispatch(self):
        # Guard: every subclass handler requires a resolved pool.
        if self.request.get(POOL_KEY) is None:
            raise HTTPException(status_code=HTTP_404_NOT_FOUND)
        await super().dispatch()

    @property
    def success_page_url(self):
        # Default post-sign-in destination for this pool.
        return self.request.url_for("pools:signin_success", pool=self.pool.key)
def bool_val(v: typing.Optional[str]) -> bool:
    """Interpret an optional query-string value as a boolean flag.

    ``None`` and the literal strings ``"false"``, ``"no"`` and ``"0"`` are
    falsy; any other value (including the empty string) counts as true.
    """
    falsy = {None, "false", "no", "0"}
    return v not in falsy
@routes.route("/signin", name="signin")
class SigninEndpoint(PoolHTTPEndpoint):
template = "pools/signin.html"
@property
def back_to(self) -> typing.Optional[str]:
return self.request.session.get("back_to")
@back_to.setter
def back_to(self, value: typing.Optional[str]):
self.request.session["back_to"] = value
def render_template(self, context: typing.Dict[str, typing.Any] = {}) -> Response:
assert self.pool is not None
if self.pool.username_attributes:
email = AuxiliaryIdentityAttribute.EMAIL in self.pool.username_attributes
phone_number = (
AuxiliaryIdentityAttribute.PHONE_NUMBER in self.pool.username_attributes
)
if email and phone_number:
label = "E-mail address or phone number"
elif email:
label = "E-mail address"
elif phone_number:
label = "Phone number"
else:
raise AssertionError()
else:
label = "User name"
context["username_label"] = label
return self.templates(self.template, context=context)
async def get(self):
assert self.pool is not None
back_to = self.request.query_params.get("back_to")
reauth = bool_val(self.request.query_params.get("reauth"))
if self.request.user.is_authenticated and not reauth:
return RedirectResponse(back_to or self.success_page_url)
parsed_back_to = urlparse(back_to)
if (
parsed_back_to.scheme and parsed_back_to.scheme != self.request.url.scheme
) or (
parsed_back_to.hostname
and parsed_back_to.hostname != self.request.url.hostname
):
raise HTTPException(status_code=HTTP_400_BAD_REQUEST)
if back_to is not None:
self.back_to = back_to
return self.render_template(context={"form": {"reauth": reauth}})
async def post(self):
assert self.pool is not None
form = await self.request.form()
try:
user = await async_(lambda: self.pool.query_user(form["username"]).one())()
self.request.app.state.kdf.verify(user.password, form["password"])
except Exception as e:
logger.debug(f"failed login attempt: {form['username']} - {e!r}")
return self.render_template(
context={
"form": form,
"alerts": ["No user registered with that user name and password."],
}
)
self.per_pool_session["user_id"] = user.id
return RedirectResponse(self.back_to or self.success_page_url, status_code=302)
@routes.route("/signin/success", name="signin_success")
class SignedinEndpoint(PoolHTTPEndpoint):
template = "pools/signin_success.html"
async def get(self):
return self.templates(self.template)
@routes.route("/signout", name="signout", methods=["post"])
class SignOutEndpoint(PoolHTTPEndpoint):
async def post(self):
form = await self.request.form()
client_id = form.get("client_id")
try:
client = await async_(
self.pool.clients.filter_by(oauth2_client_id=client_id).one
)()
except orm_exc.NoResultFound as e:
raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e
back_to = form.get("back_to")
if back_to is None or back_to not in client.logout_uris:
back_to = self.request.url_for("pools:signout_success", pool=self.pool.key)
if self.request.user.is_authenticated:
del self.per_pool_session["user_id"]
return RedirectResponse(back_to, status_code=302)
@routes.route("/signout/success", name="signout_success")
class SignedOutEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates("pools/signout_success.html")
@routes.route("/", name="index")
class IndexEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates("pools/index.html")
@routes.route("/.well-known/jwks.json", name="signin_success")
class JWKSEndpoint(PoolHTTPEndpoint):
async def get(self):
keys = []
if isinstance(self.request.app.state.jwt_config.key, dict):
public_jwk = build_jwt_public_key_from_private_key(
self.request.app.state.jwt_config.key
)
public_jwk["use"] = "sig"
keys.append(public_jwk)
return JSONResponse(
{
"keys": keys,
}
)
|
normal
|
{
"blob_id": "6e01e36170f3f08f2030dbd4dd91019936fb9f5c",
"index": 849,
"step-1": "<mask token>\n\n\n@routes.route('/signin', name='signin')\nclass SigninEndpoint(PoolHTTPEndpoint):\n <mask token>\n <mask token>\n\n @back_to.setter\n def back_to(self, value: typing.Optional[str]):\n self.request.session['back_to'] = value\n\n def render_template(self, context: typing.Dict[str, typing.Any]={}\n ) ->Response:\n assert self.pool is not None\n if self.pool.username_attributes:\n email = (AuxiliaryIdentityAttribute.EMAIL in self.pool.\n username_attributes)\n phone_number = (AuxiliaryIdentityAttribute.PHONE_NUMBER in self\n .pool.username_attributes)\n if email and phone_number:\n label = 'E-mail address or phone number'\n elif email:\n label = 'E-mail address'\n elif phone_number:\n label = 'Phone number'\n else:\n raise AssertionError()\n else:\n label = 'User name'\n context['username_label'] = label\n return self.templates(self.template, context=context)\n\n async def get(self):\n assert self.pool is not None\n back_to = self.request.query_params.get('back_to')\n reauth = bool_val(self.request.query_params.get('reauth'))\n if self.request.user.is_authenticated and not reauth:\n return RedirectResponse(back_to or self.success_page_url)\n parsed_back_to = urlparse(back_to)\n if (parsed_back_to.scheme and parsed_back_to.scheme != self.request\n .url.scheme or parsed_back_to.hostname and parsed_back_to.\n hostname != self.request.url.hostname):\n raise HTTPException(status_code=HTTP_400_BAD_REQUEST)\n if back_to is not None:\n self.back_to = back_to\n return self.render_template(context={'form': {'reauth': reauth}})\n\n async def post(self):\n assert self.pool is not None\n form = await self.request.form()\n try:\n user = await async_(lambda : self.pool.query_user(form[\n 'username']).one())()\n self.request.app.state.kdf.verify(user.password, form['password'])\n except Exception as e:\n logger.debug(f\"failed login attempt: {form['username']} - {e!r}\")\n return self.render_template(context={'form': form, 'alerts': [\n 'No user registered 
with that user name and password.']})\n self.per_pool_session['user_id'] = user.id\n return RedirectResponse(self.back_to or self.success_page_url,\n status_code=302)\n\n\n@routes.route('/signin/success', name='signin_success')\nclass SignedinEndpoint(PoolHTTPEndpoint):\n template = 'pools/signin_success.html'\n\n async def get(self):\n return self.templates(self.template)\n\n\n@routes.route('/signout', name='signout', methods=['post'])\nclass SignOutEndpoint(PoolHTTPEndpoint):\n\n async def post(self):\n form = await self.request.form()\n client_id = form.get('client_id')\n try:\n client = await async_(self.pool.clients.filter_by(\n oauth2_client_id=client_id).one)()\n except orm_exc.NoResultFound as e:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e\n back_to = form.get('back_to')\n if back_to is None or back_to not in client.logout_uris:\n back_to = self.request.url_for('pools:signout_success', pool=\n self.pool.key)\n if self.request.user.is_authenticated:\n del self.per_pool_session['user_id']\n return RedirectResponse(back_to, status_code=302)\n\n\n@routes.route('/signout/success', name='signout_success')\nclass SignedOutEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n return self.templates('pools/signout_success.html')\n\n\n@routes.route('/', name='index')\nclass IndexEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n return self.templates('pools/index.html')\n\n\n@routes.route('/.well-known/jwks.json', name='signin_success')\nclass JWKSEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n keys = []\n if isinstance(self.request.app.state.jwt_config.key, dict):\n public_jwk = build_jwt_public_key_from_private_key(self.request\n .app.state.jwt_config.key)\n public_jwk['use'] = 'sig'\n keys.append(public_jwk)\n return JSONResponse({'keys': keys})\n",
"step-2": "<mask token>\n\n\nclass PoolHTTPEndpoint(ContextualHTTPEndpoint):\n <mask token>\n <mask token>\n\n @property\n def per_pool_session(self) ->typing.Dict[str, typing.Any]:\n pool = self.pool\n if pool is not None:\n return self.request.scope['session'].setdefault(pool.key, {})\n else:\n return self.request.scope['session']\n\n async def dispatch(self):\n if self.request.get(POOL_KEY) is None:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND)\n await super().dispatch()\n <mask token>\n\n\n<mask token>\n\n\n@routes.route('/signin', name='signin')\nclass SigninEndpoint(PoolHTTPEndpoint):\n template = 'pools/signin.html'\n\n @property\n def back_to(self) ->typing.Optional[str]:\n return self.request.session.get('back_to')\n\n @back_to.setter\n def back_to(self, value: typing.Optional[str]):\n self.request.session['back_to'] = value\n\n def render_template(self, context: typing.Dict[str, typing.Any]={}\n ) ->Response:\n assert self.pool is not None\n if self.pool.username_attributes:\n email = (AuxiliaryIdentityAttribute.EMAIL in self.pool.\n username_attributes)\n phone_number = (AuxiliaryIdentityAttribute.PHONE_NUMBER in self\n .pool.username_attributes)\n if email and phone_number:\n label = 'E-mail address or phone number'\n elif email:\n label = 'E-mail address'\n elif phone_number:\n label = 'Phone number'\n else:\n raise AssertionError()\n else:\n label = 'User name'\n context['username_label'] = label\n return self.templates(self.template, context=context)\n\n async def get(self):\n assert self.pool is not None\n back_to = self.request.query_params.get('back_to')\n reauth = bool_val(self.request.query_params.get('reauth'))\n if self.request.user.is_authenticated and not reauth:\n return RedirectResponse(back_to or self.success_page_url)\n parsed_back_to = urlparse(back_to)\n if (parsed_back_to.scheme and parsed_back_to.scheme != self.request\n .url.scheme or parsed_back_to.hostname and parsed_back_to.\n hostname != self.request.url.hostname):\n 
raise HTTPException(status_code=HTTP_400_BAD_REQUEST)\n if back_to is not None:\n self.back_to = back_to\n return self.render_template(context={'form': {'reauth': reauth}})\n\n async def post(self):\n assert self.pool is not None\n form = await self.request.form()\n try:\n user = await async_(lambda : self.pool.query_user(form[\n 'username']).one())()\n self.request.app.state.kdf.verify(user.password, form['password'])\n except Exception as e:\n logger.debug(f\"failed login attempt: {form['username']} - {e!r}\")\n return self.render_template(context={'form': form, 'alerts': [\n 'No user registered with that user name and password.']})\n self.per_pool_session['user_id'] = user.id\n return RedirectResponse(self.back_to or self.success_page_url,\n status_code=302)\n\n\n@routes.route('/signin/success', name='signin_success')\nclass SignedinEndpoint(PoolHTTPEndpoint):\n template = 'pools/signin_success.html'\n\n async def get(self):\n return self.templates(self.template)\n\n\n@routes.route('/signout', name='signout', methods=['post'])\nclass SignOutEndpoint(PoolHTTPEndpoint):\n\n async def post(self):\n form = await self.request.form()\n client_id = form.get('client_id')\n try:\n client = await async_(self.pool.clients.filter_by(\n oauth2_client_id=client_id).one)()\n except orm_exc.NoResultFound as e:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e\n back_to = form.get('back_to')\n if back_to is None or back_to not in client.logout_uris:\n back_to = self.request.url_for('pools:signout_success', pool=\n self.pool.key)\n if self.request.user.is_authenticated:\n del self.per_pool_session['user_id']\n return RedirectResponse(back_to, status_code=302)\n\n\n@routes.route('/signout/success', name='signout_success')\nclass SignedOutEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n return self.templates('pools/signout_success.html')\n\n\n@routes.route('/', name='index')\nclass IndexEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n return 
self.templates('pools/index.html')\n\n\n@routes.route('/.well-known/jwks.json', name='signin_success')\nclass JWKSEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n keys = []\n if isinstance(self.request.app.state.jwt_config.key, dict):\n public_jwk = build_jwt_public_key_from_private_key(self.request\n .app.state.jwt_config.key)\n public_jwk['use'] = 'sig'\n keys.append(public_jwk)\n return JSONResponse({'keys': keys})\n",
"step-3": "<mask token>\n\n\nclass PoolHTTPEndpoint(ContextualHTTPEndpoint):\n\n @property\n def templates(self):\n return lambda name, context={}, *args, **kwargs: typing.cast(\n WithTemplates, self.request).templates(name, {**context, 'pool':\n self.request.scope.get(POOL_KEY)}, *args, **kwargs)\n <mask token>\n\n @property\n def per_pool_session(self) ->typing.Dict[str, typing.Any]:\n pool = self.pool\n if pool is not None:\n return self.request.scope['session'].setdefault(pool.key, {})\n else:\n return self.request.scope['session']\n\n async def dispatch(self):\n if self.request.get(POOL_KEY) is None:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND)\n await super().dispatch()\n <mask token>\n\n\n<mask token>\n\n\n@routes.route('/signin', name='signin')\nclass SigninEndpoint(PoolHTTPEndpoint):\n template = 'pools/signin.html'\n\n @property\n def back_to(self) ->typing.Optional[str]:\n return self.request.session.get('back_to')\n\n @back_to.setter\n def back_to(self, value: typing.Optional[str]):\n self.request.session['back_to'] = value\n\n def render_template(self, context: typing.Dict[str, typing.Any]={}\n ) ->Response:\n assert self.pool is not None\n if self.pool.username_attributes:\n email = (AuxiliaryIdentityAttribute.EMAIL in self.pool.\n username_attributes)\n phone_number = (AuxiliaryIdentityAttribute.PHONE_NUMBER in self\n .pool.username_attributes)\n if email and phone_number:\n label = 'E-mail address or phone number'\n elif email:\n label = 'E-mail address'\n elif phone_number:\n label = 'Phone number'\n else:\n raise AssertionError()\n else:\n label = 'User name'\n context['username_label'] = label\n return self.templates(self.template, context=context)\n\n async def get(self):\n assert self.pool is not None\n back_to = self.request.query_params.get('back_to')\n reauth = bool_val(self.request.query_params.get('reauth'))\n if self.request.user.is_authenticated and not reauth:\n return RedirectResponse(back_to or self.success_page_url)\n 
parsed_back_to = urlparse(back_to)\n if (parsed_back_to.scheme and parsed_back_to.scheme != self.request\n .url.scheme or parsed_back_to.hostname and parsed_back_to.\n hostname != self.request.url.hostname):\n raise HTTPException(status_code=HTTP_400_BAD_REQUEST)\n if back_to is not None:\n self.back_to = back_to\n return self.render_template(context={'form': {'reauth': reauth}})\n\n async def post(self):\n assert self.pool is not None\n form = await self.request.form()\n try:\n user = await async_(lambda : self.pool.query_user(form[\n 'username']).one())()\n self.request.app.state.kdf.verify(user.password, form['password'])\n except Exception as e:\n logger.debug(f\"failed login attempt: {form['username']} - {e!r}\")\n return self.render_template(context={'form': form, 'alerts': [\n 'No user registered with that user name and password.']})\n self.per_pool_session['user_id'] = user.id\n return RedirectResponse(self.back_to or self.success_page_url,\n status_code=302)\n\n\n@routes.route('/signin/success', name='signin_success')\nclass SignedinEndpoint(PoolHTTPEndpoint):\n template = 'pools/signin_success.html'\n\n async def get(self):\n return self.templates(self.template)\n\n\n@routes.route('/signout', name='signout', methods=['post'])\nclass SignOutEndpoint(PoolHTTPEndpoint):\n\n async def post(self):\n form = await self.request.form()\n client_id = form.get('client_id')\n try:\n client = await async_(self.pool.clients.filter_by(\n oauth2_client_id=client_id).one)()\n except orm_exc.NoResultFound as e:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e\n back_to = form.get('back_to')\n if back_to is None or back_to not in client.logout_uris:\n back_to = self.request.url_for('pools:signout_success', pool=\n self.pool.key)\n if self.request.user.is_authenticated:\n del self.per_pool_session['user_id']\n return RedirectResponse(back_to, status_code=302)\n\n\n@routes.route('/signout/success', name='signout_success')\nclass 
SignedOutEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n return self.templates('pools/signout_success.html')\n\n\n@routes.route('/', name='index')\nclass IndexEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n return self.templates('pools/index.html')\n\n\n@routes.route('/.well-known/jwks.json', name='signin_success')\nclass JWKSEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n keys = []\n if isinstance(self.request.app.state.jwt_config.key, dict):\n public_jwk = build_jwt_public_key_from_private_key(self.request\n .app.state.jwt_config.key)\n public_jwk['use'] = 'sig'\n keys.append(public_jwk)\n return JSONResponse({'keys': keys})\n",
"step-4": "import logging\nimport typing\nfrom urllib.parse import urlparse\nfrom sqlalchemy.orm import exc as orm_exc\nfrom starlette.exceptions import HTTPException\nfrom starlette.responses import JSONResponse, RedirectResponse, Response\nfrom starlette.routing import Router\nfrom starlette.status import HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND\nfrom ...executor import async_\nfrom ...middlewares import WithTemplates\nfrom ...utils import ContextualHTTPEndpoint\nfrom ..application import POOL_KEY\nfrom ..models import AuxiliaryIdentityAttribute, UserPool\nfrom ..utils import build_jwt_public_key_from_private_key\nlogger = logging.getLogger(__name__)\nroutes = Router()\n\n\nclass PoolHTTPEndpoint(ContextualHTTPEndpoint):\n\n @property\n def templates(self):\n return lambda name, context={}, *args, **kwargs: typing.cast(\n WithTemplates, self.request).templates(name, {**context, 'pool':\n self.request.scope.get(POOL_KEY)}, *args, **kwargs)\n\n @property\n def pool(self) ->typing.Optional[UserPool]:\n return typing.cast(typing.Optional[UserPool], self.request.get(\n POOL_KEY))\n\n @property\n def per_pool_session(self) ->typing.Dict[str, typing.Any]:\n pool = self.pool\n if pool is not None:\n return self.request.scope['session'].setdefault(pool.key, {})\n else:\n return self.request.scope['session']\n\n async def dispatch(self):\n if self.request.get(POOL_KEY) is None:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND)\n await super().dispatch()\n\n @property\n def success_page_url(self):\n return self.request.url_for('pools:signin_success', pool=self.pool.key)\n\n\ndef bool_val(v: typing.Optional[str]) ->bool:\n return v not in ('false', 'no', '0', None)\n\n\n@routes.route('/signin', name='signin')\nclass SigninEndpoint(PoolHTTPEndpoint):\n template = 'pools/signin.html'\n\n @property\n def back_to(self) ->typing.Optional[str]:\n return self.request.session.get('back_to')\n\n @back_to.setter\n def back_to(self, value: typing.Optional[str]):\n 
self.request.session['back_to'] = value\n\n def render_template(self, context: typing.Dict[str, typing.Any]={}\n ) ->Response:\n assert self.pool is not None\n if self.pool.username_attributes:\n email = (AuxiliaryIdentityAttribute.EMAIL in self.pool.\n username_attributes)\n phone_number = (AuxiliaryIdentityAttribute.PHONE_NUMBER in self\n .pool.username_attributes)\n if email and phone_number:\n label = 'E-mail address or phone number'\n elif email:\n label = 'E-mail address'\n elif phone_number:\n label = 'Phone number'\n else:\n raise AssertionError()\n else:\n label = 'User name'\n context['username_label'] = label\n return self.templates(self.template, context=context)\n\n async def get(self):\n assert self.pool is not None\n back_to = self.request.query_params.get('back_to')\n reauth = bool_val(self.request.query_params.get('reauth'))\n if self.request.user.is_authenticated and not reauth:\n return RedirectResponse(back_to or self.success_page_url)\n parsed_back_to = urlparse(back_to)\n if (parsed_back_to.scheme and parsed_back_to.scheme != self.request\n .url.scheme or parsed_back_to.hostname and parsed_back_to.\n hostname != self.request.url.hostname):\n raise HTTPException(status_code=HTTP_400_BAD_REQUEST)\n if back_to is not None:\n self.back_to = back_to\n return self.render_template(context={'form': {'reauth': reauth}})\n\n async def post(self):\n assert self.pool is not None\n form = await self.request.form()\n try:\n user = await async_(lambda : self.pool.query_user(form[\n 'username']).one())()\n self.request.app.state.kdf.verify(user.password, form['password'])\n except Exception as e:\n logger.debug(f\"failed login attempt: {form['username']} - {e!r}\")\n return self.render_template(context={'form': form, 'alerts': [\n 'No user registered with that user name and password.']})\n self.per_pool_session['user_id'] = user.id\n return RedirectResponse(self.back_to or self.success_page_url,\n status_code=302)\n\n\n@routes.route('/signin/success', 
name='signin_success')\nclass SignedinEndpoint(PoolHTTPEndpoint):\n template = 'pools/signin_success.html'\n\n async def get(self):\n return self.templates(self.template)\n\n\n@routes.route('/signout', name='signout', methods=['post'])\nclass SignOutEndpoint(PoolHTTPEndpoint):\n\n async def post(self):\n form = await self.request.form()\n client_id = form.get('client_id')\n try:\n client = await async_(self.pool.clients.filter_by(\n oauth2_client_id=client_id).one)()\n except orm_exc.NoResultFound as e:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e\n back_to = form.get('back_to')\n if back_to is None or back_to not in client.logout_uris:\n back_to = self.request.url_for('pools:signout_success', pool=\n self.pool.key)\n if self.request.user.is_authenticated:\n del self.per_pool_session['user_id']\n return RedirectResponse(back_to, status_code=302)\n\n\n@routes.route('/signout/success', name='signout_success')\nclass SignedOutEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n return self.templates('pools/signout_success.html')\n\n\n@routes.route('/', name='index')\nclass IndexEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n return self.templates('pools/index.html')\n\n\n@routes.route('/.well-known/jwks.json', name='signin_success')\nclass JWKSEndpoint(PoolHTTPEndpoint):\n\n async def get(self):\n keys = []\n if isinstance(self.request.app.state.jwt_config.key, dict):\n public_jwk = build_jwt_public_key_from_private_key(self.request\n .app.state.jwt_config.key)\n public_jwk['use'] = 'sig'\n keys.append(public_jwk)\n return JSONResponse({'keys': keys})\n",
"step-5": "# Copyright (c) 2020 Open Collector, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport logging\nimport typing\nfrom urllib.parse import urlparse\n\nfrom sqlalchemy.orm import exc as orm_exc\nfrom starlette.exceptions import HTTPException\nfrom starlette.responses import JSONResponse, RedirectResponse, Response\nfrom starlette.routing import Router\nfrom starlette.status import HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND\n\nfrom ...executor import async_\nfrom ...middlewares import WithTemplates\nfrom ...utils import ContextualHTTPEndpoint\nfrom ..application import POOL_KEY\nfrom ..models import AuxiliaryIdentityAttribute, UserPool\nfrom ..utils import build_jwt_public_key_from_private_key\n\nlogger = logging.getLogger(__name__)\nroutes = Router()\n\n\nclass PoolHTTPEndpoint(ContextualHTTPEndpoint):\n @property\n def templates(self):\n return lambda name, context={}, *args, **kwargs: (\n typing.cast(WithTemplates, 
self.request).templates(\n name,\n {**context, \"pool\": self.request.scope.get(POOL_KEY)},\n *args,\n **kwargs,\n )\n )\n\n @property\n def pool(self) -> typing.Optional[UserPool]:\n return typing.cast(typing.Optional[UserPool], self.request.get(POOL_KEY))\n\n @property\n def per_pool_session(self) -> typing.Dict[str, typing.Any]:\n pool = self.pool\n if pool is not None:\n return self.request.scope[\"session\"].setdefault(pool.key, {})\n else:\n return self.request.scope[\"session\"]\n\n async def dispatch(self):\n if self.request.get(POOL_KEY) is None:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND)\n await super().dispatch()\n\n @property\n def success_page_url(self):\n return self.request.url_for(\"pools:signin_success\", pool=self.pool.key)\n\n\ndef bool_val(v: typing.Optional[str]) -> bool:\n return v not in (\"false\", \"no\", \"0\", None)\n\n\n@routes.route(\"/signin\", name=\"signin\")\nclass SigninEndpoint(PoolHTTPEndpoint):\n template = \"pools/signin.html\"\n\n @property\n def back_to(self) -> typing.Optional[str]:\n return self.request.session.get(\"back_to\")\n\n @back_to.setter\n def back_to(self, value: typing.Optional[str]):\n self.request.session[\"back_to\"] = value\n\n def render_template(self, context: typing.Dict[str, typing.Any] = {}) -> Response:\n assert self.pool is not None\n if self.pool.username_attributes:\n email = AuxiliaryIdentityAttribute.EMAIL in self.pool.username_attributes\n phone_number = (\n AuxiliaryIdentityAttribute.PHONE_NUMBER in self.pool.username_attributes\n )\n if email and phone_number:\n label = \"E-mail address or phone number\"\n elif email:\n label = \"E-mail address\"\n elif phone_number:\n label = \"Phone number\"\n else:\n raise AssertionError()\n else:\n label = \"User name\"\n context[\"username_label\"] = label\n return self.templates(self.template, context=context)\n\n async def get(self):\n assert self.pool is not None\n back_to = self.request.query_params.get(\"back_to\")\n reauth = 
bool_val(self.request.query_params.get(\"reauth\"))\n if self.request.user.is_authenticated and not reauth:\n return RedirectResponse(back_to or self.success_page_url)\n parsed_back_to = urlparse(back_to)\n if (\n parsed_back_to.scheme and parsed_back_to.scheme != self.request.url.scheme\n ) or (\n parsed_back_to.hostname\n and parsed_back_to.hostname != self.request.url.hostname\n ):\n raise HTTPException(status_code=HTTP_400_BAD_REQUEST)\n if back_to is not None:\n self.back_to = back_to\n return self.render_template(context={\"form\": {\"reauth\": reauth}})\n\n async def post(self):\n assert self.pool is not None\n form = await self.request.form()\n try:\n user = await async_(lambda: self.pool.query_user(form[\"username\"]).one())()\n self.request.app.state.kdf.verify(user.password, form[\"password\"])\n except Exception as e:\n logger.debug(f\"failed login attempt: {form['username']} - {e!r}\")\n return self.render_template(\n context={\n \"form\": form,\n \"alerts\": [\"No user registered with that user name and password.\"],\n }\n )\n self.per_pool_session[\"user_id\"] = user.id\n return RedirectResponse(self.back_to or self.success_page_url, status_code=302)\n\n\n@routes.route(\"/signin/success\", name=\"signin_success\")\nclass SignedinEndpoint(PoolHTTPEndpoint):\n template = \"pools/signin_success.html\"\n\n async def get(self):\n return self.templates(self.template)\n\n\n@routes.route(\"/signout\", name=\"signout\", methods=[\"post\"])\nclass SignOutEndpoint(PoolHTTPEndpoint):\n async def post(self):\n form = await self.request.form()\n client_id = form.get(\"client_id\")\n try:\n client = await async_(\n self.pool.clients.filter_by(oauth2_client_id=client_id).one\n )()\n except orm_exc.NoResultFound as e:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e\n back_to = form.get(\"back_to\")\n if back_to is None or back_to not in client.logout_uris:\n back_to = self.request.url_for(\"pools:signout_success\", pool=self.pool.key)\n if 
self.request.user.is_authenticated:\n del self.per_pool_session[\"user_id\"]\n return RedirectResponse(back_to, status_code=302)\n\n\n@routes.route(\"/signout/success\", name=\"signout_success\")\nclass SignedOutEndpoint(PoolHTTPEndpoint):\n async def get(self):\n return self.templates(\"pools/signout_success.html\")\n\n\n@routes.route(\"/\", name=\"index\")\nclass IndexEndpoint(PoolHTTPEndpoint):\n async def get(self):\n return self.templates(\"pools/index.html\")\n\n\n@routes.route(\"/.well-known/jwks.json\", name=\"signin_success\")\nclass JWKSEndpoint(PoolHTTPEndpoint):\n async def get(self):\n keys = []\n if isinstance(self.request.app.state.jwt_config.key, dict):\n public_jwk = build_jwt_public_key_from_private_key(\n self.request.app.state.jwt_config.key\n )\n public_jwk[\"use\"] = \"sig\"\n keys.append(public_jwk)\n return JSONResponse(\n {\n \"keys\": keys,\n }\n )\n",
"step-ids": [
9,
13,
14,
19,
20
]
}
|
[
9,
13,
14,
19,
20
] |
"""
Task. Given two integers a and b, find their greatest common divisor.
Input Format. The two integers a, b are given in the same line separated by space.
Constraints. 1<=a,b<=2·109.
Output Format. Output GCD(a, b).
"""
def EuclidGCD(a, b):
    """Return the greatest common divisor of a and b via Euclid's algorithm."""
    # gcd(a, 0) == a; otherwise recurse on (b, a mod b).
    return a if b == 0 else EuclidGCD(b, a % b)
# Read two space-separated integers from stdin and print their GCD.
values = list(map(int, input().split()))
print(EuclidGCD(values[0], values[1]))
|
normal
|
{
"blob_id": "39d82267f966ca106ee384e540c31a3e5e433318",
"index": 2248,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\n<mask token>\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-4": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\nin_ = [int(n) for n in input().split()]\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-5": "\"\"\"\nTask. Given two integers a and b, find their greatest common divisor.\nInput Format. The two integers a, b are given in the same line separated by space.\nConstraints. 1<=a,b<=2·109.\nOutput Format. Output GCD(a, b).\n\"\"\"\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a%b\n return EuclidGCD(b, a)\n\nin_ = [int(n) for n in input().split()]\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.figure(figsize=(20, 8), dpi=128)
plt.barh(a, b, height=0.5, color='red')
plt.title('2018年电影票房纪录', fontsize=24)
plt.xlabel('票房(亿元)', fontsize=14)
<|reserved_special_token_0|>
plt.xticks(my_x_ticks)
plt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
a = ['战狼2', '速度与激情8', '功夫瑜伽', '西游伏妖篇', '变形金刚5:最后的骑士', '摔跤吧!爸爸',
'加勒比海盗5:死无对证', '金刚:骷髅岛', '极限特工:终极回归', '生化危机6:终章', '乘风破浪', '神偷奶爸3',
'智取威虎山', '大闹天竺', '金刚狼3:殊死一战', '蜘蛛侠:英雄归来', '悟空传', '银河护卫队2', '情圣', '新木乃伊']
b = [56.01, 26.94, 17.53, 16.49, 15.45, 12.96, 11.8, 11.61, 11.28, 11.12,
10.49, 10.3, 8.75, 7.55, 7.32, 6.99, 6.88, 6.86, 6.58, 6.23]
plt.figure(figsize=(20, 8), dpi=128)
plt.barh(a, b, height=0.5, color='red')
plt.title('2018年电影票房纪录', fontsize=24)
plt.xlabel('票房(亿元)', fontsize=14)
my_x_ticks = np.arange(0, 61, 5)
plt.xticks(my_x_ticks)
plt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
a = ['战狼2', '速度与激情8', '功夫瑜伽', '西游伏妖篇', '变形金刚5:最后的骑士', '摔跤吧!爸爸',
'加勒比海盗5:死无对证', '金刚:骷髅岛', '极限特工:终极回归', '生化危机6:终章', '乘风破浪', '神偷奶爸3',
'智取威虎山', '大闹天竺', '金刚狼3:殊死一战', '蜘蛛侠:英雄归来', '悟空传', '银河护卫队2', '情圣', '新木乃伊']
b = [56.01, 26.94, 17.53, 16.49, 15.45, 12.96, 11.8, 11.61, 11.28, 11.12,
10.49, 10.3, 8.75, 7.55, 7.32, 6.99, 6.88, 6.86, 6.58, 6.23]
plt.figure(figsize=(20, 8), dpi=128)
plt.barh(a, b, height=0.5, color='red')
plt.title('2018年电影票房纪录', fontsize=24)
plt.xlabel('票房(亿元)', fontsize=14)
my_x_ticks = np.arange(0, 61, 5)
plt.xticks(my_x_ticks)
plt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)
plt.show()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Imports
import matplotlib.pyplot as plt
import numpy as np
# Allow Chinese glyphs (SimHei) and the minus sign to render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# X-axis and Y-axis data; box office in units of 100 million yuan
a = ["战狼2","速度与激情8","功夫瑜伽","西游伏妖篇","变形金刚5:最后的骑士","摔跤吧!爸爸","加勒比海盗5:死无对证","金刚:骷髅岛","极限特工:终极回归","生化危机6:终章","乘风破浪","神偷奶爸3","智取威虎山","大闹天竺","金刚狼3:殊死一战","蜘蛛侠:英雄归来","悟空传","银河护卫队2","情圣","新木乃伊",]
b = [56.01,26.94,17.53,16.49,15.45,12.96,11.8,11.61,11.28,11.12,10.49,10.3,8.75,7.55,7.32,6.99,6.88,6.86,6.58,6.23]
# Set the figure size
plt.figure(figsize=(20, 8), dpi=128)
# Draw a horizontal bar chart; the category argument is any iterable, usually a list
# (a vertical bar chart would use `width` instead of `height` for bar thickness)
plt.barh(a, b, height=0.5, color='red')
# Set the chart title and the X-axis label
plt.title("2018年电影票房纪录", fontsize=24)
plt.xlabel("票房(亿元)", fontsize=14)
# Set the tick positions/spacing; range() cannot produce the step needed here,
# so use np.arange instead
my_x_ticks = np.arange(0, 61, 5)
plt.xticks(my_x_ticks)
# Draw the grid
plt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)
# Show the figure
plt.show()
|
flexible
|
{
"blob_id": "16d86c48c45ab0441046e968ea364d27f6dcfd12",
"index": 3066,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.figure(figsize=(20, 8), dpi=128)\nplt.barh(a, b, height=0.5, color='red')\nplt.title('2018年电影票房纪录', fontsize=24)\nplt.xlabel('票房(亿元)', fontsize=14)\n<mask token>\nplt.xticks(my_x_ticks)\nplt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)\nplt.show()\n",
"step-3": "<mask token>\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\na = ['战狼2', '速度与激情8', '功夫瑜伽', '西游伏妖篇', '变形金刚5:最后的骑士', '摔跤吧!爸爸',\n '加勒比海盗5:死无对证', '金刚:骷髅岛', '极限特工:终极回归', '生化危机6:终章', '乘风破浪', '神偷奶爸3',\n '智取威虎山', '大闹天竺', '金刚狼3:殊死一战', '蜘蛛侠:英雄归来', '悟空传', '银河护卫队2', '情圣', '新木乃伊']\nb = [56.01, 26.94, 17.53, 16.49, 15.45, 12.96, 11.8, 11.61, 11.28, 11.12, \n 10.49, 10.3, 8.75, 7.55, 7.32, 6.99, 6.88, 6.86, 6.58, 6.23]\nplt.figure(figsize=(20, 8), dpi=128)\nplt.barh(a, b, height=0.5, color='red')\nplt.title('2018年电影票房纪录', fontsize=24)\nplt.xlabel('票房(亿元)', fontsize=14)\nmy_x_ticks = np.arange(0, 61, 5)\nplt.xticks(my_x_ticks)\nplt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\na = ['战狼2', '速度与激情8', '功夫瑜伽', '西游伏妖篇', '变形金刚5:最后的骑士', '摔跤吧!爸爸',\n '加勒比海盗5:死无对证', '金刚:骷髅岛', '极限特工:终极回归', '生化危机6:终章', '乘风破浪', '神偷奶爸3',\n '智取威虎山', '大闹天竺', '金刚狼3:殊死一战', '蜘蛛侠:英雄归来', '悟空传', '银河护卫队2', '情圣', '新木乃伊']\nb = [56.01, 26.94, 17.53, 16.49, 15.45, 12.96, 11.8, 11.61, 11.28, 11.12, \n 10.49, 10.3, 8.75, 7.55, 7.32, 6.99, 6.88, 6.86, 6.58, 6.23]\nplt.figure(figsize=(20, 8), dpi=128)\nplt.barh(a, b, height=0.5, color='red')\nplt.title('2018年电影票房纪录', fontsize=24)\nplt.xlabel('票房(亿元)', fontsize=14)\nmy_x_ticks = np.arange(0, 61, 5)\nplt.xticks(my_x_ticks)\nplt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# 导入包\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 显示中文和显示负号\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\n# X轴和Y轴数据,票房单位亿\na = [\"战狼2\",\"速度与激情8\",\"功夫瑜伽\",\"西游伏妖篇\",\"变形金刚5:最后的骑士\",\"摔跤吧!爸爸\",\"加勒比海盗5:死无对证\",\"金刚:骷髅岛\",\"极限特工:终极回归\",\"生化危机6:终章\",\"乘风破浪\",\"神偷奶爸3\",\"智取威虎山\",\"大闹天竺\",\"金刚狼3:殊死一战\",\"蜘蛛侠:英雄归来\",\"悟空传\",\"银河护卫队2\",\"情圣\",\"新木乃伊\",]\nb = [56.01,26.94,17.53,16.49,15.45,12.96,11.8,11.61,11.28,11.12,10.49,10.3,8.75,7.55,7.32,6.99,6.88,6.86,6.58,6.23]\n\n# 设置图形的大小\nplt.figure(figsize=(20, 8), dpi=128)\n\n# 绘制横置条形图,x轴参数是一个可迭代对象,一般为列表\n# 竖直条形图,用的是width设置宽度\nplt.barh(a, b, height=0.5, color='red')\n\n# 设置图片,X轴,Y轴标题\nplt.title(\"2018年电影票房纪录\", fontsize=24)\nplt.xlabel(\"票房(亿元)\", fontsize=14)\n\n# 设置坐标轴刻度,刻度间隔,range不能设置步长\nmy_x_ticks = np.arange(0, 61, 5)\nplt.xticks(my_x_ticks)\n\n# 设置网格\nplt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)\n\n# 显示图形\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import wfdb as wf
import numpy as np
from scipy import signal as ss
from datasets import mitdb as dm
from matplotlib import pyplot as plt
def show_path(path):
    """Plot the first channel of a WFDB record against time.

    path: path to the record files (without file extension).
    """
    # Read in the data
    record = wf.rdsamp(path)
    annotation = wf.rdann(path, 'atr')  # annotations read but not plotted here
    data = record.p_signals
    cha = data[:, 0]
    # BUG FIX: Python 3 print call (the Python 2 statement form is a
    # SyntaxError under Python 3, which the rest of this file targets)
    print('Channel type:', record.signame[0])
    # Convert sample indices into seconds using the sampling frequency
    times = np.arange(len(cha), dtype=float)
    times /= record.fs
    plt.plot(times, cha)
    plt.xlabel('Time [s]')
    plt.show()
def show_annotations(path):
    """Plot the start of a record with its beat annotations marked on top."""
    record = wf.rdsamp(path)
    annotation = wf.rdann(path, 'atr')

    # Restrict everything to the first 2000 samples
    n_samples = 2000
    channel = record.p_signals[:n_samples, 0]

    # Keep only the annotations falling inside that window
    mask = annotation.annsamp < n_samples
    positions = annotation.annsamp[mask]

    # Convert to numpy.array to get fancy indexing access
    labels = np.array(annotation.anntype)[mask]

    # Time axis in seconds
    times = np.arange(n_samples, dtype='float') / record.fs
    plt.plot(times, channel)

    # Draw QRS markers, scaled to sit above the trace
    marker_times = times[positions]
    marker_values = np.ones_like(marker_times) * channel.max() * 1.4
    plt.plot(marker_times, marker_values, 'ro')

    # Annotate each beat with its type code, slightly above the signal
    label_height = channel.max() * 1.1
    for idx, pos in enumerate(positions):
        plt.annotate(labels[idx], xy=(times[pos], label_height))

    plt.xlim([0, 4])
    plt.xlabel('Time [s]')
    plt.show()
def show_objective():
    """Visualize model targets: raw ECG, dirac-comb and gaussian-comb labels."""
    # Choose a record
    records = dm.get_records()
    path = records[17]
    record = wf.rdsamp(path)
    ann = wf.rdann(path, 'atr')

    chid = 0
    # BUG FIX: Python 3 print calls (Python 2 statements were SyntaxErrors
    # under Python 3, which the rest of this file targets)
    print('Channel:', record.signame[chid])

    cha = record.p_signals[:, chid]

    # These were found manually
    sta = 184000
    end = sta + 1000
    times = np.arange(end - sta, dtype='float')
    times /= record.fs

    # Extract the annotations for that fragment
    where = (sta < ann.annsamp) & (ann.annsamp < end)
    samples = ann.annsamp[where] - sta
    print(samples)

    # Prepare dirac-comb type of labels
    qrs_values = np.zeros_like(times)
    qrs_values[samples] = 1

    # Prepare gaussian-comb type of labels
    # NOTE(review): hamming() now lives in scipy.signal.windows; the bare
    # scipy.signal.hamming alias was removed in modern SciPy releases.
    kernel = ss.windows.hamming(36)
    qrs_gauss = np.convolve(kernel,
                            qrs_values,
                            mode='same')

    # Make the plots: raw signal on top, dirac comb, then gaussian comb
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    ax1.plot(times, cha[sta:end])

    ax2 = fig.add_subplot(3, 1, 2, sharex=ax1)
    ax2.plot(times,
             qrs_values,
             'C1',
             lw=4,
             alpha=0.888)
    ax3 = fig.add_subplot(3, 1, 3, sharex=ax1)
    ax3.plot(times,
             qrs_gauss,
             'C3',
             lw=4,
             alpha=0.888)
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.setp(ax2.get_xticklabels(), visible=False)
    plt.xlabel('Time [s]')
    plt.xlim([0, 2.5])
    plt.show()
def show_objective_part2():
    """Visualize the model's input/output pair: raw ECG vs gaussian-comb target."""
    # Choose a record
    records = dm.get_records()
    path = records[13]
    record = wf.rdsamp(path)
    ann = wf.rdann(path, 'atr')

    chid = 0
    # BUG FIX: Python 3 print calls (Python 2 statements were SyntaxErrors
    # under Python 3, which the rest of this file targets)
    print('File:', path)
    print('Channel:', record.signame[chid])

    cha = record.p_signals[:, chid]

    # These were found manually
    sta = 184000
    end = sta + 1000
    times = np.arange(end - sta, dtype='float')
    times /= record.fs

    # Extract the annotations for that fragment
    where = (sta < ann.annsamp) & (ann.annsamp < end)
    samples = ann.annsamp[where] - sta
    print(samples)

    # Prepare dirac-comb type of labels
    qrs_values = np.zeros_like(times)
    qrs_values[samples] = 1

    # Prepare gaussian-comb type of labels
    # NOTE(review): hamming() now lives in scipy.signal.windows; the bare
    # scipy.signal.hamming alias was removed in modern SciPy releases.
    kernel = ss.windows.hamming(36)
    qrs_gauss = np.convolve(kernel,
                            qrs_values,
                            mode='same')

    # Make the plots: network input on top, expected output below
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 1, 1)
    ax1.plot(times, cha[sta:end])
    ax1.set_title('Input', loc='left')

    ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
    ax2.plot(times,
             qrs_gauss,
             'C3',
             lw=4,
             alpha=0.888)
    ax2.set_title('Output', loc='left')
    ax1.grid()
    ax2.grid()
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.xlabel('Time [s]')
    plt.xlim([0, 2.5])
    plt.show()
|
normal
|
{
"blob_id": "4ba722e685c7608fcfd5111131c96847c0408a02",
"index": 1905,
"step-1": "import wfdb as wf\nimport numpy as np\nfrom scipy import signal as ss\nfrom datasets import mitdb as dm\nfrom matplotlib import pyplot as plt\n\ndef show_path(path):\n \"\"\" As a plot \"\"\"\n # Read in the data\n record = wf.rdsamp(path)\n annotation = wf.rdann(path, 'atr')\n data = record.p_signals\n cha = data[:, 0]\n print 'Channel type:', record.signame[0]\n times = np.arange(len(cha), dtype = float)\n times /= record.fs\n plt.plot(times, cha)\n plt.xlabel('Time [s]')\n plt.show()\n\ndef show_annotations(path):\n \"\"\" Exemplary code \"\"\"\n record = wf.rdsamp(path)\n annotation = wf.rdann(path, 'atr')\n\n # Get data and annotations for the first 2000 samples\n howmany = 2000\n channel = record.p_signals[:howmany, 0]\n\n # Extract all of the annotation related infromation\n where = annotation.annsamp < howmany\n samp = annotation.annsamp[where]\n\n # Convert to numpy.array to get fancy indexing access\n types = np.array(annotation.anntype)\n types = types[where]\n\n times = np.arange(howmany, dtype = 'float') / record.fs\n plt.plot(times, channel)\n\n # Prepare qrs information for the plot\n qrs_times = times[samp]\n\n # Scale to show markers at the top \n qrs_values = np.ones_like(qrs_times)\n qrs_values *= channel.max() * 1.4\n\n plt.plot(qrs_times, qrs_values, 'ro')\n\n # Also show annotation code\n # And their words\n for it, sam in enumerate(samp):\n # Get the annotation position\n xa = times[sam]\n ya = channel.max() * 1.1\n\n # Use just the first letter \n a_txt = types[it]\n plt.annotate(a_txt, xy = (xa, ya))\n\n plt.xlim([0, 4])\n plt.xlabel('Time [s]')\n plt.show()\n\ndef show_objective():\n \"\"\" For the model \"\"\"\n # Choose a record\n records = dm.get_records()\n path = records[17]\n record = wf.rdsamp(path)\n ann = wf.rdann(path, 'atr')\n\n chid = 0\n print 'Channel:', record.signame[chid]\n\n cha = record.p_signals[:, chid]\n\n # These were found manually\n sta = 184000\n end = sta + 1000\n times = np.arange(end-sta, dtype = 
'float')\n times /= record.fs\n\n # Extract the annotations for that fragment\n where = (sta < ann.annsamp) & (ann.annsamp < end)\n samples = ann.annsamp[where] - sta\n print samples\n\n # Prepare dirac-comb type of labels\n qrs_values = np.zeros_like(times)\n qrs_values[samples] = 1\n\n # Prepare gaussian-comb type of labels\n kernel = ss.hamming(36)\n qrs_gauss = np.convolve(kernel,\n qrs_values,\n mode = 'same')\n\n # Make the plots\n fig = plt.figure()\n ax1 = fig.add_subplot(3,1,1)\n ax1.plot(times, cha[sta : end])\n\n ax2 = fig.add_subplot(3,1,2, sharex=ax1)\n ax2.plot(times,\n qrs_values,\n 'C1',\n lw = 4,\n alpha = 0.888)\n ax3 = fig.add_subplot(3,1,3, sharex=ax1)\n ax3.plot(times,\n qrs_gauss,\n 'C3',\n lw = 4,\n alpha = 0.888)\n plt.setp(ax1.get_xticklabels(), visible=False)\n plt.setp(ax2.get_xticklabels(), visible=False)\n plt.xlabel('Time [s]')\n plt.xlim([0, 2.5])\n plt.show()\n\ndef show_objective_part2():\n \"\"\" For the model \"\"\"\n # Choose a record\n records = dm.get_records()\n path = records[13]\n record = wf.rdsamp(path)\n ann = wf.rdann(path, 'atr')\n\n chid = 0\n print 'File:', path\n print 'Channel:', record.signame[chid]\n\n cha = record.p_signals[:, chid]\n\n # These were found manually\n sta = 184000\n end = sta + 1000\n times = np.arange(end-sta, dtype = 'float')\n times /= record.fs\n\n # Extract the annotations for that fragment\n where = (sta < ann.annsamp) & (ann.annsamp < end)\n samples = ann.annsamp[where] - sta\n print samples\n\n # Prepare dirac-comb type of labels\n qrs_values = np.zeros_like(times)\n qrs_values[samples] = 1\n\n # Prepare gaussian-comb type of labels\n kernel = ss.hamming(36)\n qrs_gauss = np.convolve(kernel,\n qrs_values,\n mode = 'same')\n\n # Make the plots\n fig = plt.figure()\n ax1 = fig.add_subplot(2,1,1)\n ax1.plot(times, cha[sta : end])\n ax1.set_title('Input', loc = 'left')\n\n ax2 = fig.add_subplot(2,1,2, sharex=ax1)\n ax2.plot(times,\n qrs_gauss,\n 'C3',\n lw = 4,\n alpha = 0.888)\n 
ax2.set_title('Output', loc = 'left')\n ax1.grid()\n ax2.grid()\n plt.setp(ax1.get_xticklabels(), visible=False)\n plt.xlabel('Time [s]')\n plt.xlim([0, 2.5])\n plt.show()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
# Single Flask app instance; all route handlers below register on it
app = Flask(__name__)

# Ensure templates are auto-reloaded
# (development convenience: template edits take effect without a restart)
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Stamp every outgoing response with headers that forbid client caching."""
    no_cache = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Expires": 0,
        "Pragma": "no-cache",
    }
    for header, value in no_cache.items():
        response.headers[header] = value
    return response
# Custom filter
# usd() (from helpers) formats a float as currency inside Jinja templates
app.jinja_env.filters["usd"] = usd

# Configure session to use filesystem (instead of signed cookies)
# Sessions live in a fresh temp directory and expire with the browser session
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")

# Make sure API key is set
# NOTE(review): presumably consumed by lookup() for the quote service —
# confirm in helpers.py
if not os.environ.get("API_KEY"):
    raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
    """Show portfolio of stocks"""
    # Net shares per symbol for the logged-in user.
    # BUG FIX: filter on the aggregate ("HAVING amount != 0"); the old
    # "HAVING transactions" tested an arbitrary row's value, so positions
    # that were fully sold off (net 0 shares) could still be listed.
    rows = db.execute(
        "SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING amount != 0",
        session["user_id"])
    cash = db.execute("SELECT cash FROM users WHERE id=?", session["user_id"])
    cash_ = cash[0]["cash"]
    # store all the data into a dict so its easier to pass in to html
    display = []
    total_share = 0
    for row in rows:
        symbol = str(row["symbol"])
        # Look up each symbol once (previously queried twice per row);
        # also removed a leftover debug print of the symbol
        quoted = lookup(symbol)
        name = quoted["name"]
        shares = int(row["amount"])
        price = float(quoted["price"])
        total = float(shares) * price
        total_share += total
        display.append({'symbol': symbol, 'name': name, 'shares': shares,
                        'price': price, 'total': total})
    # Grand total = market value of all holdings plus remaining cash
    total_money = total_share + cash[0]["cash"]
    return render_template("index.html", display=display,
                           total_money=total_money, cash=cash_)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
    """Buy shares of stock"""
    if request.method == "POST":
        # Ensure symbol was submitted
        if not request.form.get("symbol"):
            return apology("must provide symbol", 400)

        # Ensure shares was submitted
        elif not request.form.get("shares"):
            return apology("must provide shares", 400)

        if not request.form.get("shares").isdigit():
            return apology("must be integer", 400)

        elif int(request.form.get("shares")) < 1:
            return apology("must be positive integer", 400)

        elif lookup(request.form.get("symbol")) == None:
            return apology("Must be a valid symbol", 400)

        # ensure money > price
        quote = lookup(request.form.get("symbol"))
        shares = request.form.get("shares")
        cash = db.execute("SELECT cash FROM users WHERE id=?", session["user_id"])

        # BUG FIX: keep the full float price. int() truncated e.g. 145.67
        # down to 145, undercharging the buyer in both the affordability
        # check and the cash deduction below.
        total = float(quote["price"]) * int(shares)
        if cash[0]["cash"] < total:
            return apology("You can't affort this/these", 400)

        # BUY, STORE DATA IN REPOSITORY AND RECORD
        # record this transaction
        db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))", session["user_id"], int(shares), quote["symbol"], float(quote["price"]))
        # deduct the cash
        db.execute("UPDATE users SET cash=cash- (?) WHERE id=?", total, session["user_id"])
        return redirect("/")
    else:
        return render_template("buy.html")
@app.route("/history")
@login_required
def history():
    """Show history of transactions"""
    # BUG FIX: restrict to the logged-in user; the old query returned
    # every user's transactions (privacy leak).
    rows = db.execute("SELECT * FROM record WHERE userID=? ORDER BY t1", session["user_id"])
    return render_template("history.html", rows=rows)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log user in"""

    # Forget any user_id
    session.clear()

    # GET (link click or redirect): just render the form
    if request.method != "POST":
        return render_template("login.html")

    username = request.form.get("username")
    password = request.form.get("password")

    # Both fields are required
    if not username:
        return apology("must provide username", 403)
    if not password:
        return apology("must provide password", 403)

    # Query database for username
    rows = db.execute("SELECT * FROM users WHERE username = ?", username)

    # Ensure username exists and password is correct
    if len(rows) != 1 or not check_password_hash(rows[0]["hash"], password):
        return apology("invalid username and/or password", 403)

    # Remember which user has logged in, then go to the portfolio
    session["user_id"] = rows[0]["id"]
    return redirect("/")
@app.route("/logout")
def logout():
    """Log user out"""
    # Drop all session state, then send the visitor back to the login form
    session.clear()
    return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
    """Get stock quote."""
    # GET: show the lookup form
    if request.method != "POST":
        return render_template("quote.html")

    # Resolve the symbol; lookup() returns None for unknown symbols
    stock = lookup(request.form.get("symbol"))
    if stock is None:
        return apology("Invalid symbol", 400)

    price = usd(stock["price"])
    return render_template("quoted.html", quote=stock, price=price)
@app.route("/register", methods=["GET", "POST"])
def register():
    """Register user"""
    # GET: show the registration form
    if request.method != "POST":
        return render_template("register.html")

    username = request.form.get("username")
    password = request.form.get("password")
    confirmation = request.form.get("confirmation")

    # All three fields are required
    if not username:
        return apology("must provide username", 400)
    if not password:
        return apology("must provide password", 400)
    if not confirmation:
        return apology("must comfirm password", 400)

    # Both password entries must agree
    if confirmation != password:
        return apology("Password not matches", 400)

    # Ensure username is new (unique)
    rows = db.execute("SELECT * FROM users WHERE username = ?", username)
    if len(rows) != 0:
        return apology("username used", 400)

    # Store only a hash of the password, never the plaintext
    db.execute("INSERT INTO users (username,hash) VALUES (?,?)", username,
               generate_password_hash(password))

    # Redirect user to home page
    return redirect("/")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
    """Sell shares of stock"""
    if request.method == 'POST':
        # parameter is not filled
        if not request.form.get("shares"):
            return apology("Please enter how much u want to sell", 400)

        # check if shares(amount) that are going to be sell less than owner's share.
        sell = request.form.get("symbol")
        shares = request.form.get("shares")
        amount = db.execute("SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions", session["user_id"], sell)

        # BUG FIX: guard against a symbol the user holds no shares of — the
        # query then returns an empty list and amount[0] raised IndexError.
        if not amount or amount[0]["amount"] < int(shares):
            return apology("You dont own that much shares", 400)

        # record sell and add cash amount
        quote = lookup(sell)
        price = quote["price"]
        # BUG FIX: keep the full float price; int() truncated it and
        # under-credited the seller.
        total = float(price) * int(shares)
        # BUG FIX: store t1 in the same '%Y-%m-%d %H:%M:%S' format buy()
        # uses; the old epoch-seconds format made "ORDER BY t1" compare
        # mixed representations.
        db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))", session["user_id"], int(shares) * -1, quote["symbol"], price)
        db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?", total, session["user_id"])
        return redirect("/")
    else:
        # Populate the form with the symbols the user currently holds
        rows = db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING amount != 0", session["user_id"])
        return render_template("sell.html", rows=rows)
@app.route("/HAX", methods=["GET", "POST"])
@login_required
def HAX():
    # add free monei boiiii
    # GET: show the cheat form
    if request.method != "POST":
        return render_template("HAX.html")

    # Credit the requested amount straight onto the user's cash balance
    bonus = request.form.get("HAX")
    db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?", bonus, session["user_id"])
    flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
    return redirect("/")
def errorhandler(e):
    """Render an apology page for any error raised by the app."""
    if isinstance(e, HTTPException):
        return apology(e.name, e.code)
    # Anything that is not already an HTTP error is reported as a generic 500
    e = InternalServerError()
    return apology(e.name, e.code)
# Listen for errors
# Register errorhandler() for every HTTP status code Werkzeug knows about,
# so all aborts and uncaught HTTP errors funnel through the same apology page.
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)
|
normal
|
{
"blob_id": "c66f4ee5719f764c8c713c23815302c00b6fb9af",
"index": 310,
"step-1": "<mask token>\n\n\n@app.route('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\n<mask token>\n\n\n@app.route('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n<mask token>\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.after_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\n<mask token>\n\n\n@app.route('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n@app.route('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n 
\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\n<mask token>\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\n@app.route('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n@app.route('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif 
request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\n<mask token>\n\n\n@app.route('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\n@app.after_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\napp.jinja_env.filters['usd'] = usd\napp.config['SESSION_FILE_DIR'] = mkdtemp()\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\ndb = SQL('sqlite:///finance.db')\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\n@app.route('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n@app.route('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n 
return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\n@app.route('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\n@app.route('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n@app.route('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = 
usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\n@app.route('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\n@app.route('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n",
"step-4": "import os\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom helpers import apology, login_required, lookup, usd\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\n@app.after_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\napp.jinja_env.filters['usd'] = usd\napp.config['SESSION_FILE_DIR'] = mkdtemp()\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\ndb = SQL('sqlite:///finance.db')\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\n@app.route('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\n@app.route('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\n@app.route('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\n@app.route('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n@app.route('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not 
matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\n@app.route('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\n@app.route('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) 
WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n",
"step-5": "import os\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom helpers import apology, login_required, lookup, usd\n\n# Configure application\napp = Flask(__name__)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n\n# Ensure responses aren't cached\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# Custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\n# Make sure API key is set\nif not os.environ.get(\"API_KEY\"):\n raise RuntimeError(\"API_KEY not set\")\n\n\n@app.route(\"/\")\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n cash_=cash[0][\"cash\"]\n\n #store all the data into a dict so its easier to pass in to html\n display=[]\n total_share=0\n for row in rows:\n symbol=str(row[\"symbol\"])\n print(symbol)\n name=lookup(symbol)[\"name\"]\n shares=int(row[\"amount\"])\n price=float(lookup(symbol)[\"price\"])\n total=float(shares) *price\n total_share+=total\n display.append({'symbol':symbol, 'name':name, 'shares':shares, 'price':price, 'total':total})\n\n total_money=total_share+cash[0][\"cash\"]\n return render_template(\"index.html\",display=display,total_money=total_money,cash=cash_)\n\n\n\n@app.route(\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n elif not request.form.get(\"shares\"):\n return apology(\"must provide shares\", 400)\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be integer\",400)\n\n elif int(request.form.get(\"shares\"))<1 :\n return apology(\"must be positive integer\", 400)\n\n elif lookup(request.form.get(\"symbol\"))==None:\n return apology(\"Must be a valid symbol\",400)\n\n #ensure money>price\n quote=lookup(request.form.get(\"symbol\"))\n shares=request.form.get(\"shares\")\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n if cash[0][\"cash\"]<int(quote[\"price\"])*int(shares):\n return apology(\"You can't affort this/these\",400)\n\n #BUY, STORE DATA IN REPOSITORY AND RECORD\n\n #record this transaction\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\",session[\"user_id\"],int(shares),quote[\"symbol\"],float(quote[\"price\"]))\n\n #deduct the cash\n 
total=int(quote[\"price\"])*int(shares)\n db.execute(\"UPDATE users SET cash=cash- (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")\n\n@app.route(\"/history\")\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows=db.execute(\"SELECT * FROM record ORDER BY t1\")\n return render_template(\"history.html\",rows=rows)\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user in\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = ?\", request.form.get(\"username\"))\n\n # Ensure username exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid username and/or password\", 403)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\n@app.route(\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method==\"POST\":\n quote=lookup(request.form.get(\"symbol\"))\n if quote==None:\n return apology(\"Invalid symbol\",400)\n price=usd(quote[\"price\"])\n return 
render_template(\"quoted.html\",quote=quote,price=price)\n else:\n return render_template(\"quote.html\")\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 400)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n\n # Ensure comfirm password was submitted\n elif not request.form.get(\"confirmation\"):\n return apology(\"must comfirm password\", 400)\n\n # Ensure password matches\n elif request.form.get(\"confirmation\") != request.form.get(\"password\"):\n return apology(\"Password not matches\",400)\n\n # Ensure username is new(unique)\n rows = db.execute(\"SELECT * FROM users WHERE username = ?\", request.form.get(\"username\"))\n if len(rows) != 0:\n return apology(\"username used\", 400)\n\n db.execute(\"INSERT INTO users (username,hash) VALUES (?,?)\",request.form.get(\"username\"),generate_password_hash(request.form.get(\"password\")))\n\n\n # Redirect user to home page\n return redirect(\"/\")\n\n\n else:\n return render_template(\"register.html\")\n\n\n@app.route(\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method=='POST':\n #parameter is not filled\n if not request.form.get(\"shares\"):\n return apology(\"Please enter how much u want to sell\",400)\n #check if shares(amount) that are going to be sell less than owner's share.\n sell=request.form.get(\"symbol\")\n shares=request.form.get(\"shares\")\n amount=db.execute(\"SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? 
GROUP BY symbol HAVING transactions\",session[\"user_id\"],sell)\n if amount[0][\"amount\"]<int(shares):\n return apology(\"You dont own that much shares\",400)\n\n #record sell and add cash amount\n quote=lookup(sell)\n price=quote[\"price\"]\n total=int(price)*int(shares)\n\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\",session[\"user_id\"],(int(shares)*-1),quote[\"symbol\"],price)\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n\n return render_template(\"sell.html\",rows=rows)\n\n\n\n@app.route(\"/HAX\", methods=[\"GET\", \"POST\"])\n@login_required\ndef HAX():\n #add free monei boiiii\n if request.method==\"POST\":\n total=request.form.get(\"HAX\")\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n\n return redirect(\"/\")\n\n else:\n return render_template(\"HAX.html\")\n\n\n\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n# Listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n",
"step-ids": [
3,
9,
13,
14,
15
]
}
|
[
3,
9,
13,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv[1:]) == 5:
name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int
(pos) for pos in sys.argv[1:]]
elif len(sys.argv[1:]) == 4:
name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in
sys.argv[1:]]
second_note_pos = None
else:
name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5,
3, 4, 2, 1)
<|reserved_special_token_0|>
writer.writerow(('column', 'start', 'length'))
for row in reader:
try:
if not row[name_pos].strip() or row[name_pos].strip() in blacklist:
continue
except IndexError:
continue
if second_note_pos is not None and row[second_note_pos].strip():
col_name = '; '.join(name.strip() for name in (row[name_pos], row[
first_note_pos], row[second_note_pos]))
elif row[first_note_pos].strip():
col_name = '; '.join(name.strip() for name in (row[name_pos], row[
first_note_pos]))
else:
col_name = row[name_pos].strip()
col_start = int(row[start_pos].split('-')[0].strip())
col_length = int(float(row[length_pos])) - 1
writer.writerow((col_name, col_start, col_length))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv[1:]) == 5:
name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int
(pos) for pos in sys.argv[1:]]
elif len(sys.argv[1:]) == 4:
name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in
sys.argv[1:]]
second_note_pos = None
else:
name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5,
3, 4, 2, 1)
blacklist = 'Blank', 'semicolon filler'
reader = csv.reader(sys.stdin)
writer = csv.writer(sys.stdout)
writer.writerow(('column', 'start', 'length'))
for row in reader:
try:
if not row[name_pos].strip() or row[name_pos].strip() in blacklist:
continue
except IndexError:
continue
if second_note_pos is not None and row[second_note_pos].strip():
col_name = '; '.join(name.strip() for name in (row[name_pos], row[
first_note_pos], row[second_note_pos]))
elif row[first_note_pos].strip():
col_name = '; '.join(name.strip() for name in (row[name_pos], row[
first_note_pos]))
else:
col_name = row[name_pos].strip()
col_start = int(row[start_pos].split('-')[0].strip())
col_length = int(float(row[length_pos])) - 1
writer.writerow((col_name, col_start, col_length))
<|reserved_special_token_1|>
import csv
import sys
# Normalize a fixed-width file layout spec, read as CSV on stdin, into a
# three-column CSV on stdout with header (column, start, length).
#
# Optional argv overrides for the input column positions:
#   5 ints -> name, start, length, first-note, second-note positions
#   4 ints -> same, but the input has no second-note column
#   anything else -> built-in defaults below
if len(sys.argv[1:]) == 5:
    name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int
        (pos) for pos in sys.argv[1:]]
elif len(sys.argv[1:]) == 4:
    name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in
        sys.argv[1:]]
    second_note_pos = None
else:
    # Default positions -- presumably match a specific source spreadsheet
    # layout; TODO confirm against the actual input file.
    name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5,
        3, 4, 2, 1)
# Rows whose name field matches these are filler, not real columns.
blacklist = 'Blank', 'semicolon filler'
reader = csv.reader(sys.stdin)
writer = csv.writer(sys.stdout)
writer.writerow(('column', 'start', 'length'))
for row in reader:
    try:
        # Skip rows with an empty or blacklisted name; rows too short to
        # even reach name_pos are skipped via the IndexError handler.
        if not row[name_pos].strip() or row[name_pos].strip() in blacklist:
            continue
    except IndexError:
        continue
    # Fold any non-empty note fields into the output name, '; '-separated.
    # (When the second note is present, the first note is included even if
    # empty -- preserved as-is; confirm whether that is intended.)
    if second_note_pos is not None and row[second_note_pos].strip():
        col_name = '; '.join(name.strip() for name in (row[name_pos], row[
            first_note_pos], row[second_note_pos]))
    elif row[first_note_pos].strip():
        col_name = '; '.join(name.strip() for name in (row[name_pos], row[
            first_note_pos]))
    else:
        col_name = row[name_pos].strip()
    # "start" may be a range such as "10-12"; keep only the first endpoint.
    col_start = int(row[start_pos].split('-')[0].strip())
    # Length cell may be fractional text; truncate to int. NOTE(review):
    # the -1 presumably converts a width to an end-offset convention --
    # confirm against the downstream consumer.
    col_length = int(float(row[length_pos])) - 1
    writer.writerow((col_name, col_start, col_length))
<|reserved_special_token_1|>
import csv
import sys

# Convert a layout-description CSV (stdin) into a minimal column-map CSV
# (stdout) with header (column, start, length).
#
# Optional argv: five integers give the input positions of the name, start,
# length, first-note and second-note fields; four integers omit the second
# note; anything else falls back to the defaults below.

args = sys.argv[1:]
if len(args) == 5:
    name_pos, start_pos, length_pos, first_note_pos, second_note_pos = map(int, args)
elif len(args) == 4:
    name_pos, start_pos, length_pos, first_note_pos = map(int, args)
    second_note_pos = None
else:
    name_pos, start_pos, length_pos = 5, 3, 4
    first_note_pos, second_note_pos = 2, 1

# Name values that mark filler rows to be dropped entirely.
blacklist = ("Blank", "semicolon filler")

reader = csv.reader(sys.stdin)
writer = csv.writer(sys.stdout)
writer.writerow(('column', 'start', 'length'))

for record in reader:
    # Drop rows whose name cell is missing, empty, or blacklisted.
    try:
        base_name = record[name_pos].strip()
    except IndexError:
        continue
    if not base_name or base_name in blacklist:
        continue
    # Append any non-empty note fields to the column name.
    if second_note_pos is not None and record[second_note_pos].strip():
        col_name = '; '.join(field.strip() for field in (
            record[name_pos], record[first_note_pos], record[second_note_pos]))
    elif record[first_note_pos].strip():
        col_name = '; '.join(field.strip() for field in (
            record[name_pos], record[first_note_pos]))
    else:
        col_name = base_name
    # "start" may be a range such as "10-12"; keep the first endpoint.
    col_start = int(record[start_pos].split('-')[0].strip())
    col_length = int(float(record[length_pos])) - 1
    writer.writerow((col_name, col_start, col_length))
|
flexible
|
{
"blob_id": "d7653a205fb8203fed4009846780c63dd1bcb505",
"index": 3603,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv[1:]) == 5:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int\n (pos) for pos in sys.argv[1:]]\nelif len(sys.argv[1:]) == 4:\n name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in\n sys.argv[1:]]\n second_note_pos = None\nelse:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5, \n 3, 4, 2, 1)\n<mask token>\nwriter.writerow(('column', 'start', 'length'))\nfor row in reader:\n try:\n if not row[name_pos].strip() or row[name_pos].strip() in blacklist:\n continue\n except IndexError:\n continue\n if second_note_pos is not None and row[second_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos], row[second_note_pos]))\n elif row[first_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos]))\n else:\n col_name = row[name_pos].strip()\n col_start = int(row[start_pos].split('-')[0].strip())\n col_length = int(float(row[length_pos])) - 1\n writer.writerow((col_name, col_start, col_length))\n",
"step-3": "<mask token>\nif len(sys.argv[1:]) == 5:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int\n (pos) for pos in sys.argv[1:]]\nelif len(sys.argv[1:]) == 4:\n name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in\n sys.argv[1:]]\n second_note_pos = None\nelse:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5, \n 3, 4, 2, 1)\nblacklist = 'Blank', 'semicolon filler'\nreader = csv.reader(sys.stdin)\nwriter = csv.writer(sys.stdout)\nwriter.writerow(('column', 'start', 'length'))\nfor row in reader:\n try:\n if not row[name_pos].strip() or row[name_pos].strip() in blacklist:\n continue\n except IndexError:\n continue\n if second_note_pos is not None and row[second_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos], row[second_note_pos]))\n elif row[first_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos]))\n else:\n col_name = row[name_pos].strip()\n col_start = int(row[start_pos].split('-')[0].strip())\n col_length = int(float(row[length_pos])) - 1\n writer.writerow((col_name, col_start, col_length))\n",
"step-4": "import csv\nimport sys\nif len(sys.argv[1:]) == 5:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = [int\n (pos) for pos in sys.argv[1:]]\nelif len(sys.argv[1:]) == 4:\n name_pos, start_pos, length_pos, first_note_pos = [int(pos) for pos in\n sys.argv[1:]]\n second_note_pos = None\nelse:\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = (5, \n 3, 4, 2, 1)\nblacklist = 'Blank', 'semicolon filler'\nreader = csv.reader(sys.stdin)\nwriter = csv.writer(sys.stdout)\nwriter.writerow(('column', 'start', 'length'))\nfor row in reader:\n try:\n if not row[name_pos].strip() or row[name_pos].strip() in blacklist:\n continue\n except IndexError:\n continue\n if second_note_pos is not None and row[second_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos], row[second_note_pos]))\n elif row[first_note_pos].strip():\n col_name = '; '.join(name.strip() for name in (row[name_pos], row[\n first_note_pos]))\n else:\n col_name = row[name_pos].strip()\n col_start = int(row[start_pos].split('-')[0].strip())\n col_length = int(float(row[length_pos])) - 1\n writer.writerow((col_name, col_start, col_length))\n",
"step-5": "import csv\nimport sys\n\nif len(sys.argv[1:]) == 5 :\n (name_pos, start_pos, length_pos, \n first_note_pos, second_note_pos) = [int(pos) for pos in sys.argv[1:]]\nelif len(sys.argv[1:]) == 4 :\n (name_pos, start_pos, length_pos, \n first_note_pos) = [int(pos) for pos in sys.argv[1:]]\n second_note_pos = None\nelse :\n name_pos, start_pos, length_pos, first_note_pos, second_note_pos = 5, 3, 4, 2, 1\n\nblacklist=(\"Blank\", \"semicolon filler\")\n\nreader = csv.reader(sys.stdin)\nwriter = csv.writer(sys.stdout)\nwriter.writerow(('column', 'start', 'length'))\n\nfor row in reader :\n try :\n if not row[name_pos].strip() or row[name_pos].strip() in blacklist :\n continue\n except IndexError :\n continue\n if second_note_pos is not None and row[second_note_pos].strip() :\n col_name = '; '.join(name.strip() for name in (row[name_pos], \n row[first_note_pos], \n row[second_note_pos]))\n elif row[first_note_pos].strip() :\n col_name = '; '.join(name.strip() for name in (row[name_pos], \n row[first_note_pos]))\n else :\n col_name = row[name_pos].strip()\n col_start = int(row[start_pos].split('-')[0].strip())\n col_length = int(float(row[length_pos])) - 1\n writer.writerow((col_name, col_start, col_length))\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import logging, numpy as np, time, pandas as pd
from abc import abstractmethod
from kombu import binding
from tqdm import tqdm
from functools import lru_cache
from threading import Thread
from math import ceil
from copy import copy
from .pos import Position
from .base import BaseConsumer
from .event import SignalEventPct, OrderEvent
from .conf import LONG, SHORT, EXIT, MKT, BUY, SELL, LOCAL_TZ
from .util import clean_timestamp
from .errors import OverFilling
logger = logging.getLogger('Strategy')
class BaseStrategy(BaseConsumer):
    """Abstract base class providing the interface for all concrete
    (inherited) strategy objects.

    Goal
    ----
    A derived Strategy object:
    - calculates signals based on the inbound 'Tick' data
    - each 'Signal' is at the symbol level and is published to the bus

    Note
    ----
    Designed to work with both historic and live data: the Strategy is
    agnostic to the data source since it obtains the 'Tick' object from
    the MarketEvent message.
    """

    def __init__(
        self, symbol_list, allocation, freq, positions,
        start, end, warmup=0, fixed_allocation=True,
        batch_size=10000
    ):
        """
        Parameters
        ----------
        symbol_list (list): Contract perm_ticks to receive market data for
        allocation (float): dollar amount this strategy is allowed to use
        freq (conf.FREQ): data frequency type for this strategy
        positions (dict of dict): per-perm_tick position settings:
            - pct_portfolio (float): fraction of the allocation
              (defaults to an equal split across all positions)
            - rebalance (int): # of days between rebalances to pct_portfolio
            - hard_stop (float): hard drawdown gate to close the position
        start, end (datetime): backtest data range; `end` is made inclusive
            by pushing it to the last second of that day
        warmup (int): # of DAYS of data to consume before trading; stored
            internally as a tick count (days * ticks-per-day)
        fixed_allocation (bool): if True, total buying power is the fixed
            `allocation`; otherwise it floats with current NAV
        batch_size (int): stored but not used in this class -- presumably
            consumed by subclasses; TODO confirm
        """
        # ticks per trading day at this data frequency
        n = ceil(freq.one_day)
        num_pos = len(positions)
        self.symbol_list = symbol_list
        self.freq = freq
        # warmup is given in days; convert to a tick count
        self.warmup = warmup * n
        if start:
            self.start_dt = clean_timestamp(start)
        if end:
            # next day minus one second => last second of the `end` day
            self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1, days=1)
        # allocation / cash parameters for tracking the strategy portfolio
        self.allocation = allocation
        self.cash = allocation
        self.commission = 0
        self.fixed_allocation = fixed_allocation
        pos_dict = {}
        for perm_tick, v in positions.items():
            # to hold a position we must also receive its market ticks
            if perm_tick not in self.symbol_list:
                self.symbol_list.append(perm_tick)
            pos = Position(
                perm_tick,
                pct_portfolio=v.get('pct_portfolio', 1/num_pos),
                rebalance=v.get('rebalance', 0) * n,  # days -> ticks
                hard_stop=v.get('hard_stop', 0),
            )
            pos_dict[perm_tick] = pos
        self.pos = pos_dict
        # tick counter; starts at 0 and increments on every market tick
        self.t = 0
        # per-tick portfolio snapshots (see _save_positions)
        self._hist = []
        self.batch_size = batch_size
        super().__init__(comp_type='STGY', required=['feed', 'exe'])

    @abstractmethod
    def calculate_signals(self):
        """Provide the mechanism to calculate signals; subclasses override."""
        raise NotImplementedError(
            "Should implement calculate_signals()\n" + \
            "By calling this method to calculate 'Signal' Events"
        )

    def subscriptions(self):
        """Return (topic, consumer-id, handler) bindings for the bus.

        NOTE(review): 'ack-dereg_feed' mixes '_' and '-' unlike the other
        hyphenated topics -- confirm the publisher uses the same spelling.
        """
        return [
            ('ack-reg-feed', self.id, self.on_ack_reg_feed),
            ('ack-dereg_feed', self.id, self.on_ack_dereg_feed),
            ('ack-reg-exe', self.id, self.on_ack_reg_exe),
            ('ack-dereg-exe', self.id, self.on_ack_dereg_exe),
            ('eod', self.id, self.on_eod),
            ('tick', self.id, self.on_market),
            ('fill', self.id, self.on_fill),
        ]

    def update_data(self, ticks):
        """Optional subclass hook called after each tick update (no-op)."""
        pass

    def on_hard_stop(self, symbol):
        """Optional subclass hook when a hard-stop order fires (no-op)."""
        pass

    def on_rebalance(self, symbol):
        """Optional subclass hook when a rebalance order fires (no-op)."""
        pass

    def has_position(self, symbol):
        """True if the given symbol currently holds any position."""
        return self.pos[symbol].has_position

    def has_open_orders(self, symbol):
        """True if the given symbol has unfilled orders outstanding."""
        return self.pos[symbol].has_open_orders

    def has_long(self, symbol):
        """True if the given symbol holds a long position."""
        return self.pos[symbol].has_long

    def has_short(self, symbol):
        """True if the given symbol holds a short position."""
        return self.pos[symbol].has_short

    @property
    def nav(self):
        """Net Account Value / Net Liquidating Value (positions MV + cash)."""
        return sum(pos.mv for pos in self.pos.values()) + self.cash

    @property
    def total_cost(self):
        """Total cost basis currently tied up across all positions."""
        return sum(pos.cost for pos in self.pos.values())

    @property
    def total_bp(self):
        """Total buying power: fixed allocation, or floating NAV."""
        if self.fixed_allocation:
            return self.allocation
        else:
            return self.nav

    @property
    def avaliable_bp(self):
        # NOTE(review): name is misspelled ("avaliable") but is public API
        # used by on_market -- renaming would break callers.
        """Buying power not yet committed to positions."""
        return self.total_bp - self.total_cost

    def start(self):
        """Block until the component is RUNNING, then kick off the run."""
        while self.status != 'RUNNING':
            time.sleep(2)
        # progress bar sized to (business days in range) * (ticks per day)
        self._pbar = tqdm(
            total=int(np.ceil(
                pd.bdate_range(self.start_dt, self.end_dt).size
                * np.ceil(self.freq.one_day)
            )),
            miniters=int(np.ceil(self.freq.one_day)),
            unit=' tick<{}>'.format(self.freq.value),
        )
        # publish events to get the data flow started
        logger.info('Warming up Strategy')
        self.basic_publish('warmup', sender=self.id)
        logger.info('Really Starting up calculating Signals')
        self.basic_publish('next', sender=self.id)

    def on_ack_reg_feed(self, oid, body):
        # data feed registration acknowledged
        self.required['feed'] = True

    def on_ack_reg_exe(self, oid, body):
        # execution handler registration acknowledged
        self.required['exe'] = True

    def on_ack_dereg_feed(self, oid, body):
        # data feed deregistration acknowledged
        self.required['feed'] = False

    def on_ack_dereg_exe(self, oid, body):
        # execution handler deregistration acknowledged
        self.required['exe'] = False

    def on_eod(self, oid, body):
        """Handle the end-of-data event: finish the bar and shut down."""
        self._pbar.update(self._pbar.total - self._pbar.n)
        self._pbar.close()
        self.basic_publish('dereg-feed', sender=self.id)
        self.basic_publish('dereg-exe', sender=self.id)
        self._stop()

    def on_fill(self, oid, body):
        """Consume a filled order.

        Updates the symbol's Position (quantity / holding state), then the
        strategy-level cash and commission totals.

        Parameters
        ----------
        body (dict): carries the Fill Event under key 'fill'
        """
        logger.info('Consuming filled Order')
        fill = body['fill']
        # update the position first
        self.pos[fill.symbol].on_fill(fill)
        # signed cash impact: fill_type.value is presumably +1 for BUY and
        # -1 for SELL (see conf BUY/SELL) -- TODO confirm
        Q = fill.quantity
        K, D, C = fill.fill_cost, fill.fill_type, fill.commission
        cost = D.value * K * Q
        self.commission += C
        self.cash -= cost + C

    def on_market(self, oid, body):
        """Handle a market tick.

        Flow:
        - update every position (and subclass state) with the new ticks
        - once warmup has elapsed, calculate signals and publish each
          position's generated orders, buying power permitting
        - advance the progress bar and snapshot the portfolio history

        Parameters
        ----------
        body (dict): carries 'freq' and 'ticks' from the Market Event
        """
        # ignore ticks published for other data frequencies
        if body['freq'] != self.freq: return
        ticks = body['ticks']
        self._update_data(ticks)
        if self.t >= self.warmup:
            self._calculate_signals()
            # publish generated signals as orders
            equity = self.total_bp
            bp = copy(self.avaliable_bp)  # snapshot of current buying power
            for S, pos in self.pos.items():
                for order, lvl in pos.generate_orders(equity):
                    used_bp = self.on_order(order, lvl, bp)
                    bp -= used_bp
        # save strategy performance history
        self._pbar.update(1)
        # if ticks.timestamp >= self.start_dt:
        #     self.basic_publish('next', sender=self.id)
        if self.t >= self.warmup:
            self._save_positions()

    def on_order(self, order, lvl, bp):
        """Publish an order generated from a signal, if buying power allows.

        Parameters
        ----------
        order (Order Event): order generated by a Position
        lvl (str): urgency level; 'hard_stop' and 'rebalance' trigger the
            corresponding subclass callbacks before publishing
        bp (float): amount of buying power still available this tick

        Returns
        -------
        float: buying power consumed (0 if the order was not placed)
        """
        S = order.symbol
        # dollar cost estimated at the latest close price
        need_bp = order.quantity * self.ticks[S].close
        if need_bp <= bp:  # have enough buying power to place order
            used_bp = need_bp
            if lvl == 'hard_stop':
                self.on_hard_stop(S)
            elif lvl == 'rebalance':
                self.on_rebalance(S)
            self.pos[order.symbol].confirm_order(order)
            logger.info(
                'Publish Order={} for Strategy={}'
                .format(order, self.id)
            )
            self.basic_publish('order', sender=self.id, order=order)
        else:
            # silently dropped when under-funded -- no retry is visible here
            used_bp = 0
        return used_bp

    def generate_signal(self, symbol, signal_type, **kws):
        """Record a signal on the symbol's Position for batch processing.

        Parameters
        ----------
        symbol (str): target symbol for the signal
        signal_type: {LONG, SHORT, EXIT}
        kws: extra arguments passed to the SignalEvent class -- notably
            `strength` for percentage of portfolio; if not passed, the
            position's default `pct_portfolio` is used
        """
        self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)

    def _calculate_signals(self):
        # refresh each position's internal signal state, then defer to the
        # subclass implementation
        for pos in self.pos.values():
            pos._calculate_signals()
        self.calculate_signals()

    def _update_data(self, ticks):
        """Update strategy state from a new market observation.

        Note
        ----
        1. Always called before calculating new signals.
        2. Runs during warmup too, since warmup exists to gather the data
           the strategy needs.
        """
        self.ticks = ticks
        self.t += 1
        for S, pos in self.pos.items():
            pos._update_data(ticks[S])
        self.update_data(ticks)

    def _save_positions(self):
        """Append a snapshot of portfolio state (cash, NAV, per-symbol
        quantity and market value) to the in-memory history."""
        output = {
            'timestamp': self.ticks.timestamp, 't': self.t,
            'cash': self.cash, 'commission': self.commission,
            'nav': self.nav,
        }
        for k, v in self.pos.items():
            output[str(k)+'_quantity'] = v.quantity
            output[str(k)+'_mv'] = v.mv
        self._hist.append(output)
|
normal
|
{
"blob_id": "76d166bc227986863db77aa784be3de8110437ff",
"index": 530,
"step-1": "<mask token>\n\n\nclass BaseStrategy(BaseConsumer):\n <mask token>\n <mask token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n <mask token>\n <mask token>\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n <mask token>\n <mask token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <mask token>\n\n def start(self):\n while self.status != 'RUNNING':\n time.sleep(2)\n self._pbar = tqdm(total=int(np.ceil(pd.bdate_range(self.start_dt,\n self.end_dt).size * np.ceil(self.freq.one_day))), miniters=int(\n np.ceil(self.freq.one_day)), unit=' tick<{}>'.format(self.freq.\n value))\n logger.info('Warming up Strategy')\n self.basic_publish('warmup', sender=self.id)\n logger.info('Really Starting up calculating Signals')\n self.basic_publish('next', sender=self.id)\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n <mask token>\n <mask token>\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update 
position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n <mask token>\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n <mask token>\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given 
market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"step-2": "<mask token>\n\n\nclass BaseStrategy(BaseConsumer):\n <mask token>\n <mask token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n <mask token>\n <mask token>\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n <mask token>\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <mask token>\n\n def start(self):\n while self.status != 'RUNNING':\n time.sleep(2)\n self._pbar = tqdm(total=int(np.ceil(pd.bdate_range(self.start_dt,\n self.end_dt).size * np.ceil(self.freq.one_day))), miniters=int(\n np.ceil(self.freq.one_day)), unit=' tick<{}>'.format(self.freq.\n value))\n logger.info('Warming up Strategy')\n self.basic_publish('warmup', sender=self.id)\n logger.info('Really Starting up calculating Signals')\n self.basic_publish('next', sender=self.id)\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n <mask token>\n <mask token>\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, 
body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n <mask token>\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, 
lvl='normal', **kws)\n <mask token>\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"step-3": "<mask token>\n\n\nclass BaseStrategy(BaseConsumer):\n <mask token>\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise 
NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n\n def start(self):\n while self.status != 'RUNNING':\n time.sleep(2)\n self._pbar = tqdm(total=int(np.ceil(pd.bdate_range(self.start_dt,\n self.end_dt).size * np.ceil(self.freq.one_day))), miniters=int(\n np.ceil(self.freq.one_day)), unit=' tick<{}>'.format(self.freq.\n value))\n logger.info('Warming up Strategy')\n self.basic_publish('warmup', sender=self.id)\n logger.info('Really Starting up calculating Signals')\n self.basic_publish('next', sender=self.id)\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n 
self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n\n def on_order(self, order, lvl, bp):\n \"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power 
before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n S = order.symbol\n need_bp = order.quantity * self.ticks[S].close\n if need_bp <= bp:\n used_bp = need_bp\n if lvl == 'hard_stop':\n self.on_hard_stop(S)\n elif lvl == 'rebalance':\n self.on_rebalance(S)\n self.pos[order.symbol].confirm_order(order)\n logger.info('Publish Order={} for Strategy={}'.format(order,\n self.id))\n self.basic_publish('order', sender=self.id, order=order)\n else:\n used_bp = 0\n return used_bp\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. 
this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"step-4": "<mask token>\nlogger = logging.getLogger('Strategy')\n\n\nclass BaseStrategy(BaseConsumer):\n \"\"\"Strategy is an abstract base class providing an interface for\n\tall subsequent (inherited) strategy handling objects.\n\n\tGoal\n\t----\n\tThe goal of a (derived) Strategy object \n\t- based on the inbound 'Tick', calcualte signals\n\t- 'Signal' is at the symbol level which will be published\n\n\tNote\n\t----\n\tThis is designed to work both with historic and live data as\n\tthe Strategy object is agnostic to the data source,\n\tsince it obtains the 'Tick' object from MarketEvent message\n\t\"\"\"\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if 
perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n\n def start(self):\n while self.status != 'RUNNING':\n time.sleep(2)\n self._pbar = tqdm(total=int(np.ceil(pd.bdate_range(self.start_dt,\n 
self.end_dt).size * np.ceil(self.freq.one_day))), miniters=int(\n np.ceil(self.freq.one_day)), unit=' tick<{}>'.format(self.freq.\n value))\n logger.info('Warming up Strategy')\n self.basic_publish('warmup', sender=self.id)\n logger.info('Really Starting up calculating Signals')\n self.basic_publish('next', sender=self.id)\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n 
self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n\n def on_order(self, order, lvl, bp):\n \"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n S = order.symbol\n need_bp = order.quantity * self.ticks[S].close\n if need_bp <= bp:\n used_bp = need_bp\n if lvl == 'hard_stop':\n self.on_hard_stop(S)\n elif lvl == 'rebalance':\n self.on_rebalance(S)\n self.pos[order.symbol].confirm_order(order)\n logger.info('Publish Order={} for Strategy={}'.format(order,\n self.id))\n self.basic_publish('order', sender=self.id, order=order)\n else:\n used_bp = 0\n return used_bp\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing 
state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"step-5": "import logging, numpy as np, time, pandas as pd\n\nfrom abc import abstractmethod\nfrom kombu import binding\nfrom tqdm import tqdm\nfrom functools import lru_cache\nfrom threading import Thread\nfrom math import ceil\nfrom copy import copy\n\nfrom .pos import Position\nfrom .base import BaseConsumer\nfrom .event import SignalEventPct, OrderEvent\nfrom .conf import LONG, SHORT, EXIT, MKT, BUY, SELL, LOCAL_TZ\nfrom .util import clean_timestamp\nfrom .errors import OverFilling\n\nlogger = logging.getLogger('Strategy')\n\n\n\nclass BaseStrategy(BaseConsumer):\n\t\"\"\"Strategy is an abstract base class providing an interface for\n\tall subsequent (inherited) strategy handling objects.\n\n\tGoal\n\t----\n\tThe goal of a (derived) Strategy object \n\t- based on the inbound 'Tick', calcualte signals\n\t- 'Signal' is at the symbol level which will be published\n\n\tNote\n\t----\n\tThis is designed to work both with historic and live data as\n\tthe Strategy object is agnostic to the data source,\n\tsince it obtains the 'Tick' object from MarketEvent message\n\t\"\"\"\n\tdef __init__(\n\t\tself, symbol_list, allocation, freq, positions,\n\t\tstart, end, warmup=0, fixed_allocation=True,\n\t\tbatch_size=10000\n\t):\n\t\t\"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy 
the range of data to test\n\t\t\"\"\"\n\t\tn = ceil(freq.one_day)\n\t\tnum_pos = len(positions)\n\n\t\t# getting neccesary parameters\n\t\tself.symbol_list = symbol_list\n\t\tself.freq = freq\n\t\tself.warmup = warmup * n\n\n\t\tif start:\n\t\t\tself.start_dt = clean_timestamp(start)\n\n\t\tif end:\n\t\t\tself.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1, days=1)\n\n\n\t\t# allocation parameters for tracking portfolio\n\t\tself.allocation = allocation\n\t\tself.cash = allocation\n\t\tself.commission = 0\n\t\tself.fixed_allocation = fixed_allocation\n\n\t\tpos_dict = {}\n\t\tfor perm_tick, v in positions.items():\n\t\t\t# want to have position, must know its market ticks for decision\n\t\t\tif perm_tick not in self.symbol_list:\n\t\t\t\tself.symbol_list.append(perm_tick)\n\n\t\t\tpos = Position(\n\t\t\t\tperm_tick,\n\t\t\t\tpct_portfolio=v.get('pct_portfolio', 1/num_pos),\n\t\t\t\trebalance=v.get('rebalance', 0) * n,\n\t\t\t\thard_stop=v.get('hard_stop', 0),\n\t\t\t)\n\t\t\tpos_dict[perm_tick] = pos\n\t\tself.pos = pos_dict\n\n\t\t# starting is always 0, it will increment itself every market tick\n\t\tself.t = 0\n\t\tself._hist = []\n\t\tself.batch_size = batch_size\n\n\t\tsuper().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n\n\t@abstractmethod\n\tdef calculate_signals(self):\n\t\t\"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n\t\traise NotImplementedError(\n\t\t\t\"Should implement calculate_signals()\\n\" + \\\n\t\t\t\"By calling this method to calculate 'Signal' Events\"\n\t\t)\n\n\tdef subscriptions(self):\n\t\treturn [\n\t\t\t('ack-reg-feed', self.id, self.on_ack_reg_feed),\n\t\t\t('ack-dereg_feed', self.id, self.on_ack_dereg_feed),\n\t\t\t('ack-reg-exe', self.id, self.on_ack_reg_exe),\n\t\t\t('ack-dereg-exe', self.id, self.on_ack_dereg_exe),\n\t\t\t('eod', self.id, self.on_eod),\n\t\t\t('tick', self.id, self.on_market),\n\t\t\t('fill', self.id, self.on_fill),\n\t\t]\n\t\t\n\tdef update_data(self, 
ticks):\n\t\tpass\n\n\tdef on_hard_stop(self, symbol):\n\t\tpass\n\n\tdef on_rebalance(self, symbol):\n\t\tpass\n\n\tdef has_position(self, symbol):\n\t\treturn self.pos[symbol].has_position\n\n\tdef has_open_orders(self, symbol):\n\t\treturn self.pos[symbol].has_open_orders\n\n\tdef has_long(self, symbol):\n\t\treturn self.pos[symbol].has_long\n\n\tdef has_short(self, symbol):\n\t\treturn self.pos[symbol].has_short\n\n\t@property\n\tdef nav(self):\n\t\t\"\"\"Net Account Value / Net Liquidating Value\"\"\"\n\t\treturn sum(pos.mv for pos in self.pos.values()) + self.cash\n\n\t@property\n\tdef total_cost(self):\n\t\treturn sum(pos.cost for pos in self.pos.values())\n\n\t@property\n\tdef total_bp(self):\n\t\tif self.fixed_allocation:\n\t\t\treturn self.allocation\n\t\telse:\n\t\t\treturn self.nav\n\n\t@property\n\tdef avaliable_bp(self):\n\t\treturn self.total_bp - self.total_cost\n\n\tdef start(self):\n\t\twhile self.status != 'RUNNING':\t\n\t\t\ttime.sleep(2)\n\n\t\t# setting up progress bar\n\t\tself._pbar = tqdm(\n\t\t\ttotal=int(np.ceil(\n\t\t\t\tpd.bdate_range(self.start_dt, self.end_dt).size\n\t\t\t\t* np.ceil(self.freq.one_day)\n\t\t\t)),\n\t\t\tminiters=int(np.ceil(self.freq.one_day)),\n\t\t\tunit=' tick<{}>'.format(self.freq.value),\n\t\t)\n\n\t\t# publish event to get started\n\t\tlogger.info('Warming up Strategy')\n\t\tself.basic_publish('warmup', sender=self.id)\n\t\tlogger.info('Really Starting up calculating Signals')\n\t\tself.basic_publish('next', sender=self.id)\n\n\n\tdef on_ack_reg_feed(self, oid, body):\n\t\tself.required['feed'] = True\n\n\tdef on_ack_reg_exe(self, oid, body):\n\t\tself.required['exe'] = True\n\n\tdef on_ack_dereg_feed(self, oid, body):\n\t\tself.required['feed'] = False\n\n\tdef on_ack_dereg_exe(self, oid, body):\n\t\tself.required['exe'] = False\n\n\n\tdef on_eod(self, oid, body):\n\t\t\"\"\"Handlering End of Data Event\"\"\"\n\t\tself._pbar.update(self._pbar.total - 
self._pbar.n)\n\t\tself._pbar.close()\n\n\t\tself.basic_publish('dereg-feed', sender=self.id)\n\t\tself.basic_publish('dereg-exe', sender=self.id)\n\n\t\tself._stop()\n\n\n\tdef on_fill(self, oid, body):\n\t\t\"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n\t\tlogger.info('Consuming filled Order')\n\t\tfill = body['fill']\n\n\t\t# update the position first\n\t\tself.pos[fill.symbol].on_fill(fill)\n\n\t\t# getting data from the fill event\n\t\tQ = fill.quantity\n\t\tK, D, C = fill.fill_cost, fill.fill_type, fill.commission\n\n\t\tcost = D.value * K * Q\n\n\t\tself.commission += C\n\t\tself.cash -= cost + C\n\n\n\tdef on_market(self, oid, body):\n\t\t\"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n\t\tif body['freq'] != self.freq: return\n\n\t\tticks = body['ticks']\n\t\tself._update_data(ticks)\n\n\t\tif self.t >= self.warmup:\n\t\t\tself._calculate_signals()\n\n\t\t\t# publish generated signals\n\t\t\tequity = self.total_bp\n\t\t\tbp = copy(self.avaliable_bp) # current snap_shot of buying power\n\t\t\tfor S, pos in self.pos.items():\n\t\t\t\tfor order, lvl in pos.generate_orders(equity):\n\t\t\t\t\tused_bp = self.on_order(order, lvl, bp)\n\t\t\t\t\tbp -= used_bp\n\t\t\t\t\n\t\t\t# save old strategy performance history\n\t\t\tself._pbar.update(1)\n\t\t\n\t\t# if ticks.timestamp >= self.start_dt:\n\t\t\t# self.basic_publish('next', sender=self.id)\n\n\t\tif 
self.t >= self.warmup:\n\t\t\tself._save_positions()\n\n\n\tdef on_order(self, order, lvl, bp):\n\t\t\"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n\t\tS = order.symbol\n\n\t\tneed_bp = order.quantity * self.ticks[S].close\n\t\tif need_bp <= bp: # have enough buying power to place order\n\t\t\tused_bp = need_bp\n\n\t\t\tif lvl == 'hard_stop':\n\t\t\t\tself.on_hard_stop(S)\n\t\t\telif lvl == 'rebalance':\n\t\t\t\tself.on_rebalance(S)\n\n\t\t\tself.pos[order.symbol].confirm_order(order)\n\t\t\tlogger.info(\n\t\t\t\t'Publish Order={} for Strategy={}'\n\t\t\t\t.format(order, self.id)\n\t\t\t)\n\t\t\tself.basic_publish('order', sender=self.id, order=order)\n\t\telse:\n\t\t\tused_bp = 0\n\t\treturn used_bp\n\n\n\tdef generate_signal(self, symbol, signal_type, **kws):\n\t\t\"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n\t\tself.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n\n\tdef _calculate_signals(self):\n\t\t# update existing position information\n\t\tfor pos in self.pos.values():\n\t\t\tpos._calculate_signals()\n\n\t\tself.calculate_signals()\n\n\n\tdef _update_data(self, ticks):\n\t\t\"\"\"Update the existing state of strategies\n\t\t- based on given market 
observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n\t\tself.ticks = ticks\n\t\tself.t += 1\n\n\t\tfor S, pos in self.pos.items():\n\t\t\tpos._update_data(ticks[S])\n\n\t\tself.update_data(ticks)\n\n\n\tdef _save_positions(self):\n\t\toutput = {\n\t\t\t'timestamp': self.ticks.timestamp, 't': self.t,\n\t\t\t'cash': self.cash, 'commission': self.commission,\n\t\t\t'nav': self.nav,\n\t\t}\n\t\tfor k, v in self.pos.items():\n\t\t\toutput[str(k)+'_quantity'] = v.quantity\n\t\t\toutput[str(k)+'_mv'] = v.mv\n\n\t\tself._hist.append(output)\n\t\t",
"step-ids": [
18,
19,
28,
30,
32
]
}
|
[
18,
19,
28,
30,
32
] |
# -*- coding: utf-8 -*-
import sys
import xlrd
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
param = sys.argv
print "Hello:" + param[0]
# ファイルのオープン
book = xlrd.open_workbook('sample.xls')
# シートの選択
sheet = book.sheet_by_name(u"Sheet1")
# sheet = book.sheet_by_index(0)
plot_x = np.zeros(sheet.nrows-1, dtype=np.float64)
plot_y = np.zeros(sheet.nrows-1, dtype=np.float64)
for row in range(sheet.nrows):
if row==0:
plt.xlabel(sheet.cell(0,1).value)
plt.ylabel(sheet.cell(0,2).value)
pass
elif row>=1:
plot_x[row-1] = float(sheet.cell(row,1).value)
plot_y[row-1] = float(sheet.cell(row,2).value)
plt.xlim([0,100])
plt.ylim([0,50])
plt.plot(plot_x, plot_y,'o',color='r', label='test1')
plt.title(u'排出量')
plt.legend(loc='lower right') # 凡例表示
plt.show()
|
normal
|
{
"blob_id": "dacd4334433eb323ce732c96f680fb7b9333721a",
"index": 2268,
"step-1": "# -*- coding: utf-8 -*-\n\nimport sys\nimport xlrd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n\tparam = sys.argv\n\tprint \"Hello:\" + param[0]\n\n\t# ファイルのオープン\n\tbook = xlrd.open_workbook('sample.xls')\n\n\t# シートの選択\n\tsheet = book.sheet_by_name(u\"Sheet1\")\n#\tsheet = book.sheet_by_index(0)\n\n\tplot_x = np.zeros(sheet.nrows-1, dtype=np.float64)\n\tplot_y = np.zeros(sheet.nrows-1, dtype=np.float64)\n\n\tfor row in range(sheet.nrows):\n\t\tif row==0:\n\t\t\tplt.xlabel(sheet.cell(0,1).value)\n\t\t\tplt.ylabel(sheet.cell(0,2).value)\n\t\t\tpass\n\t\telif row>=1:\n\t\t\tplot_x[row-1] = float(sheet.cell(row,1).value)\n\t\t\tplot_y[row-1] = float(sheet.cell(row,2).value)\n\t\t\t\n\tplt.xlim([0,100])\n\tplt.ylim([0,50])\n\tplt.plot(plot_x, plot_y,'o',color='r', label='test1')\n\tplt.title(u'排出量')\n\tplt.legend(loc='lower right') # 凡例表示\n\tplt.show()\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cgitb.enable()
sys.stdout.write('Content-Type: application/octet-stream\n\n')
sys.stdout.write('yes' if is_admin() else 'no')
sys.stdout.flush()
<|reserved_special_token_1|>
import cgitb
import sys
from auth import is_admin
cgitb.enable()
sys.stdout.write('Content-Type: application/octet-stream\n\n')
sys.stdout.write('yes' if is_admin() else 'no')
sys.stdout.flush()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import cgitb
import sys
from auth import is_admin
cgitb.enable()
sys.stdout.write('Content-Type: application/octet-stream\n\n')
sys.stdout.write('yes' if is_admin() else 'no')
sys.stdout.flush()
|
flexible
|
{
"blob_id": "be9972d899a167a8ca2728960e55cda538793cc5",
"index": 1576,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-3": "import cgitb\nimport sys\nfrom auth import is_admin\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-4": "#!/usr/bin/env python3\nimport cgitb\nimport sys\n\nfrom auth import is_admin\n\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
if __name__ == '__main__':
with open('./input/day6', 'r') as f:
orbit_input = [l.strip().split(')') for l in f.readlines()]
planets = [planet[0] for planet in orbit_input]
planets1 = [planet[1] for planet in orbit_input]
planets = set(planets + planets1)
system = {}
print(orbit_input)
for inp in orbit_input:
system[inp[1]] = inp[0]
def compute_orbits(planet, system):
if planet == 'COM':
return 0
next_p = system[planet]
return 1 + compute_orbits(next_p, system)
num_orb = 0
for planet in planets:
num_orb = num_orb + compute_orbits(planet, system)
print(num_orb)
<|reserved_special_token_1|>
if __name__== '__main__':
with open('./input/day6', 'r') as f:
orbit_input = [l.strip().split(")") for l in f.readlines()]
planets = [planet[0] for planet in orbit_input]
planets1 = [planet[1] for planet in orbit_input]
planets = set(planets+planets1)
system = {}
print(orbit_input)
for inp in orbit_input:
system[inp[1]] = inp[0]
def compute_orbits(planet, system):
if planet == 'COM':
return 0
next_p = system[planet]
return 1 + compute_orbits(next_p, system)
num_orb = 0
for planet in planets:
num_orb = num_orb + compute_orbits(planet, system)
print(num_orb)
|
flexible
|
{
"blob_id": "96778a238d8ed8ae764d0cf8ec184618dc7cfe18",
"index": 5790,
"step-1": "<mask token>\n",
"step-2": "if __name__ == '__main__':\n with open('./input/day6', 'r') as f:\n orbit_input = [l.strip().split(')') for l in f.readlines()]\n planets = [planet[0] for planet in orbit_input]\n planets1 = [planet[1] for planet in orbit_input]\n planets = set(planets + planets1)\n system = {}\n print(orbit_input)\n for inp in orbit_input:\n system[inp[1]] = inp[0]\n\n def compute_orbits(planet, system):\n if planet == 'COM':\n return 0\n next_p = system[planet]\n return 1 + compute_orbits(next_p, system)\n num_orb = 0\n for planet in planets:\n num_orb = num_orb + compute_orbits(planet, system)\n print(num_orb)\n",
"step-3": "\nif __name__== '__main__':\n with open('./input/day6', 'r') as f:\n orbit_input = [l.strip().split(\")\") for l in f.readlines()]\n planets = [planet[0] for planet in orbit_input]\n planets1 = [planet[1] for planet in orbit_input]\n planets = set(planets+planets1)\n system = {}\n print(orbit_input)\n\n for inp in orbit_input:\n system[inp[1]] = inp[0]\n\n def compute_orbits(planet, system):\n if planet == 'COM':\n return 0\n next_p = system[planet]\n return 1 + compute_orbits(next_p, system)\n\n num_orb = 0\n for planet in planets:\n num_orb = num_orb + compute_orbits(planet, system)\n print(num_orb)\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def test_create_tensor_dataset_from_arrays(tmp_dir_fixture):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_image_dataset_functional():
from dtoolai.data import ImageDataSet
ids_uri = 'http://bit.ly/2Uho6tN'
ids = ImageDataSet(ids_uri)
assert ids.name == 'tiny.image.dataset.example'
assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'
assert len(ids) == 6
assert ids.input_channels == 3
assert ids.dim == 256
im, label = ids[0]
assert isinstance(im, np.ndarray)
assert label == 0
def test_create_tensor_dataset_from_arrays(tmp_dir_fixture):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_tensor_dataset_functional():
from dtoolai.data import TensorDataSet
tds_uri = os.path.join(TEST_SAMPLE_DATA, 'example_tensor_dataset')
tds = TensorDataSet(tds_uri)
assert tds.name == 'example_tensor_dataset'
assert tds.uuid == '6b6f9a0e-8547-4903-9090-6dcfc6abdf83'
assert len(tds) == 100
data, label = tds[0]
assert data.shape == (1, 9, 9)
assert data[0][0][0] == 0
assert label == 0
assert tds.input_channels == 1
assert tds.dim == 9
def test_image_dataset_functional():
from dtoolai.data import ImageDataSet
ids_uri = 'http://bit.ly/2Uho6tN'
ids = ImageDataSet(ids_uri)
assert ids.name == 'tiny.image.dataset.example'
assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'
assert len(ids) == 6
assert ids.input_channels == 3
assert ids.dim == 256
im, label = ids[0]
assert isinstance(im, np.ndarray)
assert label == 0
def test_create_tensor_dataset_from_arrays(tmp_dir_fixture):
pass
<|reserved_special_token_1|>
import os
import numpy as np
from . import tmp_dir_fixture
from . import TEST_SAMPLE_DATA
def test_tensor_dataset_functional():
from dtoolai.data import TensorDataSet
tds_uri = os.path.join(TEST_SAMPLE_DATA, 'example_tensor_dataset')
tds = TensorDataSet(tds_uri)
assert tds.name == 'example_tensor_dataset'
assert tds.uuid == '6b6f9a0e-8547-4903-9090-6dcfc6abdf83'
assert len(tds) == 100
data, label = tds[0]
assert data.shape == (1, 9, 9)
assert data[0][0][0] == 0
assert label == 0
assert tds.input_channels == 1
assert tds.dim == 9
def test_image_dataset_functional():
from dtoolai.data import ImageDataSet
ids_uri = 'http://bit.ly/2Uho6tN'
ids = ImageDataSet(ids_uri)
assert ids.name == 'tiny.image.dataset.example'
assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'
assert len(ids) == 6
assert ids.input_channels == 3
assert ids.dim == 256
im, label = ids[0]
assert isinstance(im, np.ndarray)
assert label == 0
def test_create_tensor_dataset_from_arrays(tmp_dir_fixture):
pass
<|reserved_special_token_1|>
import os
import numpy as np
from . import tmp_dir_fixture
from . import TEST_SAMPLE_DATA
def test_tensor_dataset_functional():
from dtoolai.data import TensorDataSet
tds_uri = os.path.join(TEST_SAMPLE_DATA, "example_tensor_dataset")
tds = TensorDataSet(tds_uri)
assert tds.name == "example_tensor_dataset"
assert tds.uuid == "6b6f9a0e-8547-4903-9090-6dcfc6abdf83"
assert len(tds) == 100
data, label = tds[0]
assert data.shape == (1, 9, 9)
assert data[0][0][0] == 0
assert label == 0
assert tds.input_channels == 1
assert tds.dim == 9
def test_image_dataset_functional():
from dtoolai.data import ImageDataSet
ids_uri = "http://bit.ly/2Uho6tN"
ids = ImageDataSet(ids_uri)
assert ids.name == "tiny.image.dataset.example"
assert ids.uuid == "839ae396-74a7-44f9-9b08-436be53b1090"
assert len(ids) == 6
assert ids.input_channels == 3
assert ids.dim == 256
im, label = ids[0]
assert isinstance(im, np.ndarray)
assert label == 0
def test_create_tensor_dataset_from_arrays(tmp_dir_fixture):
pass
|
flexible
|
{
"blob_id": "97dfcce6e82ef33334b49de72bb126150dfef196",
"index": 2844,
"step-1": "<mask token>\n\n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n",
"step-2": "<mask token>\n\n\ndef test_image_dataset_functional():\n from dtoolai.data import ImageDataSet\n ids_uri = 'http://bit.ly/2Uho6tN'\n ids = ImageDataSet(ids_uri)\n assert ids.name == 'tiny.image.dataset.example'\n assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'\n assert len(ids) == 6\n assert ids.input_channels == 3\n assert ids.dim == 256\n im, label = ids[0]\n assert isinstance(im, np.ndarray)\n assert label == 0\n\n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n",
"step-3": "<mask token>\n\n\ndef test_tensor_dataset_functional():\n from dtoolai.data import TensorDataSet\n tds_uri = os.path.join(TEST_SAMPLE_DATA, 'example_tensor_dataset')\n tds = TensorDataSet(tds_uri)\n assert tds.name == 'example_tensor_dataset'\n assert tds.uuid == '6b6f9a0e-8547-4903-9090-6dcfc6abdf83'\n assert len(tds) == 100\n data, label = tds[0]\n assert data.shape == (1, 9, 9)\n assert data[0][0][0] == 0\n assert label == 0\n assert tds.input_channels == 1\n assert tds.dim == 9\n\n\ndef test_image_dataset_functional():\n from dtoolai.data import ImageDataSet\n ids_uri = 'http://bit.ly/2Uho6tN'\n ids = ImageDataSet(ids_uri)\n assert ids.name == 'tiny.image.dataset.example'\n assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'\n assert len(ids) == 6\n assert ids.input_channels == 3\n assert ids.dim == 256\n im, label = ids[0]\n assert isinstance(im, np.ndarray)\n assert label == 0\n\n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n",
"step-4": "import os\nimport numpy as np\nfrom . import tmp_dir_fixture\nfrom . import TEST_SAMPLE_DATA\n\n\ndef test_tensor_dataset_functional():\n from dtoolai.data import TensorDataSet\n tds_uri = os.path.join(TEST_SAMPLE_DATA, 'example_tensor_dataset')\n tds = TensorDataSet(tds_uri)\n assert tds.name == 'example_tensor_dataset'\n assert tds.uuid == '6b6f9a0e-8547-4903-9090-6dcfc6abdf83'\n assert len(tds) == 100\n data, label = tds[0]\n assert data.shape == (1, 9, 9)\n assert data[0][0][0] == 0\n assert label == 0\n assert tds.input_channels == 1\n assert tds.dim == 9\n\n\ndef test_image_dataset_functional():\n from dtoolai.data import ImageDataSet\n ids_uri = 'http://bit.ly/2Uho6tN'\n ids = ImageDataSet(ids_uri)\n assert ids.name == 'tiny.image.dataset.example'\n assert ids.uuid == '839ae396-74a7-44f9-9b08-436be53b1090'\n assert len(ids) == 6\n assert ids.input_channels == 3\n assert ids.dim == 256\n im, label = ids[0]\n assert isinstance(im, np.ndarray)\n assert label == 0\n\n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n",
"step-5": "import os\n\nimport numpy as np\n\nfrom . import tmp_dir_fixture\nfrom . import TEST_SAMPLE_DATA\n\n\n\ndef test_tensor_dataset_functional():\n\n from dtoolai.data import TensorDataSet\n\n tds_uri = os.path.join(TEST_SAMPLE_DATA, \"example_tensor_dataset\")\n\n tds = TensorDataSet(tds_uri)\n assert tds.name == \"example_tensor_dataset\"\n assert tds.uuid == \"6b6f9a0e-8547-4903-9090-6dcfc6abdf83\"\n assert len(tds) == 100\n\n data, label = tds[0]\n assert data.shape == (1, 9, 9)\n assert data[0][0][0] == 0\n assert label == 0\n\n assert tds.input_channels == 1\n assert tds.dim == 9\n\n\ndef test_image_dataset_functional():\n\n from dtoolai.data import ImageDataSet\n\n ids_uri = \"http://bit.ly/2Uho6tN\"\n\n ids = ImageDataSet(ids_uri)\n assert ids.name == \"tiny.image.dataset.example\"\n assert ids.uuid == \"839ae396-74a7-44f9-9b08-436be53b1090\"\n assert len(ids) == 6\n\n assert ids.input_channels == 3\n assert ids.dim == 256\n\n im, label = ids[0]\n assert isinstance(im, np.ndarray)\n assert label == 0\n \n\ndef test_create_tensor_dataset_from_arrays(tmp_dir_fixture):\n pass\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):
"""
wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika
arg:
str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi
wartosciami parametrow (tempo itd.)
wyjscie:
dict: parametry - zapisane nazwy i wartosci uzywanych parametrow
"""
import re
import numpy as np
ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=
1, skip_footer=1, delimiter=':')
parametry = {}
if ustawienia.shape == (2,):
parametry[re.sub('"', '', ustawienia[0])] = ustawienia[1]
else:
for l in ustawienia:
parametry[re.sub('"', '', l[0])] = l[1]
try:
parametry['tryb'] = parametry['tryb'].strip()
except KeyError:
print('Podaj tryb odczytu!')
try:
parametry['bpm'] = int(parametry['bpm'])
except KeyError:
pass
try:
parametry['freq'] = int(parametry['freq'])
except KeyError:
pass
try:
parametry['loud'] = float(parametry['loud'])
except KeyError:
pass
try:
parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]
except KeyError:
pass
return parametry
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):
"""
wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika
arg:
str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi
wartosciami parametrow (tempo itd.)
wyjscie:
dict: parametry - zapisane nazwy i wartosci uzywanych parametrow
"""
import re
import numpy as np
ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=
1, skip_footer=1, delimiter=':')
parametry = {}
if ustawienia.shape == (2,):
parametry[re.sub('"', '', ustawienia[0])] = ustawienia[1]
else:
for l in ustawienia:
parametry[re.sub('"', '', l[0])] = l[1]
try:
parametry['tryb'] = parametry['tryb'].strip()
except KeyError:
print('Podaj tryb odczytu!')
try:
parametry['bpm'] = int(parametry['bpm'])
except KeyError:
pass
try:
parametry['freq'] = int(parametry['freq'])
except KeyError:
pass
try:
parametry['loud'] = float(parametry['loud'])
except KeyError:
pass
try:
parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]
except KeyError:
pass
return parametry
def zmiana_glosnosci(utwor, procent=0):
"""
zmienia glosnosc utworu (jego amplitudy)
arg:
numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony
lub zciszony
float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga
wartosci od -1 do 1, dla 0 brak zmian, dla 1 - "100%
glosniej", dla -1 "100% ciszej"
wyjscie:
numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor
"""
if -1 <= procent <= 1:
mnoznik = 0
if procent < 0:
mnoznik = 1 + procent
else:
maks_ampli = 0
maks_ampli = max(abs(utwor))
mnoznik = 32767 / maks_ampli
mnoznik = 1 + (mnoznik - 1) * procent
glosniej = mnoznik * utwor
glosniej = glosniej.astype(np.int16)
return glosniej
else:
print('Podaj procent z zakresu -1 do 1')
def tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=
44100, wages=None, loud=0):
"""
glowna funkcja generujaca cala piosenke
arg:
numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca
definicje kolejnych cwiercnut (co ma byc grane
w danej cwiercnucie)
bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest
zapisana (nie jest, gdy tracki mialy nieodpowiednia
liczbe wierszy lub kolumn)
int: bpm - tempo piosenki w jednostce bpm
int: freq - ilosc probek w jednej sekundzie
list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1
probka, 2 etc.)
float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na
maxa, -1 - sciszamy na maxa
wyjscie:
numpy.ndarray (numpy.int16): gotowy utwór
"""
if czy_pelna == False:
print('Nie utworzono piosenki')
return None
else:
import numpy as np
import scipy.io.wavfile
t_cwiercnuty = 60 / bpm
ile_cwiercnut = macierz_piosenki.shape[0]
kanaly = macierz_piosenki.shape[1]
frekw = freq
czas_utworu = ile_cwiercnut * t_cwiercnuty
ilosc_probek = int(frekw * czas_utworu)
rozne_sample = np.unique(macierz_piosenki)
sample_co = {}
sample_frekw = {}
sample_dl = {}
for ktory_sampel in rozne_sample:
if ktory_sampel != '--':
plik = ''.join(['sample', ktory_sampel, '.wav'])
sample_frekw[ktory_sampel], sample_co[ktory_sampel
] = scipy.io.wavfile.read(plik)
sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],
axis=1) / 32767
sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /
max(np.abs(sample_co[ktory_sampel])) * 32767)
sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]
else:
sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)
sample_frekw[ktory_sampel] = frekw
sample_dl[ktory_sampel] = 0
if wages is None:
wages = np.ones((1, kanaly))
else:
wages = np.array(wages).reshape(1, kanaly)
T = np.linspace(0, czas_utworu, ilosc_probek)
for wiersz in range(0, ile_cwiercnut):
sample = []
dlugosci = []
for i in range(0, kanaly):
sampus = macierz_piosenki[wiersz, i]
sample.append(sample_co[sampus])
dlugosci.append(sample_dl[sampus])
maksik = max(dlugosci)
pusty = np.int16(np.zeros((len(sample), maksik)))
for k in range(0, kanaly):
pusty[k][0:dlugosci[k]] = sample[k]
cwiercnuta = np.dot(wages, pusty)
cwiercnuta = cwiercnuta[0]
poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)
if poczatek_cwiercnuty + maksik > ilosc_probek:
T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik
] = cwiercnuta[0:len(T[poczatek_cwiercnuty:
poczatek_cwiercnuty + maksik])]
else:
T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik
] += cwiercnuta
T = np.array(T, dtype=np.int16)
T = zmiana_glosnosci(T, loud)
return T
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Laduje modul o nazwie: ' + __name__)
<|reserved_special_token_0|>
def wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):
"""
wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika
arg:
str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi
wartosciami parametrow (tempo itd.)
wyjscie:
dict: parametry - zapisane nazwy i wartosci uzywanych parametrow
"""
import re
import numpy as np
ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=
1, skip_footer=1, delimiter=':')
parametry = {}
if ustawienia.shape == (2,):
parametry[re.sub('"', '', ustawienia[0])] = ustawienia[1]
else:
for l in ustawienia:
parametry[re.sub('"', '', l[0])] = l[1]
try:
parametry['tryb'] = parametry['tryb'].strip()
except KeyError:
print('Podaj tryb odczytu!')
try:
parametry['bpm'] = int(parametry['bpm'])
except KeyError:
pass
try:
parametry['freq'] = int(parametry['freq'])
except KeyError:
pass
try:
parametry['loud'] = float(parametry['loud'])
except KeyError:
pass
try:
parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]
except KeyError:
pass
return parametry
def zmiana_glosnosci(utwor, procent=0):
"""
zmienia glosnosc utworu (jego amplitudy)
arg:
numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony
lub zciszony
float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga
wartosci od -1 do 1, dla 0 brak zmian, dla 1 - "100%
glosniej", dla -1 "100% ciszej"
wyjscie:
numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor
"""
if -1 <= procent <= 1:
mnoznik = 0
if procent < 0:
mnoznik = 1 + procent
else:
maks_ampli = 0
maks_ampli = max(abs(utwor))
mnoznik = 32767 / maks_ampli
mnoznik = 1 + (mnoznik - 1) * procent
glosniej = mnoznik * utwor
glosniej = glosniej.astype(np.int16)
return glosniej
else:
print('Podaj procent z zakresu -1 do 1')
def tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=
44100, wages=None, loud=0):
"""
glowna funkcja generujaca cala piosenke
arg:
numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca
definicje kolejnych cwiercnut (co ma byc grane
w danej cwiercnucie)
bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest
zapisana (nie jest, gdy tracki mialy nieodpowiednia
liczbe wierszy lub kolumn)
int: bpm - tempo piosenki w jednostce bpm
int: freq - ilosc probek w jednej sekundzie
list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1
probka, 2 etc.)
float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na
maxa, -1 - sciszamy na maxa
wyjscie:
numpy.ndarray (numpy.int16): gotowy utwór
"""
if czy_pelna == False:
print('Nie utworzono piosenki')
return None
else:
import numpy as np
import scipy.io.wavfile
t_cwiercnuty = 60 / bpm
ile_cwiercnut = macierz_piosenki.shape[0]
kanaly = macierz_piosenki.shape[1]
frekw = freq
czas_utworu = ile_cwiercnut * t_cwiercnuty
ilosc_probek = int(frekw * czas_utworu)
rozne_sample = np.unique(macierz_piosenki)
sample_co = {}
sample_frekw = {}
sample_dl = {}
for ktory_sampel in rozne_sample:
if ktory_sampel != '--':
plik = ''.join(['sample', ktory_sampel, '.wav'])
sample_frekw[ktory_sampel], sample_co[ktory_sampel
] = scipy.io.wavfile.read(plik)
sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],
axis=1) / 32767
sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /
max(np.abs(sample_co[ktory_sampel])) * 32767)
sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]
else:
sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)
sample_frekw[ktory_sampel] = frekw
sample_dl[ktory_sampel] = 0
if wages is None:
wages = np.ones((1, kanaly))
else:
wages = np.array(wages).reshape(1, kanaly)
T = np.linspace(0, czas_utworu, ilosc_probek)
for wiersz in range(0, ile_cwiercnut):
sample = []
dlugosci = []
for i in range(0, kanaly):
sampus = macierz_piosenki[wiersz, i]
sample.append(sample_co[sampus])
dlugosci.append(sample_dl[sampus])
maksik = max(dlugosci)
pusty = np.int16(np.zeros((len(sample), maksik)))
for k in range(0, kanaly):
pusty[k][0:dlugosci[k]] = sample[k]
cwiercnuta = np.dot(wages, pusty)
cwiercnuta = cwiercnuta[0]
poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)
if poczatek_cwiercnuty + maksik > ilosc_probek:
T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik
] = cwiercnuta[0:len(T[poczatek_cwiercnuty:
poczatek_cwiercnuty + maksik])]
else:
T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik
] += cwiercnuta
T = np.array(T, dtype=np.int16)
T = zmiana_glosnosci(T, loud)
return T
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Laduje modul o nazwie: ' + __name__)
import numpy as np
def wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):
"""
wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika
arg:
str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi
wartosciami parametrow (tempo itd.)
wyjscie:
dict: parametry - zapisane nazwy i wartosci uzywanych parametrow
"""
import re
import numpy as np
ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=
1, skip_footer=1, delimiter=':')
parametry = {}
if ustawienia.shape == (2,):
parametry[re.sub('"', '', ustawienia[0])] = ustawienia[1]
else:
for l in ustawienia:
parametry[re.sub('"', '', l[0])] = l[1]
try:
parametry['tryb'] = parametry['tryb'].strip()
except KeyError:
print('Podaj tryb odczytu!')
try:
parametry['bpm'] = int(parametry['bpm'])
except KeyError:
pass
try:
parametry['freq'] = int(parametry['freq'])
except KeyError:
pass
try:
parametry['loud'] = float(parametry['loud'])
except KeyError:
pass
try:
parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]
except KeyError:
pass
return parametry
def zmiana_glosnosci(utwor, procent=0):
"""
zmienia glosnosc utworu (jego amplitudy)
arg:
numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony
lub zciszony
float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga
wartosci od -1 do 1, dla 0 brak zmian, dla 1 - "100%
glosniej", dla -1 "100% ciszej"
wyjscie:
numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor
"""
if -1 <= procent <= 1:
mnoznik = 0
if procent < 0:
mnoznik = 1 + procent
else:
maks_ampli = 0
maks_ampli = max(abs(utwor))
mnoznik = 32767 / maks_ampli
mnoznik = 1 + (mnoznik - 1) * procent
glosniej = mnoznik * utwor
glosniej = glosniej.astype(np.int16)
return glosniej
else:
print('Podaj procent z zakresu -1 do 1')
def tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=44100,
                       wages=None, loud=0):
    """
    Assemble the whole song from the quarter-note matrix.

    Args:
        macierz_piosenki (numpy.ndarray of str, U2): one row per quarter
            note; each entry is a two-character sample id ("01", "02", ...)
            or "--" for silence, one column per track/channel.
        czy_pelna (bool): False when the track matrix could not be built
            (wrong number of rows/columns); nothing is generated then.
        bpm (int): tempo in beats per minute.
        freq (int): number of output samples per second.
        wages (list of float): mixing weight of each channel; defaults to
            equal weights of 1.
        loud (float): loudness change in [-1, 1] forwarded to
            zmiana_glosnosci (0 = unchanged).

    Returns:
        numpy.ndarray of numpy.int16: the rendered song, or None when
        czy_pelna is False.
    """
    if czy_pelna == False:
        print('Nie utworzono piosenki')
        return None
    else:
        import numpy as np
        import scipy.io.wavfile
        t_cwiercnuty = 60 / bpm  # duration of one quarter note [s]
        ile_cwiercnut = macierz_piosenki.shape[0]  # number of quarter notes
        kanaly = macierz_piosenki.shape[1]  # number of tracks
        frekw = freq
        czas_utworu = ile_cwiercnut * t_cwiercnuty
        ilosc_probek = int(frekw * czas_utworu)  # total output samples
        # Each distinct sample file is read only once.
        rozne_sample = np.unique(macierz_piosenki)  # includes '--'
        sample_co = {}     # sample id -> int16 mono amplitude array
        sample_frekw = {}  # sample id -> its sampling rate
        sample_dl = {}     # sample id -> length in samples
        for ktory_sampel in rozne_sample:
            if ktory_sampel != '--':
                plik = ''.join(['sample', ktory_sampel, '.wav'])
                # NOTE(review): the file's own rate is stored but never
                # resampled to `freq` -- assumed to match; confirm.
                sample_frekw[ktory_sampel], sample_co[ktory_sampel] = \
                    scipy.io.wavfile.read(plik)
                # Mix stereo down to mono and renormalise to full int16.
                sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],
                                                  axis=1) / 32767
                sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /
                    max(np.abs(sample_co[ktory_sampel])) * 32767)
                sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]
            else:
                # '--' means silence: a zero-length placeholder.
                sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)
                sample_frekw[ktory_sampel] = frekw
                sample_dl[ktory_sampel] = 0
        if wages is None:
            wages = np.ones((1, kanaly))
        else:
            wages = np.array(wages).reshape(1, kanaly)
        # Output buffer.  BUG FIX: this used to be np.linspace(0,
        # czas_utworu, ilosc_probek), which pre-filled the buffer with a
        # time ramp that was then mixed into the audio; an all-zero buffer
        # is what is intended.
        T = np.zeros(ilosc_probek)
        for wiersz in range(0, ile_cwiercnut):
            sample = []
            dlugosci = []
            for i in range(0, kanaly):
                sampus = macierz_piosenki[wiersz, i]
                sample.append(sample_co[sampus])
                dlugosci.append(sample_dl[sampus])
            # Every sample of this quarter note plays in full; shorter
            # ones are padded with silence up to the longest.
            maksik = max(dlugosci)
            pusty = np.int16(np.zeros((len(sample), maksik)))
            for k in range(0, kanaly):
                pusty[k][0:dlugosci[k]] = sample[k]
            # Weighted mix of all channels for this quarter note.
            cwiercnuta = np.dot(wages, pusty)
            cwiercnuta = cwiercnuta[0]  # (1, x) -> (x,)
            poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)
            if poczatek_cwiercnuty + maksik > ilosc_probek:
                # Tail would overrun the song length: clip it.
                T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik] = \
                    cwiercnuta[0:len(T[poczatek_cwiercnuty:
                                       poczatek_cwiercnuty + maksik])]
            else:
                T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik] += \
                    cwiercnuta
        T = np.array(T, dtype=np.int16)
        # Apply the requested overall loudness.
        T = zmiana_glosnosci(T, loud)
        return T
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Modul do zapisu piosenki (wczytywanie ustawien (defs.txt), tworzenie .wav,
"zglasnianie utworu")
"""
print("Laduje modul o nazwie: "+__name__)
import numpy as np
def wczytywanie_ustawien(plik_konfiguracyjny = "defs.txt"):
    """
    Load the settings file (defs.txt) into a dictionary.

    Args:
        plik_konfiguracyjny (str): name of the configuration file holding
            the parameter values (tempo etc.).

    Returns:
        dict: parameter names mapped to their (converted) values.
    """
    import re
    import numpy as np

    # The first and last lines of the file are decoration; every remaining
    # line is `"name":value`, parsed into a two-column string array.
    ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str,
                               skip_header=1, skip_footer=1, delimiter=":")

    # With a single parameter genfromtxt returns a flat (2,) array, so
    # normalise both cases to a list of (name, value) rows.
    if ustawienia.shape == (2,):
        wiersze = [ustawienia]
    else:
        wiersze = list(ustawienia)

    parametry = {}
    for wiersz in wiersze:
        # Strip the quotes around the parameter name.
        parametry[re.sub('"', '', wiersz[0])] = wiersz[1]

    # 'tryb' is mandatory: warn when it is missing, otherwise trim it.
    try:
        parametry['tryb'] = parametry['tryb'].strip()
    except KeyError:
        print("Podaj tryb odczytu!")

    # Optional parameters: convert in place when present, skip otherwise.
    konwersje = {
        'bpm': int,     # tempo
        'freq': int,    # sampling rate of the output wav
        'loud': float,  # loudness
        'wages': lambda napis: [float(s) for s in napis.split(",")],
    }
    for nazwa, konwersja in konwersje.items():
        try:
            parametry[nazwa] = konwersja(parametry[nazwa])
        except KeyError:
            pass

    return parametry
#b = wczytywanie_ustawien("defs.txt")
#zglasnianie utworu
def zmiana_glosnosci(utwor, procent=0):
    """
    Change the loudness (amplitude) of a track.

    Args:
        utwor (numpy.ndarray of numpy.int16): the audio signal to make
            louder or quieter.
        procent (float): relative loudness change in [-1, 1]; 0 leaves the
            signal unchanged, 1 means "100% louder" (scale up to the int16
            limit), -1 means "100% quieter" (silence).

    Returns:
        numpy.ndarray of numpy.int16: the rescaled signal, or None when
        `procent` is outside [-1, 1] (a message is printed instead).
    """
    if -1 <= procent <= 1:
        if procent < 0:
            # Quieter: scale linearly down to silence at procent == -1.
            mnoznik = 1 + procent
        else:
            # Louder: the loudest sample limits how far we can scale
            # before clipping the int16 range.
            maks_ampli = max(abs(utwor))
            if maks_ampli == 0:
                # BUG FIX: pure silence used to cause a division by zero
                # below; silence cannot be amplified, return it unchanged.
                return utwor.astype(np.int16)
            mnoznik = 32767 / maks_ampli
            # Interpolate between "no change" (1) and the maximal gain.
            mnoznik = 1 + (mnoznik - 1) * procent
        glosniej = mnoznik * utwor
        glosniej = glosniej.astype(np.int16)
        return glosniej
    else:
        print("Podaj procent z zakresu -1 do 1")
#wierszyk1 = zmiana_glosnosci(wierszyk, b['loud'])
#wierszyk1
def tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=44100,
                       wages=None, loud=0):
    """
    Assemble the whole song from the quarter-note matrix.

    Args:
        macierz_piosenki (numpy.ndarray of str, U2): one row per quarter
            note; each entry is a two-character sample id ("01", "02", ...)
            or "--" for silence, one column per track/channel.
        czy_pelna (bool): False when the track matrix could not be built
            (wrong number of rows/columns); nothing is generated then.
        bpm (int): tempo in beats per minute.
        freq (int): number of output samples per second.
        wages (list of float): mixing weight of each channel; defaults to
            equal weights of 1.
        loud (float): loudness change in [-1, 1] forwarded to
            zmiana_glosnosci (0 = unchanged).

    Returns:
        numpy.ndarray of numpy.int16: the rendered song, or None when
        czy_pelna is False.
    """
    if czy_pelna == False:
        print("Nie utworzono piosenki")
        return None
    else:
        import numpy as np
        import scipy.io.wavfile
        t_cwiercnuty = 60 / bpm  # duration of one quarter note [s]
        ile_cwiercnut = macierz_piosenki.shape[0]  # number of quarter notes
        kanaly = macierz_piosenki.shape[1]  # number of tracks
        frekw = freq
        czas_utworu = ile_cwiercnut * t_cwiercnuty
        ilosc_probek = int(frekw * czas_utworu)  # total output samples
        # Each distinct sample file is read only once.
        rozne_sample = np.unique(macierz_piosenki)  # includes '--'
        sample_co = {}     # sample id -> int16 mono amplitude array
        sample_frekw = {}  # sample id -> its sampling rate
        sample_dl = {}     # sample id -> length in samples
        for ktory_sampel in rozne_sample:
            if ktory_sampel != '--':
                plik = ''.join(['sample', ktory_sampel, '.wav'])
                # NOTE(review): the file's own rate is stored but never
                # resampled to `freq` -- assumed to match; confirm.
                sample_frekw[ktory_sampel], sample_co[ktory_sampel] = \
                    scipy.io.wavfile.read(plik)
                # Mix stereo down to mono and renormalise to full int16.
                sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],
                                                  axis=1) / 32767
                sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /
                    max(np.abs(sample_co[ktory_sampel])) * 32767)
                sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]
            else:
                # '--' means silence: a zero-length placeholder.
                sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)
                sample_frekw[ktory_sampel] = frekw
                sample_dl[ktory_sampel] = 0
        if wages is None:
            wages = np.ones((1, kanaly))
        else:
            wages = np.array(wages).reshape(1, kanaly)
        # Output buffer.  BUG FIX: this used to be np.linspace(0,
        # czas_utworu, ilosc_probek), which pre-filled the buffer with a
        # time ramp that was then mixed into the audio; an all-zero buffer
        # is what is intended.
        T = np.zeros(ilosc_probek)
        for wiersz in range(0, ile_cwiercnut):
            sample = []
            dlugosci = []
            for i in range(0, kanaly):
                sampus = macierz_piosenki[wiersz, i]
                sample.append(sample_co[sampus])
                dlugosci.append(sample_dl[sampus])
            # Every sample of this quarter note plays in full; shorter
            # ones are padded with silence up to the longest.
            maksik = max(dlugosci)
            pusty = np.int16(np.zeros((len(sample), maksik)))
            for k in range(0, kanaly):
                pusty[k][0:dlugosci[k]] = sample[k]
            # Weighted mix of all channels for this quarter note.
            cwiercnuta = np.dot(wages, pusty)
            cwiercnuta = cwiercnuta[0]  # (1, x) -> (x,)
            poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)
            if poczatek_cwiercnuty + maksik > ilosc_probek:
                # Tail would overrun the song length: clip it.
                T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik] = \
                    cwiercnuta[0:len(T[poczatek_cwiercnuty:
                                       poczatek_cwiercnuty + maksik])]
            else:
                T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik] += \
                    cwiercnuta
        T = np.array(T, dtype=np.int16)
        # Apply the requested overall loudness.
        T = zmiana_glosnosci(T, loud)
        return T
#pios, k = wczytywanie_sciezek(a)
#wierszyk = tworzenie_piosenki(pios, k, bpm = b['bpm'], freq = b['freq'], \
#wages = b['wages'])
#wierszyk = tworzenie_piosenki(pios, k, **b)
#wierszyk
|
flexible
|
{
"blob_id": "8220a6d33cda5861e74d6236757abbc81685a998",
"index": 6369,
"step-1": "<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n print('Podaj procent z zakresu -1 do 1')\n\n\ndef 
tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\n axis=1) / 32767\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n T = np.linspace(0, czas_utworu, ilosc_probek)\n 
for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-3": "<mask token>\nprint('Laduje modul o nazwie: ' + __name__)\n<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n print('Podaj 
procent z zakresu -1 do 1')\n\n\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\n axis=1) / 32767\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n T = 
np.linspace(0, czas_utworu, ilosc_probek)\n for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-4": "<mask token>\nprint('Laduje modul o nazwie: ' + __name__)\nimport numpy as np\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n 
print('Podaj procent z zakresu -1 do 1')\n\n\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\n axis=1) / 32767\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n 
T = np.linspace(0, czas_utworu, ilosc_probek)\n for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nModul do zapisu piosenki (wczytywanie ustawien (defs.txt), tworzenie .wav,\r\n \"zglasnianie utworu\")\r\n\"\"\"\r\n\r\n\r\nprint(\"Laduje modul o nazwie: \"+__name__)\r\n\r\nimport numpy as np\r\n\r\ndef wczytywanie_ustawien(plik_konfiguracyjny = \"defs.txt\"):\r\n \"\"\" \r\n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\r\n \r\n arg:\r\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \r\n wartosciami parametrow (tempo itd.)\r\n \r\n wyjscie:\r\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\r\n \r\n \"\"\"\r\n import re\r\n import numpy as np\r\n \r\n # wczytuje zawartosc pliku (bez pierwszej i ostatniej linijki, jeden wiersz \r\n # wyjsciowej macierzy, zawiera nazwe parametru i jego wartosc, jako \r\n # oddzielne elementy, zapisane jako stringi)\r\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype = str, \\\r\n skip_header=1, skip_footer=1, delimiter=\":\")\r\n \r\n # tworze slownik, ktory bedzie przechowywal wartosci\r\n parametry = {}\r\n \r\n # pozbywam się \"\" z key\r\n \r\n # jesli mamy 1 parametr (1 linijka w pliku, to ustawienia to zmienna o \r\n # shape = (2,), wiec odwoluje sie bezposrednio do zmiennej ustawienia\r\n if ustawienia.shape == (2,): \r\n parametry[re.sub('\"','',ustawienia[0])] = ustawienia[1]\r\n # jak mamy wiecej parametrow odwoluje sie do kolejnych linijek macierzy \r\n # ustawienia\r\n else:\r\n for l in ustawienia: \r\n parametry[re.sub('\"','',l[0])] = l[1]\r\n \r\n # zamieniamy napisy na odpowiednie wartosci - kontroluje te parametry, wiec\r\n # robie to recznie\r\n \r\n try:\r\n parametry['tryb'] = parametry['tryb'].strip() #tryb\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n print(\"Podaj tryb odczytu!\")\r\n try:\r\n parametry['bpm'] = int(parametry['bpm']) # tempo\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n 
try:\r\n parametry['freq'] = int(parametry['freq']) # frekwencja wyjsciowego wav\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n try:\r\n parametry['loud'] = float(parametry['loud'] ) # glosnosc\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n try:\r\n # lista wag dla sampli\r\n parametry['wages'] = [float(s) for s in parametry['wages'].split(\",\")] \r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n \r\n return parametry\r\n \r\n#b = wczytywanie_ustawien(\"defs.txt\")\r\n \r\n \r\n#zglasnianie utworu\r\n\r\ndef zmiana_glosnosci(utwor, procent = 0):\r\n \"\"\"\r\n zmienia glosnosc utworu (jego amplitudy)\r\n \r\n arg:\r\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \r\n lub zciszony\r\n \r\n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \r\n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \r\n glosniej\", dla -1 \"100% ciszej\"\r\n \r\n wyjscie:\r\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\r\n \"\"\"\r\n if(-1 <= procent <= 1):\r\n #ile razy mamy pomnozyc amplitude naszego dzwieku\r\n mnoznik = 0\r\n if( procent < 0 ):\r\n mnoznik = 1 + procent\r\n else:\r\n # obliczamy najwyzsza amplitude w danym utworze i ona bedzie \r\n # wyznaczac jak bardzo mozemy podglosnic\r\n maks_ampli = 0\r\n maks_ampli = max(abs(utwor))\r\n mnoznik = 32767/maks_ampli # maksymalny mnoznik\r\n # mnoznik minimalnie moze osiagnac wartosc 1, to co powyzej \r\n # (mnoznik-1) mnozymy o procent zglosnienia\r\n # i dodajemy do podstawy (czyli 1)\r\n mnoznik = 1 + (mnoznik - 1)*procent\r\n glosniej = mnoznik * utwor\r\n #glosniej = np.array(glosniej, dtype=np.int16)\r\n glosniej = glosniej.astype(np.int16) \r\n return glosniej\r\n else:\r\n print(\"Podaj procent z zakresu -1 do 1\")\r\n \r\n\r\n#wierszyk1 = zmiana_glosnosci(wierszyk, 
b['loud'])\r\n#wierszyk1\r\n \r\n \r\n \r\n\r\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna = True, bpm = 120, \\\r\n freq = 44100, wages = None, loud = 0):\r\n \"\"\"\r\n glowna funkcja generujaca cala piosenke\r\n \r\n arg:\r\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \r\n definicje kolejnych cwiercnut (co ma byc grane \r\n w danej cwiercnucie)\r\n \r\n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \r\n zapisana (nie jest, gdy tracki mialy nieodpowiednia \r\n liczbe wierszy lub kolumn)\r\n \r\n int: bpm - tempo piosenki w jednostce bpm\r\n \r\n int: freq - ilosc probek w jednej sekundzie\r\n \r\n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \r\n probka, 2 etc.)\r\n \r\n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \r\n maxa, -1 - sciszamy na maxa\r\n \r\n wyjscie:\r\n numpy.ndarray (numpy.int16): gotowy utwór\r\n \r\n \"\"\"\r\n \r\n \r\n # macierz piosenki byla pusta, piosenka nie zostala utworzona\r\n if(czy_pelna == False):\r\n print(\"Nie utworzono piosenki\")\r\n return None \r\n \r\n else:\r\n \r\n import numpy as np\r\n import scipy.io.wavfile\r\n \r\n t_cwiercnuty = 60 / bpm # czas trwania jednej cwiercnuty (zalezy od \r\n #tempa)\r\n ile_cwiercnut = macierz_piosenki.shape[0] # ilosc cwiercnut\r\n kanaly = macierz_piosenki.shape[1] # ilosc uzywanych sampli\r\n frekw = freq\r\n czas_utworu = ile_cwiercnut*t_cwiercnuty\r\n # ile elementow bedzie w nowym utworze\r\n ilosc_probek = int(frekw*czas_utworu) \r\n \r\n # bedziemy tylko raz wczytywac zawartosc sampleXY.wav, wiec potrzebuje \r\n # unikalne numery sampli\r\n rozne_sample = np.unique(macierz_piosenki) # bierze lacznie z \"--\"\r\n \r\n # w slownikach zapiszemy parametry tych sampli\r\n # slownik z wartosciami danego sampla (tj. 
macierze numpy-owe z \r\n # amplitudami)\r\n sample_co = {} \r\n sample_frekw = {} # slownik z ich frekwencjami\r\n sample_dl = {} # slownik z ich dlugosciami\r\n \r\n #wczytujemy te sample\r\n # w iteratorze bierzemy napisy \"01\" \"02\" \"--\" itd. stringi!!!\r\n for ktory_sampel in rozne_sample: \r\n \r\n if(ktory_sampel != '--'):\r\n # tworzymy napis z nazwa pliku sampla, np. \"sample01.wav\"\r\n plik = ''.join(['sample',ktory_sampel,'.wav'])\r\n # wczytujemy zawartosc i frekwencje danego sampla do \r\n # odpowiednio nazwanego elementu w slowniku sample_co i \r\n # sample_frekw\r\n sample_frekw[ktory_sampel], sample_co[ktory_sampel] = \\\r\n scipy.io.wavfile.read(plik)\r\n # tworzymy mono z naszego sampla\r\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\\\r\n axis=1)/32767\r\n # normalizujemy te wartosci\r\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel]/ \\\r\n max(np.abs(sample_co[ktory_sampel])) * 32767)\r\n # zapisujemy dlugosc sampli, czyli ilosc probek \r\n # ( = czas_trwania*frekwencja)\r\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\r\n \r\n else: # to samo robimy dla \"--\" recznie ustawiamy\r\n # robimy cisze, gdy --\r\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16) \r\n sample_frekw[ktory_sampel] = frekw # taka sama jak domyslna\r\n sample_dl[ktory_sampel] = 0 # zakladamy czas 0 sekund\r\n \r\n\r\n \r\n \r\n \r\n if wages is None:\r\n wages = np.ones((1,kanaly)) \r\n else:\r\n # zeby mialo wymiar (1,kanaly), a nie (kanaly,)\r\n wages = np.array(wages).reshape(1,kanaly) \r\n \r\n # definicja nowego utworu\r\n T = np.linspace(0, czas_utworu, ilosc_probek)\r\n \r\n for wiersz in range(0, ile_cwiercnut):\r\n\r\n sample = [] # wczytamy sample z danej cwiecnuty\r\n dlugosci = [] # tu zapiszemy ich dlugosci w tej cwiercnucie\r\n\r\n for i in range(0, kanaly):\r\n \r\n sampus = macierz_piosenki[wiersz,i]\r\n sample.append(sample_co[sampus]) \r\n dlugosci.append(sample_dl[sampus])\r\n\r\n \r\n # bierzemy 
najdluzszy sample i w calosci bedziemy go odtwarzac; \r\n # reszte zatem tez w calosci odtworzymy, a gdy sie skoncza damy \r\n # cisze (zera)\r\n maksik = max(dlugosci)\r\n # mamy tutaj macierz 4 na max dlugosc, przygotowana do zlaczenia \r\n # potem tych dzwiekow w jeden \r\n pusty = np.int16(np.zeros((len(sample), maksik)))\r\n\r\n # dodajemy nasze dzwieki do tej pustej\r\n for k in range(0, kanaly):\r\n pusty[k][0:dlugosci[k]] = sample[k]\r\n\r\n \r\n # mnozymy kolejne elementy wektora pusty (czyli sample) przez \r\n # wagi i sumujemy\r\n cwiercnuta = np.dot(wages, pusty) \r\n #otrzymamy wymiar (1, x), a chcemy (x,), wiec bierzemy pierwszy \r\n # element\r\n cwiercnuta = cwiercnuta[0]\r\n \r\n # poczatek biezacej cwiercnuty \r\n poczatek_cwiercnuty = int(wiersz*t_cwiercnuty*frekw)\r\n \r\n # jesli dodanie ostatnich cwiercnut bedzie wiazalo sie z \r\n # przekroczeniem dlugosci tworzonego utworu, obcinamy ostatnie \r\n # dzwieki, tak by zmiescic sie w tej dlugosci\r\n if (poczatek_cwiercnuty + maksik) > ilosc_probek:\r\n \r\n T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)]=\\\r\n cwiercnuta[0:len(T[poczatek_cwiercnuty:(poczatek_cwiercnuty +\\\r\n maksik)])]\r\n \r\n else:\r\n T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)] += \\\r\n cwiercnuta\r\n \r\n T= np.array(T, dtype=np.int16)\r\n \r\n #ustalamy glosnosc utworu\r\n T = zmiana_glosnosci(T, loud)\r\n\r\n return T\r\n\r\n#pios, k = wczytywanie_sciezek(a)\r\n#wierszyk = tworzenie_piosenki(pios, k, bpm = b['bpm'], freq = b['freq'], \\\r\n#wages = b['wages'])\r\n#wierszyk = tworzenie_piosenki(pios, k, **b)\r\n#wierszyk ",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from admin_tools.dashboard.modules import DashboardModule
from nodes.models import Node
from slices.models import Slice
class MyThingsDashboardModule(DashboardModule):
"""
Controller dashboard module to provide an overview to
the user of the nodes and slices of its groups.
"""
title="My Things"
template = "dashboard/modules/mythings.html"
def init_with_context(self, context):
user = context['request'].user
# Get user slices
slices = Slice.objects.filter(group__in=user.groups.all().values_list('pk', flat=True))
context['slices'] = slices
# Get user nodes
nodes = {}
nodes_states = ['offline', 'safe', 'production']
for group in user.groups.all():
nodes[group] = []
qs_nodes = Node.objects.filter(group=group)
for state in nodes_states:
nodes[group].append(qs_nodes.filter(state_set__value=state).count())
context['nodes_states'] = nodes_states
context['user_nodes'] = nodes
# initialize to calculate is_empty
self.has_data = nodes or slices
def is_empty(self):
return not self.has_data
|
normal
|
{
"blob_id": "90324392e763ac6ea78c77b909c4bea667d45e6c",
"index": 5896,
"step-1": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n <mask token>\n <mask token>\n <mask token>\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n <mask token>\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n",
"step-3": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n",
"step-4": "from admin_tools.dashboard.modules import DashboardModule\nfrom nodes.models import Node\nfrom slices.models import Slice\n\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n",
"step-5": "from admin_tools.dashboard.modules import DashboardModule\n\nfrom nodes.models import Node\nfrom slices.models import Slice\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title=\"My Things\"\n template = \"dashboard/modules/mythings.html\"\n \n def init_with_context(self, context):\n user = context['request'].user\n \n # Get user slices\n slices = Slice.objects.filter(group__in=user.groups.all().values_list('pk', flat=True))\n context['slices'] = slices\n \n # Get user nodes\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state).count())\n \n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n \n # initialize to calculate is_empty\n self.has_data = nodes or slices\n \n def is_empty(self):\n return not self.has_data\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from superwires import games, color
import random
SCORE = 0
## pizza_image= games.load_image("images/pizza.png")
## pizza = games.Sprite(image = pizza_image, x=SW/2, y=SH/2,
## dx =1, dy = 1)
## games.screen.add(pizza)
games.init(screen_width = 640, screen_height = 480, fps = 50)
class Pan(games.Sprite):
""" A pan controlled by the mouse. """
def update(self):
""" Move to mouse coordinates """
self.x = games.mouse.x
#self.y = games.mouse.y
self.check_collide()
def check_collide(self):
""" Check for collision with pizza. """
for pizza in self.overlapping_sprites:
pizza.handle_collide()
class Pizza(games.Sprite):
def update(self):
global SCORE
#bouncing
if self.right > games.screen.width or self.left < 0:
self.dx = -self.dx
#SCORE += 1
#if self.bottom > games.screen.height or
if self.top < 0:
self.dy = -self.dy
#SCORE += 1
## if self.left > games.screen.width:
## self.right = 0
## SCORE +=1
## if self.right<0:
## self.left = games.screen.width
## SCORE +=1
##
## if self.top > games.screen.height:
## self.top = 0
## SCORE +=1
## if self.bottom < 0:
## self.bottom = games.screen.height
## SCORE +=1
##
def handle_collide(self):
#self.x = random.randrange(games.screen.width)
self.dy = -self.dy
class ScText(games.Text):
def update(self):
self.value = SCORE
def main():
# loaded img
bg_img = games.load_image("images/pizzeria.jpg", transparent = True)
pizza_img = games.load_image("images/pizza.png")
pan_img = games.load_image("images/mousepoint.png")
#added img to bg
games.screen.background = bg_img
#create pizza obj
pizza = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,
dx =random.randint(-10,10), dy = random.randint(-10,10))
pizza2 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,
dx =random.randint(-10,10), dy = random.randint(-10,10))
pizza3 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,
dx =random.randint(-10,10), dy = random.randint(-10,10))
pizza4 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,
dx =random.randint(-10,10), dy = random.randint(-10,10))
#create pan obj
pan = Pan(image = pan_img, x=games.mouse.x, y=games.mouse.y)
#create txt obj
score = ScText(value = SCORE, size = 60,
is_collideable = False,
color = color.black,
x = 550,
y = 30)
#draw objs to screen
games.screen.add(pizza)
games.screen.add(pizza2)
games.screen.add(pizza3)
games.screen.add(pizza4)
games.screen.add(score)
games.screen.add(pan)
#sets visibility of mouse while on screen
games.mouse.is_visible = False
#locks mouse to screen if True
games.screen.event_grab = False
#start mainloop
games.screen.mainloop()
#score = games.Text(value = "welcome", size = 60, color = color.black, x = 550, y = 30)
games.screen.add(score)
#### won_message = games.Message(value = "You lose!", color = color.blue, size = 100, x = games.screen.width/2, y = games.screen.height/2, lifetime = 250, after_death = games.screen.quit)
#### games.screen.add(won_message)
##game_over = games.Message(value = "Game Over",
## size = 100,
## color = color.blue,
## x = games.screen.width/2
## y = games.screen.height/2
## lifetime = 250,
## after_death = games.screen.quit)
##games.screen.add(game_over)
main()
##angle - Facing in degrees
##
##x - x-coordinate
##
##y - y-coordinate
##
##dx - x velocity
##
##dy - y velocity
##
##left - x-coordinate of left sprite edge
##
##right - x-coordinate of right sprite edge
##
##top - y-coordinate of top sprite edge
##
##bottom - y-coordinate of bottom sprite edge
##
##image - image object of sprite
##
##overlapping_sprites - List of other objects that overlap sprite
##
##is_collideable - Whether or not the sprite is collideable. True means sprite will register in collisions. False means sprite will not show up in collisions.
##Methods
##
##update() - Updates sprite. Automatically called every mainloop() cycle.
##
##destroy() - Removes sprite from the screen
|
normal
|
{
"blob_id": "ee16b91ce1c12ce78d23ff655304aebc39cb1639",
"index": 9693,
"step-1": "<mask token>\n\n\nclass Pan(games.Sprite):\n <mask token>\n\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n self.check_collide()\n <mask token>\n\n\nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0:\n self.dy = -self.dy\n\n def handle_collide(self):\n self.dy = -self.dy\n\n\nclass ScText(games.Text):\n\n def update(self):\n self.value = SCORE\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Pan(games.Sprite):\n \"\"\" A pan controlled by the mouse. \"\"\"\n\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n self.check_collide()\n\n def check_collide(self):\n \"\"\" Check for collision with pizza. \"\"\"\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n\n\nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0:\n self.dy = -self.dy\n\n def handle_collide(self):\n self.dy = -self.dy\n\n\nclass ScText(games.Text):\n\n def update(self):\n self.value = SCORE\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Pan(games.Sprite):\n \"\"\" A pan controlled by the mouse. \"\"\"\n\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n self.check_collide()\n\n def check_collide(self):\n \"\"\" Check for collision with pizza. \"\"\"\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n\n\nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0:\n self.dy = -self.dy\n\n def handle_collide(self):\n self.dy = -self.dy\n\n\nclass ScText(games.Text):\n\n def update(self):\n self.value = SCORE\n\n\ndef main():\n bg_img = games.load_image('images/pizzeria.jpg', transparent=True)\n pizza_img = games.load_image('images/pizza.png')\n pan_img = games.load_image('images/mousepoint.png')\n games.screen.background = bg_img\n pizza = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.screen\n .height / 2, dx=random.randint(-10, 10), dy=random.randint(-10, 10))\n pizza2 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pizza3 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pizza4 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pan = Pan(image=pan_img, x=games.mouse.x, y=games.mouse.y)\n score = ScText(value=SCORE, size=60, is_collideable=False, color=color.\n black, x=550, y=30)\n games.screen.add(pizza)\n games.screen.add(pizza2)\n games.screen.add(pizza3)\n games.screen.add(pizza4)\n games.screen.add(score)\n games.screen.add(pan)\n games.mouse.is_visible = False\n games.screen.event_grab = False\n games.screen.mainloop()\n games.screen.add(score)\n\n\n<mask token>\n",
"step-4": "from superwires import games, color\nimport random\nSCORE = 0\ngames.init(screen_width=640, screen_height=480, fps=50)\n\n\nclass Pan(games.Sprite):\n \"\"\" A pan controlled by the mouse. \"\"\"\n\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n self.check_collide()\n\n def check_collide(self):\n \"\"\" Check for collision with pizza. \"\"\"\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n\n\nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0:\n self.dy = -self.dy\n\n def handle_collide(self):\n self.dy = -self.dy\n\n\nclass ScText(games.Text):\n\n def update(self):\n self.value = SCORE\n\n\ndef main():\n bg_img = games.load_image('images/pizzeria.jpg', transparent=True)\n pizza_img = games.load_image('images/pizza.png')\n pan_img = games.load_image('images/mousepoint.png')\n games.screen.background = bg_img\n pizza = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.screen\n .height / 2, dx=random.randint(-10, 10), dy=random.randint(-10, 10))\n pizza2 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pizza3 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pizza4 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pan = Pan(image=pan_img, x=games.mouse.x, y=games.mouse.y)\n score = ScText(value=SCORE, size=60, is_collideable=False, color=color.\n black, x=550, y=30)\n games.screen.add(pizza)\n games.screen.add(pizza2)\n games.screen.add(pizza3)\n games.screen.add(pizza4)\n games.screen.add(score)\n games.screen.add(pan)\n games.mouse.is_visible = False\n games.screen.event_grab = False\n games.screen.mainloop()\n 
games.screen.add(score)\n\n\nmain()\n",
"step-5": "from superwires import games, color\nimport random\n\nSCORE = 0\n\n\n\n\n \n## pizza_image= games.load_image(\"images/pizza.png\")\n## pizza = games.Sprite(image = pizza_image, x=SW/2, y=SH/2,\n## dx =1, dy = 1)\n## games.screen.add(pizza)\n\ngames.init(screen_width = 640, screen_height = 480, fps = 50)\n\nclass Pan(games.Sprite):\n \"\"\" A pan controlled by the mouse. \"\"\"\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()\n def check_collide(self):\n \"\"\" Check for collision with pizza. \"\"\"\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n \n \nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n #bouncing \n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n #SCORE += 1\n\n #if self.bottom > games.screen.height or\n if self.top < 0:\n self.dy = -self.dy\n #SCORE += 1\n \n## if self.left > games.screen.width:\n## self.right = 0\n## SCORE +=1\n## if self.right<0:\n## self.left = games.screen.width\n## SCORE +=1\n##\n## if self.top > games.screen.height:\n## self.top = 0\n## SCORE +=1\n## if self.bottom < 0:\n## self.bottom = games.screen.height\n## SCORE +=1\n## \n def handle_collide(self):\n #self.x = random.randrange(games.screen.width)\n self.dy = -self.dy\n \n\n\nclass ScText(games.Text):\n def update(self):\n self.value = SCORE\n\ndef main():\n # loaded img\n bg_img = games.load_image(\"images/pizzeria.jpg\", transparent = True)\n pizza_img = games.load_image(\"images/pizza.png\")\n pan_img = games.load_image(\"images/mousepoint.png\")\n\n #added img to bg\n games.screen.background = bg_img\n\n #create pizza obj\n pizza = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,\n dx =random.randint(-10,10), dy = random.randint(-10,10))\n pizza2 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,\n dx =random.randint(-10,10), dy = random.randint(-10,10))\n pizza3 
= Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,\n dx =random.randint(-10,10), dy = random.randint(-10,10))\n pizza4 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,\n dx =random.randint(-10,10), dy = random.randint(-10,10))\n\n #create pan obj\n pan = Pan(image = pan_img, x=games.mouse.x, y=games.mouse.y)\n \n \n \n \n \n\n #create txt obj\n score = ScText(value = SCORE, size = 60,\n is_collideable = False,\n color = color.black,\n x = 550,\n y = 30)\n\n #draw objs to screen\n games.screen.add(pizza)\n games.screen.add(pizza2)\n games.screen.add(pizza3)\n games.screen.add(pizza4)\n games.screen.add(score)\n games.screen.add(pan)\n \n #sets visibility of mouse while on screen\n games.mouse.is_visible = False\n\n #locks mouse to screen if True\n games.screen.event_grab = False\n\n\n #start mainloop\n games.screen.mainloop()\n\n\n #score = games.Text(value = \"welcome\", size = 60, color = color.black, x = 550, y = 30)\n games.screen.add(score)\n\n#### won_message = games.Message(value = \"You lose!\", color = color.blue, size = 100, x = games.screen.width/2, y = games.screen.height/2, lifetime = 250, after_death = games.screen.quit)\n#### games.screen.add(won_message)\n\n##game_over = games.Message(value = \"Game Over\",\n## size = 100,\n## color = color.blue,\n## x = games.screen.width/2\n## y = games.screen.height/2\n## lifetime = 250,\n## after_death = games.screen.quit)\n##games.screen.add(game_over)\n\nmain()\n\n\n\n\n\n##angle - Facing in degrees\n##\n##x - x-coordinate\n##\n##y - y-coordinate\n##\n##dx - x velocity\n##\n##dy - y velocity\n##\n##left - x-coordinate of left sprite edge\n##\n##right - x-coordinate of right sprite edge\n##\n##top - y-coordinate of top sprite edge\n##\n##bottom - y-coordinate of bottom sprite edge\n##\n##image - image object of sprite\n##\n##overlapping_sprites - List of other objects that overlap sprite\n##\n##is_collideable - Whether or not the sprite is collideable. 
True means sprite will register in collisions. False means sprite will not show up in collisions.\n\n##Methods\n##\n##update() - Updates sprite. Automatically called every mainloop() cycle.\n##\n##destroy() - Removes sprite from the screen\n",
"step-ids": [
7,
9,
10,
13,
14
]
}
|
[
7,
9,
10,
13,
14
] |
<|reserved_special_token_0|>
class TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy, subnets=None, networks=None):
if not subnets:
subnets = []
if not networks:
networks = []
db_mod = 'quark.db.api'
with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
mock.patch('%s.subnet_find' % db_mod), mock.patch(
'%s.network_find' % db_mod), mock.patch('%s.ip_policy_update' %
db_mod)) as (ip_policy_find, subnet_find, network_find,
ip_policy_update):
ip_policy_find.return_value = ip_policy
subnet_find.return_value = subnets
network_find.return_value = networks
yield ip_policy_update
def test_update_ip_policy_not_found(self):
with self._stubs(None):
with self.assertRaises(q_exc.IPPolicyNotFound):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=None))
def test_update_ip_policy_with_both_network_and_subnet_ids(self):
ipp = dict(id=1, subnets=[])
with self._stubs(ipp):
with self.assertRaises(n_exc.BadRequest):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(network_ids=[1], subnet_ids=[1])))
<|reserved_special_token_0|>
def test_update_ip_policy_subnets_already_exists(self):
ipp = dict(id=1, subnets=[dict()])
with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]):
with self.assertRaises(q_exc.IPPolicyAlreadyExists):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(subnet_ids=[100])))
<|reserved_special_token_0|>
def test_update_ip_policy_subnets_empty_exclude(self):
ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.40/32'], name=
'foo', tenant_id=1)
with self._stubs(ipp, subnets=[dict(id=1, cidr='0.0.0.0/16',
ip_policy=None)]) as ip_policy_update:
self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
dict(subnet_ids=[100], exclude=[])))
ip_policy_update.assert_called_once_with(self.context, ipp,
subnet_ids=[100], exclude=['0.0.0.0/32', '0.0.255.255/32'])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy):
db_mod = 'quark.db.api'
with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
mock.patch('%s.ip_policy_delete' % db_mod)) as (ip_policy_find,
ip_policy_delete):
ip_policy_find.return_value = ip_policy
yield ip_policy_find, ip_policy_delete
def test_delete_ip_policy_not_found(self):
with self._stubs(None):
with self.assertRaises(q_exc.IPPolicyNotFound):
self.plugin.delete_ip_policy(self.context, 1)
def test_delete_ip_policy_in_use(self):
with self._stubs(dict(networks=True)):
with self.assertRaises(q_exc.IPPolicyInUse):
self.plugin.delete_ip_policy(self.context, 1)
def test_delete_ip_policy(self):
ip_policy = dict(id=1, networks=[], subnets=[])
with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):
self.plugin.delete_ip_policy(self.context, 1)
self.assertEqual(ip_policy_find.call_count, 1)
self.assertEqual(ip_policy_delete.call_count, 1)
class TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy, subnets=None, routes=None):
subnets = subnets or []
db_mod = 'quark.db.api'
with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
mock.patch('%s.subnet_find' % db_mod), mock.patch(
'%s.route_find' % db_mod), mock.patch('%s.ip_policy_update' %
db_mod)) as (ip_policy_find, subnet_find, route_find,
ip_policy_update):
ip_policy_find.return_value = ip_policy
subnet_find.return_value = subnets
route_find.return_value = routes
yield ip_policy_update
def test_update_ip_policy_has_route_conflict_raises(self):
subnet = dict(id=1, cidr='192.168.0.0/24')
ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name
='foo', tenant_id=1)
route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}
with self._stubs(ipp, subnets=[subnet], routes=[route]):
with self.assertRaises(n_exc_ext.GatewayConflictWithAllocationPools
):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(subnet_ids=[1], exclude=[])))
def test_update_ip_policy_no_route_conflict(self):
subnet = dict(id=1, cidr='192.168.0.0/24')
ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name
='foo', tenant_id=1)
route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}
with self._stubs(ipp, subnets=[subnet], routes=[route]):
try:
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(subnet_ids=[1], exclude=['192.168.0.0/24']))
)
except Exception as e:
self.fail("This shouldn't have raised: %s" % e)
class TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):
def test_normal_cidr_and_valid_subnet(self):
try:
ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
dict(id=1, cidr='192.168.0.0/24')])
except Exception:
self.fail('Should not have failed')
def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):
try:
ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
dict(id=1, cidr='::/96')])
except Exception:
self.fail('Should not have failed')
def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):
try:
ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
cidr='::/96')])
except Exception:
self.fail('Should not have failed')
def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):
try:
ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
cidr='192.168.0.0/24')])
except Exception:
self.fail('Should not have failed')
def test_normal_cidr_and_multiple_valid_subnet(self):
try:
ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
dict(id=1, cidr='192.168.0.0/24'), dict(id=2, cidr=
'192.168.0.0/16')])
except Exception:
self.fail('Should not have failed')
def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):
try:
ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
cidr='::/96'), dict(id=2, cidr='::/64')])
except Exception:
self.fail('Should not have failed')
def test_normal_cidr_and_invalid_subnet(self):
with self.assertRaises(n_exc.BadRequest):
ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
dict(id=1, cidr='10.10.10.0/24')])
def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):
with self.assertRaises(n_exc.BadRequest):
ippol._validate_cidrs_fit_into_subnets(['::/64'], [dict(id=1,
cidr='::/96')])
def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):
with self.assertRaises(n_exc.BadRequest):
ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
dict(id=1, cidr='10.10.10.0/24'), dict(id=1, cidr=
'192.168.0.0/24')])
def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):
with self.assertRaises(n_exc.BadRequest):
ippol._validate_cidrs_fit_into_subnets(['::/127'], [dict(id=1,
cidr='::/96'), dict(id=1, cidr='::/128')])
class TestQuarkEnsureDefaultPolicy(test_base.TestBase):
def test_no_cidrs_no_subnets(self):
cidrs = []
subnets = []
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, [])
self.assertEqual(subnets, [])
def test_no_cidrs_v4(self):
cidrs = []
subnets = [dict(cidr='192.168.10.1/24')]
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])
def test_no_subnets_v4(self):
cidrs = ['192.168.10.0/32', '192.168.10.255/32']
subnets = []
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
self.assertEqual(subnets, [])
def test_cidrs_without_default_cidrs_v4(self):
cidrs = ['192.168.10.20/32', '192.168.10.40/32']
subnets = [dict(cidr='192.168.10.1/24')]
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['192.168.10.20/32', '192.168.10.40/32',
'192.168.10.0/32', '192.168.10.255/32'])
self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])
def test_cidrs_with_default_cidrs_v4(self):
cidrs = ['192.168.10.0/32', '192.168.10.255/32']
subnets = [dict(cidr='192.168.10.1/24')]
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])
def test_no_cidrs_v6(self):
cidrs = []
subnets = [dict(cidr='::/64')]
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
self.assertEqual(subnets, [dict(cidr='::/64')])
def test_no_subnets_v6(self):
cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']
subnets = []
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
self.assertEqual(subnets, [])
def test_cidrs_without_default_cidrs_v6(self):
cidrs = ['::10/128', '::20/128']
subnets = [dict(cidr='::/64')]
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['::10/128', '::20/128', '::/128',
'::ffff:ffff:ffff:ffff/128'])
self.assertEqual(subnets, [dict(cidr='::/64')])
def test_cidrs_with_default_cidrs_v6(self):
cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']
subnets = [dict(cidr='::/64')]
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
self.assertEqual(subnets, [dict(cidr='::/64')])
def test_no_duplicates_in_result_when_called_twice(self):
cidrs = ['192.168.10.10/32']
subnets = [dict(cidr='192.168.10.0/24')]
self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',
'192.168.10.255/32'])
cidrs2 = ['192.168.10.10/32']
self.assertIsNone(ippol.ensure_default_policy(cidrs2, subnets))
self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',
'192.168.10.255/32'])
self.assertEqual(subnets, [dict(cidr='192.168.10.0/24')])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy, subnets=None, networks=None):
if not subnets:
subnets = []
if not networks:
networks = []
db_mod = 'quark.db.api'
with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
mock.patch('%s.subnet_find' % db_mod), mock.patch(
'%s.network_find' % db_mod), mock.patch('%s.ip_policy_update' %
db_mod)) as (ip_policy_find, subnet_find, network_find,
ip_policy_update):
ip_policy_find.return_value = ip_policy
subnet_find.return_value = subnets
network_find.return_value = networks
yield ip_policy_update
def test_update_ip_policy_not_found(self):
with self._stubs(None):
with self.assertRaises(q_exc.IPPolicyNotFound):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=None))
def test_update_ip_policy_with_both_network_and_subnet_ids(self):
ipp = dict(id=1, subnets=[])
with self._stubs(ipp):
with self.assertRaises(n_exc.BadRequest):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(network_ids=[1], subnet_ids=[1])))
def test_update_ip_policy_subnets_not_found(self):
ipp = dict(id=1, subnets=[])
with self._stubs(ipp):
with self.assertRaises(n_exc.SubnetNotFound):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(subnet_ids=[100])))
def test_update_ip_policy_subnets_already_exists(self):
ipp = dict(id=1, subnets=[dict()])
with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]):
with self.assertRaises(q_exc.IPPolicyAlreadyExists):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(subnet_ids=[100])))
def test_update_ip_policy_subnets(self):
ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.0/24'], name=
'foo', tenant_id=1)
with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None)]
) as ip_policy_update:
self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
dict(subnet_ids=[100])))
self.assertEqual(ip_policy_update.called, 1)
def test_update_ip_policy_subnets_empty_exclude(self):
ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.40/32'], name=
'foo', tenant_id=1)
with self._stubs(ipp, subnets=[dict(id=1, cidr='0.0.0.0/16',
ip_policy=None)]) as ip_policy_update:
self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
dict(subnet_ids=[100], exclude=[])))
ip_policy_update.assert_called_once_with(self.context, ipp,
subnet_ids=[100], exclude=['0.0.0.0/32', '0.0.255.255/32'])
def test_update_ip_policy_subnets_empty_exclude_without_subnet_ids(self):
ipp = dict(id=1, subnets=[dict(cidr='0.0.0.0/16')], exclude=[
'0.0.0.40/32'], name='foo', tenant_id=1)
with self._stubs(ipp) as ip_policy_update:
self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
dict(exclude=[])))
ip_policy_update.assert_called_once_with(self.context, ipp,
exclude=['0.0.0.0/32', '0.0.255.255/32'])
<|reserved_special_token_0|>
    def test_update_ip_policy_networks(self):
        """A policy can be re-targeted to a network with no policy attached."""
        ipp = dict(id=1, networks=[dict()], exclude=['0.0.0.0/24'], name=
            'foo', tenant_id=1)
        with self._stubs(ipp, networks=[dict(id=1, ip_policy=None)]
            ) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(network_ids=[100])))
            # ``called`` is a bool; True == 1.
            self.assertEqual(ip_policy_update.called, 1)
    def test_update_ip_policy_exclude_v4(self):
        """Default v4 network/broadcast cidrs are appended to the exclude."""
        subnets = [dict(id=100, cidr='0.0.0.0/16')]
        ipp = dict(id=1, subnets=subnets, exclude=['0.0.0.0/24'], name=
            'foo', tenant_id=1)
        with self._stubs(ipp, subnets=subnets) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(subnet_ids=[100], exclude=['0.0.0.1/32'])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                subnet_ids=[100], exclude=['0.0.0.1/32', '0.0.0.0/32',
                '0.0.255.255/32'])
    def test_update_ip_policy_exclude_v6(self):
        """Default first/last v6 cidrs are appended to the exclude."""
        subnets = [dict(id=100, cidr='::/64')]
        ipp = dict(id=1, subnets=subnets, exclude=['::/128'], name='foo',
            tenant_id=1)
        with self._stubs(ipp, subnets=subnets) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(subnet_ids=[100], exclude=['::1/128'])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                subnet_ids=[100], exclude=['::1/128', '::/128',
                '::ffff:ffff:ffff:ffff/128'])
class TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):
    """Tests for the plugin's delete_ip_policy entry point."""

    @contextlib.contextmanager
    def _stubs(self, ip_policy):
        """Patch the db api's ip_policy find/delete; yield both mocks."""
        db_mod = 'quark.db.api'
        patches = (mock.patch('%s.ip_policy_find' % db_mod),
                   mock.patch('%s.ip_policy_delete' % db_mod))
        with contextlib.nested(*patches) as (find_mock, delete_mock):
            find_mock.return_value = ip_policy
            yield find_mock, delete_mock

    def test_delete_ip_policy_not_found(self):
        """An unknown policy id raises IPPolicyNotFound."""
        with self._stubs(None):
            with self.assertRaises(q_exc.IPPolicyNotFound):
                self.plugin.delete_ip_policy(self.context, 1)

    def test_delete_ip_policy_in_use(self):
        """A policy still attached to networks raises IPPolicyInUse."""
        with self._stubs(dict(networks=True)):
            with self.assertRaises(q_exc.IPPolicyInUse):
                self.plugin.delete_ip_policy(self.context, 1)

    def test_delete_ip_policy(self):
        """A detached policy is looked up once and deleted once."""
        detached_policy = dict(id=1, networks=[], subnets=[])
        with self._stubs(detached_policy) as (find_mock, delete_mock):
            self.plugin.delete_ip_policy(self.context, 1)
            self.assertEqual(find_mock.call_count, 1)
            self.assertEqual(delete_mock.call_count, 1)
class TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):
    """Tests for update_ip_policy interacting with existing subnet routes."""
    @contextlib.contextmanager
    def _stubs(self, ip_policy, subnets=None, routes=None):
        """Patch ip_policy/subnet/route db lookups; yield the update mock."""
        subnets = subnets or []
        db_mod = 'quark.db.api'
        with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
            mock.patch('%s.subnet_find' % db_mod), mock.patch(
            '%s.route_find' % db_mod), mock.patch('%s.ip_policy_update' %
            db_mod)) as (ip_policy_find, subnet_find, route_find,
            ip_policy_update):
            ip_policy_find.return_value = ip_policy
            subnet_find.return_value = subnets
            route_find.return_value = routes
            yield ip_policy_update
    def test_update_ip_policy_has_route_conflict_raises(self):
        """Unexcluding a route's gateway address raises a gateway conflict."""
        subnet = dict(id=1, cidr='192.168.0.0/24')
        ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name
            ='foo', tenant_id=1)
        route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}
        with self._stubs(ipp, subnets=[subnet], routes=[route]):
            with self.assertRaises(n_exc_ext.GatewayConflictWithAllocationPools
                ):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(subnet_ids=[1], exclude=[])))
    def test_update_ip_policy_no_route_conflict(self):
        """An exclude that still covers the gateway must not raise."""
        subnet = dict(id=1, cidr='192.168.0.0/24')
        ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name
            ='foo', tenant_id=1)
        route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}
        with self._stubs(ipp, subnets=[subnet], routes=[route]):
            try:
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(subnet_ids=[1], exclude=['192.168.0.0/24']))
                    )
            except Exception as e:
                self.fail("This shouldn't have raised: %s" % e)
class TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):
    """Tests for ippol._validate_cidrs_fit_into_subnets."""

    def _assert_fits(self, cidrs, subnets):
        """Validation of cidrs against subnets must not raise."""
        try:
            ippol._validate_cidrs_fit_into_subnets(cidrs, subnets)
        except Exception:
            self.fail('Should not have failed')

    def _assert_rejected(self, cidrs, subnets):
        """Validation of cidrs against subnets must raise BadRequest."""
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(cidrs, subnets)

    def test_normal_cidr_and_valid_subnet(self):
        self._assert_fits(['192.168.0.100/32'],
                          [dict(id=1, cidr='192.168.0.0/24')])

    def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):
        self._assert_fits(['192.168.0.100/32'], [dict(id=1, cidr='::/96')])

    def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):
        self._assert_fits(['::/128'], [dict(id=1, cidr='::/96')])

    def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):
        self._assert_fits(['::/128'], [dict(id=1, cidr='192.168.0.0/24')])

    def test_normal_cidr_and_multiple_valid_subnet(self):
        self._assert_fits(['192.168.0.100/32'],
                          [dict(id=1, cidr='192.168.0.0/24'),
                           dict(id=2, cidr='192.168.0.0/16')])

    def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):
        self._assert_fits(['::/128'],
                          [dict(id=1, cidr='::/96'),
                           dict(id=2, cidr='::/64')])

    def test_normal_cidr_and_invalid_subnet(self):
        self._assert_rejected(['192.168.0.100/32'],
                              [dict(id=1, cidr='10.10.10.0/24')])

    def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):
        self._assert_rejected(['::/64'], [dict(id=1, cidr='::/96')])

    def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):
        self._assert_rejected(['192.168.0.100/32'],
                              [dict(id=1, cidr='10.10.10.0/24'),
                               dict(id=1, cidr='192.168.0.0/24')])

    def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):
        self._assert_rejected(['::/127'],
                              [dict(id=1, cidr='::/96'),
                               dict(id=1, cidr='::/128')])
class TestQuarkEnsureDefaultPolicy(test_base.TestBase):
    """Tests for ippol.ensure_default_policy.

    The function mutates ``cidrs`` in place, appending each subnet's
    default excluded addresses (network/broadcast for v4, first/last
    address for v6) when not already present, and returns None.
    """

    def test_no_cidrs_no_subnets(self):
        """Nothing to do: both lists stay empty."""
        cidrs = []
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, [])
        self.assertEqual(subnets, [])

    def test_no_cidrs_v4(self):
        """The v4 network/broadcast addresses are appended to an empty list."""
        cidrs = []
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])

    def test_no_subnets_v4(self):
        """With no subnets the existing cidrs are left untouched."""
        cidrs = ['192.168.10.0/32', '192.168.10.255/32']
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [])

    def test_cidrs_without_default_cidrs_v4(self):
        """Missing default v4 cidrs are appended after the existing ones."""
        cidrs = ['192.168.10.20/32', '192.168.10.40/32']
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.20/32', '192.168.10.40/32',
                                 '192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])

    def test_cidrs_with_default_cidrs_v4(self):
        """Default v4 cidrs already present are not duplicated."""
        cidrs = ['192.168.10.0/32', '192.168.10.255/32']
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])

    def test_no_cidrs_v6(self):
        """The first/last v6 addresses are appended to an empty list."""
        cidrs = []
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])

    def test_no_subnets_v6(self):
        """With no subnets the existing v6 cidrs are left untouched."""
        cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [])

    def test_cidrs_without_default_cidrs_v6(self):
        """Missing default v6 cidrs are appended after the existing ones."""
        cidrs = ['::10/128', '::20/128']
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::10/128', '::20/128', '::/128',
                                 '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])

    def test_cidrs_with_default_cidrs_v6(self):
        """Default v6 cidrs already present are not duplicated."""
        cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])

    def test_no_duplicates_in_result_when_called_twice(self):
        """Two independent calls each yield the defaults exactly once."""
        cidrs = ['192.168.10.10/32']
        subnets = [dict(cidr='192.168.10.0/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',
                                 '192.168.10.255/32'])
        cidrs2 = ['192.168.10.10/32']
        self.assertIsNone(ippol.ensure_default_policy(cidrs2, subnets))
        # Bug fix: this assertion previously re-checked ``cidrs`` (which the
        # second call never touches) instead of ``cidrs2``, so the second
        # call's no-duplicates behavior was never actually verified.
        self.assertEqual(cidrs2, ['192.168.10.10/32', '192.168.10.0/32',
                                  '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.0/24')])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestQuarkCreateIpPolicies(test_quark_plugin.TestQuarkPlugin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def test_create_ip_policy_invalid_body_missing_netsubnet(self):
        """A body with neither network_ids nor subnet_ids raises BadRequest."""
        with self._stubs(None):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(exclude=['1.1.1.1/24'])))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def test_create_ip_policy_network(self):
        """Creating against a network that already has a policy raises."""
        ipp = dict(subnet_id=None, network_id=1, exclude=['1.1.1.1/24'])
        with self._stubs(ipp, nets=[dict(id=1, ip_policy=dict(id=2),
            subnets=[dict(id=1, cidr='1.1.1.1/16')])]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(network_ids=[ipp['network_id']], exclude=ipp[
                    'exclude'])))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def test_create_ip_policy_only_called_once_with_multiple_networks(self):
        """One db create call covers several target networks."""
        ipp = dict(subnets=[], networks=[dict(id=1, subnets=[dict(id=1,
            ip_policy=None, cidr='0.0.0.0/24')]), dict(id=2, subnets=[dict(
            id=2, ip_policy=None, cidr='0.0.0.0/24')])], id=1, tenant_id=1,
            exclude=[dict(cidr='0.0.0.1/32')], name='foo')
        with self._stubs(ipp, nets=ipp['networks']) as ip_policy_create:
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(network_ids=[1, 2], exclude=['0.0.0.1/32'])))
            # Default network/broadcast cidrs are appended before the create.
            exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.0.255/32']
            ip_policy_create.assert_called_once_with(self.context, exclude=
                exclude, networks=[{'subnets': [{'cidr': '0.0.0.0/24',
                'ip_policy': None, 'id': 1}], 'id': 1}, {'subnets': [{
                'cidr': '0.0.0.0/24', 'ip_policy': None, 'id': 2}], 'id': 2}])
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp['subnet_ids'], [])
            self.assertEqual(resp['network_ids'], [1, 2])
            self.assertEqual(resp['exclude'], ['0.0.0.1/32'])
            self.assertEqual(resp['name'], 'foo')
            self.assertEqual(resp['tenant_id'], 1)
<|reserved_special_token_0|>
class TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):
    """Tests for the plugin's update_ip_policy entry point."""
    @contextlib.contextmanager
    def _stubs(self, ip_policy, subnets=None, networks=None):
        """Patch ip_policy/subnet/network db lookups; yield the update mock."""
        if not subnets:
            subnets = []
        if not networks:
            networks = []
        db_mod = 'quark.db.api'
        with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
            mock.patch('%s.subnet_find' % db_mod), mock.patch(
            '%s.network_find' % db_mod), mock.patch('%s.ip_policy_update' %
            db_mod)) as (ip_policy_find, subnet_find, network_find,
            ip_policy_update):
            ip_policy_find.return_value = ip_policy
            subnet_find.return_value = subnets
            network_find.return_value = networks
            yield ip_policy_update
    def test_update_ip_policy_not_found(self):
        """An unknown policy id raises IPPolicyNotFound."""
        with self._stubs(None):
            with self.assertRaises(q_exc.IPPolicyNotFound):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=None))
    def test_update_ip_policy_with_both_network_and_subnet_ids(self):
        """Passing both network_ids and subnet_ids raises BadRequest."""
        ipp = dict(id=1, subnets=[])
        with self._stubs(ipp):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(network_ids=[1], subnet_ids=[1])))
    def test_update_ip_policy_subnets_not_found(self):
        """Unknown subnet ids raise SubnetNotFound."""
        ipp = dict(id=1, subnets=[])
        with self._stubs(ipp):
            with self.assertRaises(n_exc.SubnetNotFound):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(subnet_ids=[100])))
    def test_update_ip_policy_subnets_already_exists(self):
        """A subnet already carrying a policy raises IPPolicyAlreadyExists."""
        ipp = dict(id=1, subnets=[dict()])
        with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(subnet_ids=[100])))
    def test_update_ip_policy_subnets(self):
        """A policy can be re-targeted to an unclaimed subnet."""
        ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.0/24'], name=
            'foo', tenant_id=1)
        with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None)]
            ) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(subnet_ids=[100])))
            # ``called`` is a bool; True == 1.
            self.assertEqual(ip_policy_update.called, 1)
    def test_update_ip_policy_subnets_empty_exclude(self):
        """An empty exclude still yields the subnet's default cidrs."""
        ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.40/32'], name=
            'foo', tenant_id=1)
        with self._stubs(ipp, subnets=[dict(id=1, cidr='0.0.0.0/16',
            ip_policy=None)]) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(subnet_ids=[100], exclude=[])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                subnet_ids=[100], exclude=['0.0.0.0/32', '0.0.255.255/32'])
    def test_update_ip_policy_subnets_empty_exclude_without_subnet_ids(self):
        """Emptying exclude without subnet_ids uses the policy's own subnets."""
        ipp = dict(id=1, subnets=[dict(cidr='0.0.0.0/16')], exclude=[
            '0.0.0.40/32'], name='foo', tenant_id=1)
        with self._stubs(ipp) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(exclude=[])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                exclude=['0.0.0.0/32', '0.0.255.255/32'])
    def test_update_ip_policy_networks_not_found(self):
        """Unknown network ids raise NetworkNotFound."""
        ipp = dict(id=1, networks=[])
        with self._stubs(ipp):
            with self.assertRaises(n_exc.NetworkNotFound):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(network_ids=[100])))
    def test_update_ip_policy_networks(self):
        """A policy can be re-targeted to an unclaimed network."""
        ipp = dict(id=1, networks=[dict()], exclude=['0.0.0.0/24'], name=
            'foo', tenant_id=1)
        with self._stubs(ipp, networks=[dict(id=1, ip_policy=None)]
            ) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(network_ids=[100])))
            # ``called`` is a bool; True == 1.
            self.assertEqual(ip_policy_update.called, 1)
    def test_update_ip_policy_exclude_v4(self):
        """Default v4 network/broadcast cidrs are appended to the exclude."""
        subnets = [dict(id=100, cidr='0.0.0.0/16')]
        ipp = dict(id=1, subnets=subnets, exclude=['0.0.0.0/24'], name=
            'foo', tenant_id=1)
        with self._stubs(ipp, subnets=subnets) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(subnet_ids=[100], exclude=['0.0.0.1/32'])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                subnet_ids=[100], exclude=['0.0.0.1/32', '0.0.0.0/32',
                '0.0.255.255/32'])
    def test_update_ip_policy_exclude_v6(self):
        """Default first/last v6 cidrs are appended to the exclude."""
        subnets = [dict(id=100, cidr='::/64')]
        ipp = dict(id=1, subnets=subnets, exclude=['::/128'], name='foo',
            tenant_id=1)
        with self._stubs(ipp, subnets=subnets) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(subnet_ids=[100], exclude=['::1/128'])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                subnet_ids=[100], exclude=['::1/128', '::/128',
                '::ffff:ffff:ffff:ffff/128'])
class TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):
    """Tests for the plugin's delete_ip_policy entry point."""
    @contextlib.contextmanager
    def _stubs(self, ip_policy):
        """Patch the db api's ip_policy find/delete; yield both mocks."""
        db_mod = 'quark.db.api'
        with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
            mock.patch('%s.ip_policy_delete' % db_mod)) as (ip_policy_find,
            ip_policy_delete):
            ip_policy_find.return_value = ip_policy
            yield ip_policy_find, ip_policy_delete
    def test_delete_ip_policy_not_found(self):
        """An unknown policy id raises IPPolicyNotFound."""
        with self._stubs(None):
            with self.assertRaises(q_exc.IPPolicyNotFound):
                self.plugin.delete_ip_policy(self.context, 1)
    def test_delete_ip_policy_in_use(self):
        """A policy still attached to networks raises IPPolicyInUse."""
        with self._stubs(dict(networks=True)):
            with self.assertRaises(q_exc.IPPolicyInUse):
                self.plugin.delete_ip_policy(self.context, 1)
    def test_delete_ip_policy(self):
        """A detached policy is looked up once and deleted once."""
        ip_policy = dict(id=1, networks=[], subnets=[])
        with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):
            self.plugin.delete_ip_policy(self.context, 1)
            self.assertEqual(ip_policy_find.call_count, 1)
            self.assertEqual(ip_policy_delete.call_count, 1)
class TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):
    """Tests for update_ip_policy interacting with existing subnet routes."""
    @contextlib.contextmanager
    def _stubs(self, ip_policy, subnets=None, routes=None):
        """Patch ip_policy/subnet/route db lookups; yield the update mock."""
        subnets = subnets or []
        db_mod = 'quark.db.api'
        with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
            mock.patch('%s.subnet_find' % db_mod), mock.patch(
            '%s.route_find' % db_mod), mock.patch('%s.ip_policy_update' %
            db_mod)) as (ip_policy_find, subnet_find, route_find,
            ip_policy_update):
            ip_policy_find.return_value = ip_policy
            subnet_find.return_value = subnets
            route_find.return_value = routes
            yield ip_policy_update
    def test_update_ip_policy_has_route_conflict_raises(self):
        """Unexcluding a route's gateway address raises a gateway conflict."""
        subnet = dict(id=1, cidr='192.168.0.0/24')
        ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name
            ='foo', tenant_id=1)
        route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}
        with self._stubs(ipp, subnets=[subnet], routes=[route]):
            with self.assertRaises(n_exc_ext.GatewayConflictWithAllocationPools
                ):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(subnet_ids=[1], exclude=[])))
    def test_update_ip_policy_no_route_conflict(self):
        """An exclude that still covers the gateway must not raise."""
        subnet = dict(id=1, cidr='192.168.0.0/24')
        ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name
            ='foo', tenant_id=1)
        route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}
        with self._stubs(ipp, subnets=[subnet], routes=[route]):
            try:
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(subnet_ids=[1], exclude=['192.168.0.0/24']))
                    )
            except Exception as e:
                self.fail("This shouldn't have raised: %s" % e)
class TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):
    """Tests for ippol._validate_cidrs_fit_into_subnets."""
    def test_normal_cidr_and_valid_subnet(self):
        """A /32 inside the subnet's /24 must not raise."""
        try:
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='192.168.0.0/24')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):
        """A v4 cidr paired with only a v6 subnet must not raise."""
        try:
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='::/96')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):
        """A v6 /128 inside the subnet's /96 must not raise."""
        try:
            ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
                cidr='::/96')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):
        """A v6 cidr paired with only a v4 subnet must not raise."""
        try:
            ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
                cidr='192.168.0.0/24')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_cidr_and_multiple_valid_subnet(self):
        """A cidr contained by every same-version subnet must not raise."""
        try:
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='192.168.0.0/24'), dict(id=2, cidr=
                '192.168.0.0/16')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):
        """A v6 cidr contained by every v6 subnet must not raise."""
        try:
            ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
                cidr='::/96'), dict(id=2, cidr='::/64')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_cidr_and_invalid_subnet(self):
        """A cidr outside the subnet raises BadRequest."""
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='10.10.10.0/24')])
    def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):
        """A v6 cidr wider than the subnet raises BadRequest."""
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(['::/64'], [dict(id=1,
                cidr='::/96')])
    def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):
        """One non-containing subnet is enough to raise BadRequest."""
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='10.10.10.0/24'), dict(id=1, cidr=
                '192.168.0.0/24')])
    def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):
        """One non-containing v6 subnet is enough to raise BadRequest."""
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(['::/127'], [dict(id=1,
                cidr='::/96'), dict(id=1, cidr='::/128')])
class TestQuarkEnsureDefaultPolicy(test_base.TestBase):
    """Tests for ippol.ensure_default_policy.

    The function mutates ``cidrs`` in place, appending each subnet's
    default excluded addresses (network/broadcast for v4, first/last
    address for v6) when not already present, and returns None.
    """

    def test_no_cidrs_no_subnets(self):
        """Nothing to do: both lists stay empty."""
        cidrs = []
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, [])
        self.assertEqual(subnets, [])

    def test_no_cidrs_v4(self):
        """The v4 network/broadcast addresses are appended to an empty list."""
        cidrs = []
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])

    def test_no_subnets_v4(self):
        """With no subnets the existing cidrs are left untouched."""
        cidrs = ['192.168.10.0/32', '192.168.10.255/32']
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [])

    def test_cidrs_without_default_cidrs_v4(self):
        """Missing default v4 cidrs are appended after the existing ones."""
        cidrs = ['192.168.10.20/32', '192.168.10.40/32']
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.20/32', '192.168.10.40/32',
                                 '192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])

    def test_cidrs_with_default_cidrs_v4(self):
        """Default v4 cidrs already present are not duplicated."""
        cidrs = ['192.168.10.0/32', '192.168.10.255/32']
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])

    def test_no_cidrs_v6(self):
        """The first/last v6 addresses are appended to an empty list."""
        cidrs = []
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])

    def test_no_subnets_v6(self):
        """With no subnets the existing v6 cidrs are left untouched."""
        cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [])

    def test_cidrs_without_default_cidrs_v6(self):
        """Missing default v6 cidrs are appended after the existing ones."""
        cidrs = ['::10/128', '::20/128']
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::10/128', '::20/128', '::/128',
                                 '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])

    def test_cidrs_with_default_cidrs_v6(self):
        """Default v6 cidrs already present are not duplicated."""
        cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])

    def test_no_duplicates_in_result_when_called_twice(self):
        """Two independent calls each yield the defaults exactly once."""
        cidrs = ['192.168.10.10/32']
        subnets = [dict(cidr='192.168.10.0/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',
                                 '192.168.10.255/32'])
        cidrs2 = ['192.168.10.10/32']
        self.assertIsNone(ippol.ensure_default_policy(cidrs2, subnets))
        # Bug fix: this assertion previously re-checked ``cidrs`` (which the
        # second call never touches) instead of ``cidrs2``, so the second
        # call's no-duplicates behavior was never actually verified.
        self.assertEqual(cidrs2, ['192.168.10.10/32', '192.168.10.0/32',
                                  '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.0/24')])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestQuarkCreateIpPolicies(test_quark_plugin.TestQuarkPlugin):
    @contextlib.contextmanager
    def _stubs(self, ip_policy, subnets=None, nets=None):
        """Patch subnet/network/route lookups and ip_policy_create.

        Yields the ip_policy_create mock; route_find always reports one
        route with nexthop 1.2.3.4.
        """
        db_mod = 'quark.db.api'
        with contextlib.nested(mock.patch('%s.subnet_find' % db_mod), mock.
            patch('%s.network_find' % db_mod), mock.patch(
            '%s.ip_policy_create' % db_mod), mock.patch('%s.route_find' %
            db_mod)) as (subnet_find, net_find, ip_policy_create, route_find):
            subnet_find.return_value = subnets if subnets else None
            net_find.return_value = nets if nets else None
            ip_policy_create.return_value = ip_policy
            route_find.return_value = [{'nexthop': '1.2.3.4'}]
            yield ip_policy_create
<|reserved_special_token_0|>
    def test_create_ip_policy_with_both_network_and_subnet_ids(self):
        """Supplying both network_ids and subnet_ids raises BadRequest."""
        with self._stubs(None):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(network_ids=[1], subnet_ids=[1])))
    def test_create_ip_policy_invalid_body_missing_netsubnet(self):
        """A body with neither network_ids nor subnet_ids raises BadRequest."""
        with self._stubs(None):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(exclude=['1.1.1.1/24'])))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def test_create_ip_policy_network_ip_policy_already_exists(self):
        """A network that already has a policy raises IPPolicyAlreadyExists."""
        with self._stubs(None, nets=[dict(id=1, ip_policy=dict(id=2),
            subnets=[dict(id=1, cidr='1.1.1.1/16')])]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(network_ids=[1], exclude=['1.1.1.1/24'])))
    def test_create_ip_policy_subnet_ip_policy_already_exists(self):
        """A subnet that already has a policy raises IPPolicyAlreadyExists."""
        with self._stubs(None, subnets=[dict(id=1, ip_policy=dict(id=2),
            cidr='1.1.1.1/16')]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(subnet_ids=[1], exclude=['1.1.1.1/24'])))
    def test_create_ip_policy_network(self):
        """Creating against a network that already has a policy raises."""
        ipp = dict(subnet_id=None, network_id=1, exclude=['1.1.1.1/24'])
        with self._stubs(ipp, nets=[dict(id=1, ip_policy=dict(id=2),
            subnets=[dict(id=1, cidr='1.1.1.1/16')])]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(network_ids=[ipp['network_id']], exclude=ipp[
                    'exclude'])))
    def test_create_ip_policy_subnet(self):
        """Creating against a subnet that already has a policy raises."""
        ipp = dict(subnet_id=1, network_id=None, exclude=['1.1.1.1/24'])
        with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=2),
            cidr='1.1.1.1/16')]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(subnet_ids=[ipp['subnet_id']], exclude=ipp[
                    'exclude'])))
    def test_create_ip_policy_with_cidr_that_does_not_fit_into_subnet(self):
        """An exclude cidr outside the subnet's cidr raises BadRequest."""
        ipp = dict(subnets=[dict(id=1, version=4, cidr='192.168.1.1/24')],
            networks=[], id=1, tenant_id=1, exclude=['10.10.10.100/32'],
            name='foo')
        with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None, version=
            ipp['subnets'][0]['version'], cidr=ipp['subnets'][0]['cidr'])]):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.create_ip_policy(self.context, dict(ip_policy=
                    dict(subnet_ids=[1], exclude=ipp['exclude'])))
    def test_create_ip_policy_with_ipv6_subnet_cidr(self):
        """A v6 exclude inside a v6 subnet is accepted and echoed back."""
        ipp = dict(subnets=[dict(id=1, version=6, cidr='::/64')], networks=
            [], id=1, tenant_id=1, exclude=[dict(cidr='::/128')], name='foo')
        with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None, version=
            ipp['subnets'][0]['version'], cidr=ipp['subnets'][0]['cidr'])]):
            exclude = [ippc['cidr'] for ippc in ipp['exclude']]
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(subnet_ids=[1], exclude=exclude)))
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp['subnet_ids'], [1])
            self.assertEqual(resp['network_ids'], [])
            self.assertEqual(resp['exclude'], ['::/128'])
            self.assertEqual(resp['name'], 'foo')
            self.assertEqual(resp['tenant_id'], 1)
    def test_create_ip_policy(self):
        """Happy path: a subnet-scoped policy is created and echoed back."""
        ipp = dict(subnets=[dict(id=1, cidr='0.0.0.0/16')], networks=[], id
            =1, tenant_id=1, exclude=[dict(cidr='0.0.0.0/24')], name='foo')
        with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None, cidr=ipp[
            'subnets'][0]['cidr'])]):
            exclude = [ippc['cidr'] for ippc in ipp['exclude']]
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(subnet_ids=[1], exclude=exclude)))
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp['subnet_ids'], [1])
            self.assertEqual(resp['network_ids'], [])
            self.assertEqual(resp['exclude'], ['0.0.0.0/24'])
            self.assertEqual(resp['name'], 'foo')
            self.assertEqual(resp['tenant_id'], 1)
    def test_create_ip_policy_only_called_once_with_multiple_networks(self):
        """One db create call covers several target networks."""
        ipp = dict(subnets=[], networks=[dict(id=1, subnets=[dict(id=1,
            ip_policy=None, cidr='0.0.0.0/24')]), dict(id=2, subnets=[dict(
            id=2, ip_policy=None, cidr='0.0.0.0/24')])], id=1, tenant_id=1,
            exclude=[dict(cidr='0.0.0.1/32')], name='foo')
        with self._stubs(ipp, nets=ipp['networks']) as ip_policy_create:
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(network_ids=[1, 2], exclude=['0.0.0.1/32'])))
            # Default network/broadcast cidrs are appended before the create.
            exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.0.255/32']
            ip_policy_create.assert_called_once_with(self.context, exclude=
                exclude, networks=[{'subnets': [{'cidr': '0.0.0.0/24',
                'ip_policy': None, 'id': 1}], 'id': 1}, {'subnets': [{
                'cidr': '0.0.0.0/24', 'ip_policy': None, 'id': 2}], 'id': 2}])
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp['subnet_ids'], [])
            self.assertEqual(resp['network_ids'], [1, 2])
            self.assertEqual(resp['exclude'], ['0.0.0.1/32'])
            self.assertEqual(resp['name'], 'foo')
            self.assertEqual(resp['tenant_id'], 1)
    def test_create_ip_policy_only_called_once_with_multiple_subnets(self):
        """One db create call covers several target subnets."""
        ipp = dict(subnets=[dict(id=3, cidr='0.0.0.0/16'), dict(id=4, cidr=
            '0.0.0.0/16')], networks=[], id=1, tenant_id=1, exclude=[dict(
            cidr='0.0.0.1/32')], name='foo')
        with self._stubs(ipp, subnets=ipp['subnets']) as ip_policy_create:
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(subnet_ids=[3, 4], exclude=['0.0.0.1/32'])))
            # Default network/broadcast cidrs are appended before the create.
            exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.255.255/32']
            ip_policy_create.assert_called_once_with(self.context, exclude=
                exclude, subnets=[{'cidr': '0.0.0.0/16', 'id': 3}, {'cidr':
                '0.0.0.0/16', 'id': 4}])
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp['subnet_ids'], [3, 4])
            self.assertEqual(resp['network_ids'], [])
            self.assertEqual(resp['exclude'], ['0.0.0.1/32'])
            self.assertEqual(resp['name'], 'foo')
            self.assertEqual(resp['tenant_id'], 1)
class TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ip_policy, subnets=None, networks=None):
if not subnets:
subnets = []
if not networks:
networks = []
db_mod = 'quark.db.api'
with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
mock.patch('%s.subnet_find' % db_mod), mock.patch(
'%s.network_find' % db_mod), mock.patch('%s.ip_policy_update' %
db_mod)) as (ip_policy_find, subnet_find, network_find,
ip_policy_update):
ip_policy_find.return_value = ip_policy
subnet_find.return_value = subnets
network_find.return_value = networks
yield ip_policy_update
def test_update_ip_policy_not_found(self):
with self._stubs(None):
with self.assertRaises(q_exc.IPPolicyNotFound):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=None))
def test_update_ip_policy_with_both_network_and_subnet_ids(self):
ipp = dict(id=1, subnets=[])
with self._stubs(ipp):
with self.assertRaises(n_exc.BadRequest):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(network_ids=[1], subnet_ids=[1])))
def test_update_ip_policy_subnets_not_found(self):
ipp = dict(id=1, subnets=[])
with self._stubs(ipp):
with self.assertRaises(n_exc.SubnetNotFound):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(subnet_ids=[100])))
def test_update_ip_policy_subnets_already_exists(self):
ipp = dict(id=1, subnets=[dict()])
with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]):
with self.assertRaises(q_exc.IPPolicyAlreadyExists):
self.plugin.update_ip_policy(self.context, 1, dict(
ip_policy=dict(subnet_ids=[100])))
def test_update_ip_policy_subnets(self):
ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.0/24'], name=
'foo', tenant_id=1)
with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None)]
) as ip_policy_update:
self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
dict(subnet_ids=[100])))
self.assertEqual(ip_policy_update.called, 1)
def test_update_ip_policy_subnets_empty_exclude(self):
ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.40/32'], name=
'foo', tenant_id=1)
with self._stubs(ipp, subnets=[dict(id=1, cidr='0.0.0.0/16',
ip_policy=None)]) as ip_policy_update:
self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
dict(subnet_ids=[100], exclude=[])))
ip_policy_update.assert_called_once_with(self.context, ipp,
subnet_ids=[100], exclude=['0.0.0.0/32', '0.0.255.255/32'])
    def test_update_ip_policy_subnets_empty_exclude_without_subnet_ids(self):
        """Without subnet_ids, defaults come from the policy's own subnets."""
        ipp = dict(id=1, subnets=[dict(cidr='0.0.0.0/16')], exclude=[
            '0.0.0.40/32'], name='foo', tenant_id=1)
        with self._stubs(ipp) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(exclude=[])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                exclude=['0.0.0.0/32', '0.0.255.255/32'])
    def test_update_ip_policy_networks_not_found(self):
        """Unknown network ids raise NetworkNotFound (network_find stubbed empty)."""
        ipp = dict(id=1, networks=[])
        with self._stubs(ipp):
            with self.assertRaises(n_exc.NetworkNotFound):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(network_ids=[100])))
def test_update_ip_policy_networks(self):
ipp = dict(id=1, networks=[dict()], exclude=['0.0.0.0/24'], name=
'foo', tenant_id=1)
with self._stubs(ipp, networks=[dict(id=1, ip_policy=None)]
) as ip_policy_update:
self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
dict(network_ids=[100])))
self.assertEqual(ip_policy_update.called, 1)
    def test_update_ip_policy_exclude_v4(self):
        """Caller-supplied v4 excludes are augmented with subnet defaults."""
        subnets = [dict(id=100, cidr='0.0.0.0/16')]
        ipp = dict(id=1, subnets=subnets, exclude=['0.0.0.0/24'], name=
            'foo', tenant_id=1)
        with self._stubs(ipp, subnets=subnets) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(subnet_ids=[100], exclude=['0.0.0.1/32'])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                subnet_ids=[100], exclude=['0.0.0.1/32', '0.0.0.0/32',
                '0.0.255.255/32'])
    def test_update_ip_policy_exclude_v6(self):
        """Caller-supplied v6 excludes are augmented with subnet defaults."""
        subnets = [dict(id=100, cidr='::/64')]
        ipp = dict(id=1, subnets=subnets, exclude=['::/128'], name='foo',
            tenant_id=1)
        with self._stubs(ipp, subnets=subnets) as ip_policy_update:
            self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=
                dict(subnet_ids=[100], exclude=['::1/128'])))
            ip_policy_update.assert_called_once_with(self.context, ipp,
                subnet_ids=[100], exclude=['::1/128', '::/128',
                '::ffff:ffff:ffff:ffff/128'])
class TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):
    """Tests for plugin.delete_ip_policy."""
    @contextlib.contextmanager
    def _stubs(self, ip_policy):
        """Patch the db layer so ip_policy_find returns the canned policy."""
        db_mod = 'quark.db.api'
        with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
            mock.patch('%s.ip_policy_delete' % db_mod)) as (ip_policy_find,
            ip_policy_delete):
            ip_policy_find.return_value = ip_policy
            yield ip_policy_find, ip_policy_delete
    def test_delete_ip_policy_not_found(self):
        """Deleting a missing policy raises IPPolicyNotFound."""
        with self._stubs(None):
            with self.assertRaises(q_exc.IPPolicyNotFound):
                self.plugin.delete_ip_policy(self.context, 1)
    def test_delete_ip_policy_in_use(self):
        """A policy still bound to networks raises IPPolicyInUse."""
        with self._stubs(dict(networks=True)):
            with self.assertRaises(q_exc.IPPolicyInUse):
                self.plugin.delete_ip_policy(self.context, 1)
    def test_delete_ip_policy(self):
        """An unbound policy is looked up once and deleted once."""
        ip_policy = dict(id=1, networks=[], subnets=[])
        with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):
            self.plugin.delete_ip_policy(self.context, 1)
            self.assertEqual(ip_policy_find.call_count, 1)
            self.assertEqual(ip_policy_delete.call_count, 1)
class TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):
    """Interaction between policy excludes and existing subnet routes."""
    @contextlib.contextmanager
    def _stubs(self, ip_policy, subnets=None, routes=None):
        """Patch the db layer; yield the ip_policy_update mock."""
        subnets = subnets or []
        db_mod = 'quark.db.api'
        with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),
            mock.patch('%s.subnet_find' % db_mod), mock.patch(
            '%s.route_find' % db_mod), mock.patch('%s.ip_policy_update' %
            db_mod)) as (ip_policy_find, subnet_find, route_find,
            ip_policy_update):
            ip_policy_find.return_value = ip_policy
            subnet_find.return_value = subnets
            route_find.return_value = routes
            yield ip_policy_update
    def test_update_ip_policy_has_route_conflict_raises(self):
        """Leaving a route's gateway allocatable conflicts and raises."""
        subnet = dict(id=1, cidr='192.168.0.0/24')
        ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name
            ='foo', tenant_id=1)
        route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}
        with self._stubs(ipp, subnets=[subnet], routes=[route]):
            with self.assertRaises(n_exc_ext.GatewayConflictWithAllocationPools
                ):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(subnet_ids=[1], exclude=[])))
    def test_update_ip_policy_no_route_conflict(self):
        """Excluding the whole subnet covers the gateway, so no conflict."""
        subnet = dict(id=1, cidr='192.168.0.0/24')
        ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name
            ='foo', tenant_id=1)
        route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}
        with self._stubs(ipp, subnets=[subnet], routes=[route]):
            try:
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(subnet_ids=[1], exclude=['192.168.0.0/24']))
                    )
            except Exception as e:
                self.fail("This shouldn't have raised: %s" % e)
class TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):
    """Tests for ippol._validate_cidrs_fit_into_subnets.

    A CIDR "fits" when at least one listed subnet of the same IP version
    contains it; mixed-version pairs are accepted without comparison, per
    the ipv4-vs-ipv6 cases below.
    """
    def test_normal_cidr_and_valid_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='192.168.0.0/24')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='::/96')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
                cidr='::/96')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
                cidr='192.168.0.0/24')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_cidr_and_multiple_valid_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='192.168.0.0/24'), dict(id=2, cidr=
                '192.168.0.0/16')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,
                cidr='::/96'), dict(id=2, cidr='::/64')])
        except Exception:
            self.fail('Should not have failed')
    def test_normal_cidr_and_invalid_subnet(self):
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='10.10.10.0/24')])
    def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(['::/64'], [dict(id=1,
                cidr='::/96')])
    def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [
                dict(id=1, cidr='10.10.10.0/24'), dict(id=1, cidr=
                '192.168.0.0/24')])
    def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(['::/127'], [dict(id=1,
                cidr='::/96'), dict(id=1, cidr='::/128')])
class TestQuarkEnsureDefaultPolicy(test_base.TestBase):
    """Tests for ippol.ensure_default_policy.

    ensure_default_policy mutates ``cidrs`` in place, appending each
    subnet's network and broadcast addresses unless already present, and
    returns None.
    """
    def test_no_cidrs_no_subnets(self):
        """Both inputs empty: nothing is added."""
        cidrs = []
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, [])
        self.assertEqual(subnets, [])
    def test_no_cidrs_v4(self):
        """v4 network/broadcast defaults are appended to an empty list."""
        cidrs = []
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])
    def test_no_subnets_v4(self):
        """No subnets: existing cidrs are left untouched."""
        cidrs = ['192.168.10.0/32', '192.168.10.255/32']
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [])
    def test_cidrs_without_default_cidrs_v4(self):
        """Missing v4 defaults are appended after the caller's cidrs."""
        cidrs = ['192.168.10.20/32', '192.168.10.40/32']
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.20/32', '192.168.10.40/32',
            '192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])
    def test_cidrs_with_default_cidrs_v4(self):
        """Defaults already present are not appended again."""
        cidrs = ['192.168.10.0/32', '192.168.10.255/32']
        subnets = [dict(cidr='192.168.10.1/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])
    def test_no_cidrs_v6(self):
        """v6 network/broadcast defaults are appended to an empty list."""
        cidrs = []
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])
    def test_no_subnets_v6(self):
        """No subnets: existing v6 cidrs are left untouched."""
        cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [])
    def test_cidrs_without_default_cidrs_v6(self):
        """Missing v6 defaults are appended after the caller's cidrs."""
        cidrs = ['::10/128', '::20/128']
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::10/128', '::20/128', '::/128',
            '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])
    def test_cidrs_with_default_cidrs_v6(self):
        """v6 defaults already present are not appended again."""
        cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']
        subnets = [dict(cidr='::/64')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])
        self.assertEqual(subnets, [dict(cidr='::/64')])
    def test_no_duplicates_in_result_when_called_twice(self):
        """Calling twice on the same list must not duplicate defaults.

        Fix: the original made its second call on a fresh ``cidrs2`` list
        but then re-asserted the untouched ``cidrs``, so the deduplication
        this test is named for was never exercised.  Call a second time on
        the already-augmented list instead.
        """
        cidrs = ['192.168.10.10/32']
        subnets = [dict(cidr='192.168.10.0/24')]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',
            '192.168.10.255/32'])
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',
            '192.168.10.255/32'])
        self.assertEqual(subnets, [dict(cidr='192.168.10.0/24')])
<|reserved_special_token_1|>
# Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.common import exceptions as n_exc_ext
from neutron_lib import exceptions as n_exc
from quark import exceptions as q_exc
from quark.plugin_modules import ip_policies as ippol
from quark.tests import test_base
from quark.tests import test_quark_plugin
class TestQuarkGetIpPolicies(test_quark_plugin.TestQuarkPlugin):
    """Tests for plugin.get_ip_policy and get_ip_policies."""
    @contextlib.contextmanager
    def _stubs(self, ip_policy):
        """Patch db api ip_policy_find to return the canned value."""
        db_mod = "quark.db.api"
        with mock.patch("%s.ip_policy_find" % db_mod) as ip_policy_find:
            ip_policy_find.return_value = ip_policy
            yield
    def test_get_ip_policy_not_found(self):
        """Fetching a missing policy raises IPPolicyNotFound."""
        with self._stubs(None):
            with self.assertRaises(q_exc.IPPolicyNotFound):
                self.plugin.get_ip_policy(self.context, 1)
    def test_get_ip_policy(self):
        """A found policy serializes to exactly six flattened fields."""
        ip_policy = dict(
            id=1,
            tenant_id=1,
            name="foo",
            subnets=[dict(id=1)],
            networks=[dict(id=2)],
            exclude=[dict(cidr="0.0.0.0/32")])
        with self._stubs(ip_policy):
            resp = self.plugin.get_ip_policy(self.context, 1)
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp["id"], 1)
            self.assertEqual(resp["name"], "foo")
            self.assertEqual(resp["subnet_ids"], [1])
            self.assertEqual(resp["network_ids"], [2])
            self.assertEqual(resp["exclude"], ["0.0.0.0/32"])
            self.assertEqual(resp["tenant_id"], 1)
    def test_get_ip_policies(self):
        """Listing returns one serialized dict per policy."""
        ip_policy = dict(
            id=1,
            tenant_id=1,
            name="foo",
            subnets=[dict(id=1)],
            networks=[dict(id=2)],
            exclude=[dict(cidr="0.0.0.0/32")])
        with self._stubs([ip_policy]):
            resp = self.plugin.get_ip_policies(self.context)
            self.assertEqual(len(resp), 1)
            resp = resp[0]
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp["id"], 1)
            self.assertEqual(resp["subnet_ids"], [1])
            self.assertEqual(resp["network_ids"], [2])
            self.assertEqual(resp["exclude"], ["0.0.0.0/32"])
            self.assertEqual(resp["name"], "foo")
            self.assertEqual(resp["tenant_id"], 1)
class TestQuarkCreateIpPolicies(test_quark_plugin.TestQuarkPlugin):
    """Tests for plugin.create_ip_policy.

    NOTE(review): test_create_ip_policy_network and
    test_create_ip_policy_subnet are misleadingly named -- both stub a
    pre-existing policy and assert IPPolicyAlreadyExists rather than a
    successful create; consider renaming them.
    """
    @contextlib.contextmanager
    def _stubs(self, ip_policy, subnets=None, nets=None):
        """Patch the db layer; yield the ip_policy_create mock."""
        db_mod = "quark.db.api"
        with contextlib.nested(
            mock.patch("%s.subnet_find" % db_mod),
            mock.patch("%s.network_find" % db_mod),
            mock.patch("%s.ip_policy_create" % db_mod),
            mock.patch("%s.route_find" % db_mod)
        ) as (subnet_find, net_find, ip_policy_create, route_find):
            subnet_find.return_value = subnets if subnets else None
            net_find.return_value = nets if nets else None
            ip_policy_create.return_value = ip_policy
            route_find.return_value = [{"nexthop": "1.2.3.4"}]
            yield ip_policy_create
    def test_create_ip_policy_invalid_body_missing_exclude(self):
        """An ip_policy body without an exclude list is a BadRequest."""
        with self._stubs(None):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict()))
    def test_create_ip_policy_with_both_network_and_subnet_ids(self):
        """Supplying both network_ids and subnet_ids is a BadRequest."""
        with self._stubs(None):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(network_ids=[1], subnet_ids=[1])))
    def test_create_ip_policy_invalid_body_missing_netsubnet(self):
        """Exclude without network_ids or subnet_ids is a BadRequest."""
        with self._stubs(None):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(exclude=["1.1.1.1/24"])))
    def test_create_ip_policy_invalid_subnet(self):
        """Unknown subnet ids raise SubnetNotFound."""
        with self._stubs(None):
            with self.assertRaises(n_exc.SubnetNotFound):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(subnet_ids=[1],
                                   exclude=["1.1.1.1/24"])))
    def test_create_ip_policy_invalid_network(self):
        """Unknown network ids raise NetworkNotFound."""
        with self._stubs(None):
            with self.assertRaises(n_exc.NetworkNotFound):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(network_ids=[1],
                                   exclude=["1.1.1.1/24"])))
    def test_create_ip_policy_network_ip_policy_already_exists(self):
        """A network that already carries a policy rejects a new one."""
        with self._stubs(None, nets=[dict(id=1, ip_policy=dict(id=2),
                                          subnets=[dict(id=1,
                                                        cidr="1.1.1.1/16")])]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(network_ids=[1],
                                   exclude=["1.1.1.1/24"])))
    def test_create_ip_policy_subnet_ip_policy_already_exists(self):
        """A subnet that already carries a policy rejects a new one."""
        with self._stubs(None, subnets=[dict(id=1, ip_policy=dict(id=2),
                                             cidr="1.1.1.1/16")]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(subnet_ids=[1],
                                   exclude=["1.1.1.1/24"])))
    def test_create_ip_policy_network(self):
        # NOTE(review): despite the name, this exercises the
        # already-exists failure path (the stubbed network has a policy).
        ipp = dict(subnet_id=None, network_id=1,
                   exclude=["1.1.1.1/24"])
        with self._stubs(ipp, nets=[dict(id=1, ip_policy=dict(id=2),
                                         subnets=[dict(id=1,
                                                       cidr="1.1.1.1/16")])]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(network_ids=[ipp["network_id"]],
                                   exclude=ipp["exclude"])))
    def test_create_ip_policy_subnet(self):
        # NOTE(review): despite the name, this exercises the
        # already-exists failure path (the stubbed subnet has a policy).
        ipp = dict(subnet_id=1, network_id=None,
                   exclude=["1.1.1.1/24"])
        with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=2),
                                            cidr="1.1.1.1/16")]):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(subnet_ids=[ipp["subnet_id"]],
                                   exclude=ipp["exclude"])))
    def test_create_ip_policy_with_cidr_that_does_not_fit_into_subnet(self):
        """Exclude CIDRs outside the target subnet are a BadRequest."""
        ipp = dict(
            subnets=[dict(id=1, version=4, cidr="192.168.1.1/24")],
            networks=[],
            id=1,
            tenant_id=1,
            exclude=["10.10.10.100/32"],
            name="foo")
        with self._stubs(ipp,
                         subnets=[dict(id=1, ip_policy=None,
                                       version=ipp["subnets"][0]["version"],
                                       cidr=ipp["subnets"][0]["cidr"])]):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.create_ip_policy(self.context, dict(
                    ip_policy=dict(subnet_ids=[1],
                                   exclude=ipp["exclude"])))
    def test_create_ip_policy_with_ipv6_subnet_cidr(self):
        """A v6 exclude within a v6 subnet creates successfully."""
        ipp = dict(
            subnets=[dict(id=1, version=6, cidr='::/64')],
            networks=[],
            id=1,
            tenant_id=1,
            exclude=[dict(cidr="::/128")],
            name="foo")
        with self._stubs(ipp,
                         subnets=[dict(id=1, ip_policy=None,
                                       version=ipp["subnets"][0]["version"],
                                       cidr=ipp["subnets"][0]["cidr"])]):
            exclude = [ippc["cidr"] for ippc in ipp["exclude"]]
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(subnet_ids=[1], exclude=exclude)))
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp["subnet_ids"], [1])
            self.assertEqual(resp["network_ids"], [])
            # NOTE(jmeridth): below is mocked that way, so it won't get
            # additional default policies in exclude
            # ippol.ensure_default_policy is tested below in this file
            self.assertEqual(resp["exclude"], ["::/128"])
            self.assertEqual(resp["name"], "foo")
            self.assertEqual(resp["tenant_id"], 1)
    def test_create_ip_policy(self):
        """Happy path: a v4 exclude within a v4 subnet creates successfully."""
        ipp = dict(
            subnets=[dict(id=1, cidr='0.0.0.0/16')],
            networks=[],
            id=1,
            tenant_id=1,
            exclude=[dict(cidr="0.0.0.0/24")],
            name="foo")
        with self._stubs(ipp, subnets=[dict(
                id=1, ip_policy=None, cidr=ipp["subnets"][0]["cidr"])]):
            exclude = [ippc["cidr"] for ippc in ipp["exclude"]]
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(subnet_ids=[1], exclude=exclude)))
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp["subnet_ids"], [1])
            self.assertEqual(resp["network_ids"], [])
            # NOTE(jmeridth): below is mocked that way, so it won't get
            # additional default policies in exclude
            # ippol.ensure_default_policy is tested below in this file
            self.assertEqual(resp["exclude"], ["0.0.0.0/24"])
            self.assertEqual(resp["name"], "foo")
            self.assertEqual(resp["tenant_id"], 1)
    def test_create_ip_policy_only_called_once_with_multiple_networks(self):
        """Multiple networks result in a single ip_policy_create call."""
        ipp = dict(
            subnets=[],
            networks=[dict(id=1, subnets=[dict(id=1,
                      ip_policy=None, cidr='0.0.0.0/24')]),
                      dict(id=2, subnets=[dict(id=2,
                      ip_policy=None, cidr='0.0.0.0/24')])],
            id=1,
            tenant_id=1,
            exclude=[dict(cidr="0.0.0.1/32")],
            name="foo")
        with self._stubs(ipp, nets=ipp["networks"]) as (ip_policy_create):
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(network_ids=[1, 2], exclude=["0.0.0.1/32"])))
            exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.0.255/32']
            ip_policy_create.assert_called_once_with(
                self.context, exclude=exclude,
                networks=[{'subnets':
                           [{'cidr': '0.0.0.0/24', 'ip_policy': None,
                             'id': 1}], 'id': 1},
                          {'subnets':
                           [{'cidr': '0.0.0.0/24', 'ip_policy': None,
                             'id': 2}], 'id': 2}])
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp["subnet_ids"], [])
            self.assertEqual(resp["network_ids"], [1, 2])
            # NOTE(jmeridth): below is mocked that way, so it won't get
            # additional default policies in exclude
            # ippol.ensure_default_policy is tested below in this file
            self.assertEqual(resp["exclude"], ["0.0.0.1/32"])
            self.assertEqual(resp["name"], "foo")
            self.assertEqual(resp["tenant_id"], 1)
    def test_create_ip_policy_only_called_once_with_multiple_subnets(self):
        """Multiple subnets result in a single ip_policy_create call."""
        ipp = dict(
            subnets=[dict(id=3, cidr='0.0.0.0/16'),
                     dict(id=4, cidr='0.0.0.0/16')],
            networks=[],
            id=1,
            tenant_id=1,
            exclude=[dict(cidr="0.0.0.1/32")],
            name="foo")
        with self._stubs(ipp, subnets=ipp["subnets"]) as (ip_policy_create):
            resp = self.plugin.create_ip_policy(self.context, dict(
                ip_policy=dict(subnet_ids=[3, 4], exclude=["0.0.0.1/32"])))
            exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.255.255/32']
            ip_policy_create.assert_called_once_with(
                self.context, exclude=exclude,
                subnets=[{'cidr': '0.0.0.0/16', 'id': 3},
                         {'cidr': '0.0.0.0/16', 'id': 4}])
            self.assertEqual(len(resp.keys()), 6)
            self.assertEqual(resp["subnet_ids"], [3, 4])
            self.assertEqual(resp["network_ids"], [])
            # NOTE(jmeridth): below is mocked that way, so it won't get
            # additional default policies in exclude
            # ippol.ensure_default_policy is tested below in this file
            self.assertEqual(resp["exclude"], ["0.0.0.1/32"])
            self.assertEqual(resp["name"], "foo")
            self.assertEqual(resp["tenant_id"], 1)
class TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):
    """Tests for plugin.update_ip_policy.

    Fix applied: two tests compared ``Mock.called`` (a boolean) to the
    integer 1, which only passed because ``True == 1``; they now assert
    ``call_count`` explicitly.
    """
    @contextlib.contextmanager
    def _stubs(self, ip_policy, subnets=None, networks=None):
        """Patch the db layer; yield the ip_policy_update mock."""
        if not subnets:
            subnets = []
        if not networks:
            networks = []
        db_mod = "quark.db.api"
        with contextlib.nested(
            mock.patch("%s.ip_policy_find" % db_mod),
            mock.patch("%s.subnet_find" % db_mod),
            mock.patch("%s.network_find" % db_mod),
            mock.patch("%s.ip_policy_update" % db_mod),
        ) as (ip_policy_find, subnet_find, network_find, ip_policy_update):
            ip_policy_find.return_value = ip_policy
            subnet_find.return_value = subnets
            network_find.return_value = networks
            yield ip_policy_update
    def test_update_ip_policy_not_found(self):
        """Updating a nonexistent policy raises IPPolicyNotFound."""
        with self._stubs(None):
            with self.assertRaises(q_exc.IPPolicyNotFound):
                self.plugin.update_ip_policy(self.context, 1,
                                             dict(ip_policy=None))
    def test_update_ip_policy_with_both_network_and_subnet_ids(self):
        """Supplying both network_ids and subnet_ids is a BadRequest."""
        ipp = dict(id=1, subnets=[])
        with self._stubs(ipp):
            with self.assertRaises(n_exc.BadRequest):
                self.plugin.update_ip_policy(self.context, 1, dict(
                    ip_policy=dict(network_ids=[1], subnet_ids=[1])))
    def test_update_ip_policy_subnets_not_found(self):
        """Unknown subnet ids raise SubnetNotFound."""
        ipp = dict(id=1, subnets=[])
        with self._stubs(ipp):
            with self.assertRaises(n_exc.SubnetNotFound):
                self.plugin.update_ip_policy(
                    self.context,
                    1,
                    dict(ip_policy=dict(subnet_ids=[100])))
    def test_update_ip_policy_subnets_already_exists(self):
        """A subnet that already has a policy raises IPPolicyAlreadyExists."""
        ipp = dict(id=1, subnets=[dict()])
        with self._stubs(
            ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]
        ):
            with self.assertRaises(q_exc.IPPolicyAlreadyExists):
                self.plugin.update_ip_policy(
                    self.context,
                    1,
                    dict(ip_policy=dict(subnet_ids=[100])))
    def test_update_ip_policy_subnets(self):
        """A policy can be re-pointed at subnets that have no policy yet."""
        ipp = dict(id=1, subnets=[dict()],
                   exclude=["0.0.0.0/24"],
                   name="foo", tenant_id=1)
        with self._stubs(
            ipp, subnets=[dict(id=1, ip_policy=None)]
        ) as (ip_policy_update):
            self.plugin.update_ip_policy(
                self.context,
                1,
                dict(ip_policy=dict(subnet_ids=[100])))
            # Fixed: assert call_count rather than comparing the boolean
            # ``called`` attribute to 1.
            self.assertEqual(ip_policy_update.call_count, 1)
    def test_update_ip_policy_subnets_empty_exclude(self):
        """An empty exclude list is replaced by the subnet's v4 defaults."""
        ipp = dict(id=1, subnets=[dict()],
                   exclude=["0.0.0.40/32"],
                   name="foo", tenant_id=1)
        with self._stubs(
            ipp, subnets=[dict(id=1, cidr="0.0.0.0/16", ip_policy=None)]
        ) as (ip_policy_update):
            self.plugin.update_ip_policy(
                self.context,
                1,
                dict(ip_policy=dict(subnet_ids=[100], exclude=[])))
            ip_policy_update.assert_called_once_with(
                self.context, ipp, subnet_ids=[100], exclude=[
                    "0.0.0.0/32", "0.0.255.255/32"])
    def test_update_ip_policy_subnets_empty_exclude_without_subnet_ids(self):
        """Without subnet_ids, defaults come from the policy's own subnets."""
        ipp = dict(id=1, subnets=[dict(cidr="0.0.0.0/16")],
                   exclude=["0.0.0.40/32"],
                   name="foo", tenant_id=1)
        with self._stubs(ipp) as (ip_policy_update):
            self.plugin.update_ip_policy(
                self.context,
                1,
                dict(ip_policy=dict(exclude=[])))
            ip_policy_update.assert_called_once_with(
                self.context, ipp, exclude=["0.0.0.0/32", "0.0.255.255/32"])
    def test_update_ip_policy_networks_not_found(self):
        """Unknown network ids raise NetworkNotFound."""
        ipp = dict(id=1, networks=[])
        with self._stubs(ipp):
            with self.assertRaises(n_exc.NetworkNotFound):
                self.plugin.update_ip_policy(
                    self.context,
                    1,
                    dict(ip_policy=dict(network_ids=[100])))
    def test_update_ip_policy_networks(self):
        """A policy can be re-pointed at networks that have no policy yet."""
        ipp = dict(id=1, networks=[dict()],
                   exclude=["0.0.0.0/24"],
                   name="foo", tenant_id=1)
        with self._stubs(
            ipp, networks=[dict(id=1, ip_policy=None)]
        ) as (ip_policy_update):
            self.plugin.update_ip_policy(
                self.context,
                1,
                dict(ip_policy=dict(network_ids=[100])))
            # Fixed: assert call_count rather than comparing the boolean
            # ``called`` attribute to 1.
            self.assertEqual(ip_policy_update.call_count, 1)
    def test_update_ip_policy_exclude_v4(self):
        """Caller-supplied v4 excludes are augmented with subnet defaults."""
        subnets = [dict(id=100, cidr="0.0.0.0/16")]
        ipp = dict(id=1, subnets=subnets,
                   exclude=["0.0.0.0/24"],
                   name="foo", tenant_id=1)
        with self._stubs(ipp, subnets=subnets) as (ip_policy_update):
            self.plugin.update_ip_policy(
                self.context,
                1,
                dict(ip_policy=dict(subnet_ids=[100], exclude=["0.0.0.1/32"])))
            ip_policy_update.assert_called_once_with(
                self.context,
                ipp,
                subnet_ids=[100],
                exclude=["0.0.0.1/32", "0.0.0.0/32", "0.0.255.255/32"])
    def test_update_ip_policy_exclude_v6(self):
        """Caller-supplied v6 excludes are augmented with subnet defaults."""
        subnets = [dict(id=100, cidr="::/64")]
        ipp = dict(id=1, subnets=subnets,
                   exclude=["::/128"],
                   name="foo", tenant_id=1)
        with self._stubs(ipp, subnets=subnets) as (ip_policy_update):
            self.plugin.update_ip_policy(
                self.context,
                1,
                dict(ip_policy=dict(subnet_ids=[100], exclude=["::1/128"])))
            ip_policy_update.assert_called_once_with(
                self.context,
                ipp,
                subnet_ids=[100],
                exclude=["::1/128", "::/128", "::ffff:ffff:ffff:ffff/128"])
class TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):
    """Tests for plugin.delete_ip_policy."""
    @contextlib.contextmanager
    def _stubs(self, policy):
        """Patch the db layer so ip_policy_find returns *policy*."""
        with contextlib.nested(
            mock.patch("quark.db.api.ip_policy_find"),
            mock.patch("quark.db.api.ip_policy_delete"),
        ) as (find_mock, delete_mock):
            find_mock.return_value = policy
            yield find_mock, delete_mock
    def test_delete_ip_policy_not_found(self):
        """Deleting a missing policy raises IPPolicyNotFound."""
        with self._stubs(None):
            self.assertRaises(q_exc.IPPolicyNotFound,
                              self.plugin.delete_ip_policy, self.context, 1)
    def test_delete_ip_policy_in_use(self):
        """A policy still bound to networks raises IPPolicyInUse."""
        with self._stubs({"networks": True}):
            self.assertRaises(q_exc.IPPolicyInUse,
                              self.plugin.delete_ip_policy, self.context, 1)
    def test_delete_ip_policy(self):
        """An unbound policy is looked up once and deleted once."""
        unbound = {"id": 1, "networks": [], "subnets": []}
        with self._stubs(unbound) as (find_mock, delete_mock):
            self.plugin.delete_ip_policy(self.context, 1)
            self.assertEqual(find_mock.call_count, 1)
            self.assertEqual(delete_mock.call_count, 1)
class TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):
    """Interaction between policy excludes and existing subnet routes."""
    @contextlib.contextmanager
    def _stubs(self, ip_policy, subnets=None, routes=None):
        """Patch the db layer; yield the ip_policy_update mock."""
        subnets = subnets or []
        db_mod = "quark.db.api"
        with contextlib.nested(
            mock.patch("%s.ip_policy_find" % db_mod),
            mock.patch("%s.subnet_find" % db_mod),
            mock.patch("%s.route_find" % db_mod),
            mock.patch("%s.ip_policy_update" % db_mod),
        ) as (ip_policy_find, subnet_find, route_find, ip_policy_update):
            ip_policy_find.return_value = ip_policy
            subnet_find.return_value = subnets
            route_find.return_value = routes
            yield ip_policy_update
    def test_update_ip_policy_has_route_conflict_raises(self):
        """Leaving a route's gateway allocatable conflicts and raises."""
        subnet = dict(id=1, cidr="192.168.0.0/24")
        ipp = dict(id=1, subnets=[subnet], exclude=["192.168.0.1/32"],
                   name="foo", tenant_id=1)
        route = {"gateway": "192.168.0.1", "subnet_id": subnet["id"]}
        with self._stubs(ipp, subnets=[subnet], routes=[route]):
            with self.assertRaises(
                    n_exc_ext.GatewayConflictWithAllocationPools):
                self.plugin.update_ip_policy(
                    self.context, 1,
                    dict(ip_policy=dict(subnet_ids=[1], exclude=[])))
    def test_update_ip_policy_no_route_conflict(self):
        """Excluding the whole subnet covers the gateway, so no conflict."""
        subnet = dict(id=1, cidr="192.168.0.0/24")
        ipp = dict(id=1, subnets=[subnet], exclude=["192.168.0.1/32"],
                   name="foo", tenant_id=1)
        route = {"gateway": "192.168.0.1", "subnet_id": subnet["id"]}
        with self._stubs(ipp, subnets=[subnet], routes=[route]):
            try:
                self.plugin.update_ip_policy(
                    self.context, 1,
                    dict(ip_policy=dict(subnet_ids=[1],
                                        exclude=["192.168.0.0/24"])))
            except Exception as e:
                self.fail("This shouldn't have raised: %s" % e)
class TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):
    """Tests for ippol._validate_cidrs_fit_into_subnets.

    A CIDR "fits" when at least one listed subnet of the same IP version
    contains it; mixed-version pairs are accepted without comparison, per
    the ipv4-vs-ipv6 cases below.
    """
    def test_normal_cidr_and_valid_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(
                ["192.168.0.100/32"],
                [dict(id=1, cidr="192.168.0.0/24")])
        except Exception:
            self.fail("Should not have failed")
    def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(
                ["192.168.0.100/32"], [dict(id=1, cidr="::/96")])
        except Exception:
            self.fail("Should not have failed")
    def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(
                ["::/128"], [dict(id=1, cidr="::/96")])
        except Exception:
            self.fail("Should not have failed")
    def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(
                ["::/128"], [dict(id=1, cidr="192.168.0.0/24")])
        except Exception:
            self.fail("Should not have failed")
    def test_normal_cidr_and_multiple_valid_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(
                ["192.168.0.100/32"],
                [dict(id=1, cidr="192.168.0.0/24"),
                 dict(id=2, cidr="192.168.0.0/16")])
        except Exception:
            self.fail("Should not have failed")
    def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):
        try:
            ippol._validate_cidrs_fit_into_subnets(
                ["::/128"],
                [dict(id=1, cidr="::/96"),
                 dict(id=2, cidr="::/64")])
        except Exception:
            self.fail("Should not have failed")
    def test_normal_cidr_and_invalid_subnet(self):
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(
                ["192.168.0.100/32"],
                [dict(id=1, cidr="10.10.10.0/24")])
    def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(
                ["::/64"], [dict(id=1, cidr="::/96")])
    def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(
                ["192.168.0.100/32"],
                [dict(id=1, cidr="10.10.10.0/24"),
                 dict(id=1, cidr="192.168.0.0/24")])
    def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):
        with self.assertRaises(n_exc.BadRequest):
            ippol._validate_cidrs_fit_into_subnets(
                ["::/127"],
                [dict(id=1, cidr="::/96"),
                 dict(id=1, cidr="::/128")])
class TestQuarkEnsureDefaultPolicy(test_base.TestBase):
    """Tests for ippol.ensure_default_policy.

    ensure_default_policy mutates ``cidrs`` in place, appending each
    subnet's network and broadcast addresses unless already present, and
    returns None.
    """
    def test_no_cidrs_no_subnets(self):
        """Both inputs empty: nothing is added."""
        cidrs = []
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, [])
        self.assertEqual(subnets, [])
    def test_no_cidrs_v4(self):
        """v4 network/broadcast defaults are appended to an empty list."""
        cidrs = []
        subnets = [dict(cidr="192.168.10.1/24")]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["192.168.10.0/32", "192.168.10.255/32"])
        self.assertEqual(subnets, [dict(cidr="192.168.10.1/24")])
    def test_no_subnets_v4(self):
        """No subnets: existing cidrs are left untouched."""
        cidrs = ["192.168.10.0/32", "192.168.10.255/32"]
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["192.168.10.0/32", "192.168.10.255/32"])
        self.assertEqual(subnets, [])
    def test_cidrs_without_default_cidrs_v4(self):
        """Missing v4 defaults are appended after the caller's cidrs."""
        cidrs = ["192.168.10.20/32", "192.168.10.40/32"]
        subnets = [dict(cidr="192.168.10.1/24")]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["192.168.10.20/32", "192.168.10.40/32",
                                 "192.168.10.0/32", "192.168.10.255/32"])
        self.assertEqual(subnets, [dict(cidr="192.168.10.1/24")])
    def test_cidrs_with_default_cidrs_v4(self):
        """Defaults already present are not appended again."""
        cidrs = ["192.168.10.0/32", "192.168.10.255/32"]
        subnets = [dict(cidr="192.168.10.1/24")]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["192.168.10.0/32", "192.168.10.255/32"])
        self.assertEqual(subnets, [dict(cidr="192.168.10.1/24")])
    def test_no_cidrs_v6(self):
        """v6 network/broadcast defaults are appended to an empty list."""
        cidrs = []
        subnets = [dict(cidr="::/64")]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["::/128", "::ffff:ffff:ffff:ffff/128"])
        self.assertEqual(subnets, [dict(cidr="::/64")])
    def test_no_subnets_v6(self):
        """No subnets: existing v6 cidrs are left untouched."""
        cidrs = ["::/128", "::ffff:ffff:ffff:ffff/128"]
        subnets = []
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["::/128", "::ffff:ffff:ffff:ffff/128"])
        self.assertEqual(subnets, [])
    def test_cidrs_without_default_cidrs_v6(self):
        """Missing v6 defaults are appended after the caller's cidrs."""
        cidrs = ["::10/128", "::20/128"]
        subnets = [dict(cidr="::/64")]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["::10/128", "::20/128",
                                 "::/128", "::ffff:ffff:ffff:ffff/128"])
        self.assertEqual(subnets, [dict(cidr="::/64")])
    def test_cidrs_with_default_cidrs_v6(self):
        """v6 defaults already present are not appended again."""
        cidrs = ["::/128", "::ffff:ffff:ffff:ffff/128"]
        subnets = [dict(cidr="::/64")]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["::/128", "::ffff:ffff:ffff:ffff/128"])
        self.assertEqual(subnets, [dict(cidr="::/64")])
    def test_no_duplicates_in_result_when_called_twice(self):
        """Calling twice on the same list must not duplicate defaults.

        Fix: the original made its second call on a fresh ``cidrs2`` list
        but then re-asserted the untouched ``cidrs``, so the deduplication
        this test is named for was never exercised.  Call a second time on
        the already-augmented list instead.
        """
        cidrs = ["192.168.10.10/32"]
        subnets = [dict(cidr="192.168.10.0/24")]
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["192.168.10.10/32", "192.168.10.0/32",
                                 "192.168.10.255/32"])
        self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))
        self.assertEqual(cidrs, ["192.168.10.10/32", "192.168.10.0/32",
                                 "192.168.10.255/32"])
        self.assertEqual(subnets, [dict(cidr="192.168.10.0/24")])
|
flexible
|
{
"blob_id": "cf931da4c06e16fe6f6da5eb1826d8b7a59c1f7b",
"index": 9057,
"step-1": "<mask token>\n\n\nclass TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, networks=None):\n if not subnets:\n subnets = []\n if not networks:\n networks = []\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.subnet_find' % db_mod), mock.patch(\n '%s.network_find' % db_mod), mock.patch('%s.ip_policy_update' %\n db_mod)) as (ip_policy_find, subnet_find, network_find,\n ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n network_find.return_value = networks\n yield ip_policy_update\n\n def test_update_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=None))\n\n def test_update_ip_policy_with_both_network_and_subnet_ids(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(network_ids=[1], subnet_ids=[1])))\n <mask token>\n\n def test_update_ip_policy_subnets_already_exists(self):\n ipp = dict(id=1, subnets=[dict()])\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[100])))\n <mask token>\n\n def test_update_ip_policy_subnets_empty_exclude(self):\n ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.40/32'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=[dict(id=1, cidr='0.0.0.0/16',\n ip_policy=None)]) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=[])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['0.0.0.0/32', '0.0.255.255/32'])\n <mask 
token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy):\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.ip_policy_delete' % db_mod)) as (ip_policy_find,\n ip_policy_delete):\n ip_policy_find.return_value = ip_policy\n yield ip_policy_find, ip_policy_delete\n\n def test_delete_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy_in_use(self):\n with self._stubs(dict(networks=True)):\n with self.assertRaises(q_exc.IPPolicyInUse):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy(self):\n ip_policy = dict(id=1, networks=[], subnets=[])\n with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):\n self.plugin.delete_ip_policy(self.context, 1)\n self.assertEqual(ip_policy_find.call_count, 1)\n self.assertEqual(ip_policy_delete.call_count, 1)\n\n\nclass TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, routes=None):\n subnets = subnets or []\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.subnet_find' % db_mod), mock.patch(\n '%s.route_find' % db_mod), mock.patch('%s.ip_policy_update' %\n db_mod)) as (ip_policy_find, subnet_find, route_find,\n ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n route_find.return_value = routes\n yield ip_policy_update\n\n def test_update_ip_policy_has_route_conflict_raises(self):\n subnet = dict(id=1, cidr='192.168.0.0/24')\n ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name\n ='foo', tenant_id=1)\n route = {'gateway': '192.168.0.1', 'subnet_id': 
subnet['id']}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n with self.assertRaises(n_exc_ext.GatewayConflictWithAllocationPools\n ):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[1], exclude=[])))\n\n def test_update_ip_policy_no_route_conflict(self):\n subnet = dict(id=1, cidr='192.168.0.0/24')\n ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name\n ='foo', tenant_id=1)\n route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n try:\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[1], exclude=['192.168.0.0/24']))\n )\n except Exception as e:\n self.fail(\"This shouldn't have raised: %s\" % e)\n\n\nclass TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):\n\n def test_normal_cidr_and_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='192.168.0.0/24')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='::/96')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='::/96')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='192.168.0.0/24')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_cidr_and_multiple_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='192.168.0.0/24'), dict(id=2, cidr=\n '192.168.0.0/16')])\n except Exception:\n self.fail('Should not have failed')\n\n def 
test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='::/96'), dict(id=2, cidr='::/64')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_cidr_and_invalid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='10.10.10.0/24')])\n\n def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['::/64'], [dict(id=1,\n cidr='::/96')])\n\n def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='10.10.10.0/24'), dict(id=1, cidr=\n '192.168.0.0/24')])\n\n def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['::/127'], [dict(id=1,\n cidr='::/96'), dict(id=1, cidr='::/128')])\n\n\nclass TestQuarkEnsureDefaultPolicy(test_base.TestBase):\n\n def test_no_cidrs_no_subnets(self):\n cidrs = []\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [])\n self.assertEqual(subnets, [])\n\n def test_no_cidrs_v4(self):\n cidrs = []\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_no_subnets_v4(self):\n cidrs = ['192.168.10.0/32', '192.168.10.255/32']\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v4(self):\n cidrs = ['192.168.10.20/32', 
'192.168.10.40/32']\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.20/32', '192.168.10.40/32',\n '192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_cidrs_with_default_cidrs_v4(self):\n cidrs = ['192.168.10.0/32', '192.168.10.255/32']\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_no_cidrs_v6(self):\n cidrs = []\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_no_subnets_v6(self):\n cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v6(self):\n cidrs = ['::10/128', '::20/128']\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::10/128', '::20/128', '::/128',\n '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_cidrs_with_default_cidrs_v6(self):\n cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_no_duplicates_in_result_when_called_twice(self):\n cidrs = ['192.168.10.10/32']\n subnets = [dict(cidr='192.168.10.0/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n 
self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',\n '192.168.10.255/32'])\n cidrs2 = ['192.168.10.10/32']\n self.assertIsNone(ippol.ensure_default_policy(cidrs2, subnets))\n self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',\n '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.0/24')])\n",
"step-2": "<mask token>\n\n\nclass TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, networks=None):\n if not subnets:\n subnets = []\n if not networks:\n networks = []\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.subnet_find' % db_mod), mock.patch(\n '%s.network_find' % db_mod), mock.patch('%s.ip_policy_update' %\n db_mod)) as (ip_policy_find, subnet_find, network_find,\n ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n network_find.return_value = networks\n yield ip_policy_update\n\n def test_update_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=None))\n\n def test_update_ip_policy_with_both_network_and_subnet_ids(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(network_ids=[1], subnet_ids=[1])))\n\n def test_update_ip_policy_subnets_not_found(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.SubnetNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[100])))\n\n def test_update_ip_policy_subnets_already_exists(self):\n ipp = dict(id=1, subnets=[dict()])\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[100])))\n\n def test_update_ip_policy_subnets(self):\n ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None)]\n ) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, 
dict(ip_policy=\n dict(subnet_ids=[100])))\n self.assertEqual(ip_policy_update.called, 1)\n\n def test_update_ip_policy_subnets_empty_exclude(self):\n ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.40/32'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=[dict(id=1, cidr='0.0.0.0/16',\n ip_policy=None)]) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=[])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['0.0.0.0/32', '0.0.255.255/32'])\n\n def test_update_ip_policy_subnets_empty_exclude_without_subnet_ids(self):\n ipp = dict(id=1, subnets=[dict(cidr='0.0.0.0/16')], exclude=[\n '0.0.0.40/32'], name='foo', tenant_id=1)\n with self._stubs(ipp) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(exclude=[])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n exclude=['0.0.0.0/32', '0.0.255.255/32'])\n <mask token>\n\n def test_update_ip_policy_networks(self):\n ipp = dict(id=1, networks=[dict()], exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, networks=[dict(id=1, ip_policy=None)]\n ) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(network_ids=[100])))\n self.assertEqual(ip_policy_update.called, 1)\n\n def test_update_ip_policy_exclude_v4(self):\n subnets = [dict(id=100, cidr='0.0.0.0/16')]\n ipp = dict(id=1, subnets=subnets, exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=subnets) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=['0.0.0.1/32'])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['0.0.0.1/32', '0.0.0.0/32',\n '0.0.255.255/32'])\n\n def test_update_ip_policy_exclude_v6(self):\n subnets = [dict(id=100, cidr='::/64')]\n ipp = dict(id=1, subnets=subnets, 
exclude=['::/128'], name='foo',\n tenant_id=1)\n with self._stubs(ipp, subnets=subnets) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=['::1/128'])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['::1/128', '::/128',\n '::ffff:ffff:ffff:ffff/128'])\n\n\nclass TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy):\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.ip_policy_delete' % db_mod)) as (ip_policy_find,\n ip_policy_delete):\n ip_policy_find.return_value = ip_policy\n yield ip_policy_find, ip_policy_delete\n\n def test_delete_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy_in_use(self):\n with self._stubs(dict(networks=True)):\n with self.assertRaises(q_exc.IPPolicyInUse):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy(self):\n ip_policy = dict(id=1, networks=[], subnets=[])\n with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):\n self.plugin.delete_ip_policy(self.context, 1)\n self.assertEqual(ip_policy_find.call_count, 1)\n self.assertEqual(ip_policy_delete.call_count, 1)\n\n\nclass TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, routes=None):\n subnets = subnets or []\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.subnet_find' % db_mod), mock.patch(\n '%s.route_find' % db_mod), mock.patch('%s.ip_policy_update' %\n db_mod)) as (ip_policy_find, subnet_find, route_find,\n ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n 
route_find.return_value = routes\n yield ip_policy_update\n\n def test_update_ip_policy_has_route_conflict_raises(self):\n subnet = dict(id=1, cidr='192.168.0.0/24')\n ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name\n ='foo', tenant_id=1)\n route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n with self.assertRaises(n_exc_ext.GatewayConflictWithAllocationPools\n ):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[1], exclude=[])))\n\n def test_update_ip_policy_no_route_conflict(self):\n subnet = dict(id=1, cidr='192.168.0.0/24')\n ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name\n ='foo', tenant_id=1)\n route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n try:\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[1], exclude=['192.168.0.0/24']))\n )\n except Exception as e:\n self.fail(\"This shouldn't have raised: %s\" % e)\n\n\nclass TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):\n\n def test_normal_cidr_and_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='192.168.0.0/24')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='::/96')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='::/96')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='192.168.0.0/24')])\n except Exception:\n 
self.fail('Should not have failed')\n\n def test_normal_cidr_and_multiple_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='192.168.0.0/24'), dict(id=2, cidr=\n '192.168.0.0/16')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='::/96'), dict(id=2, cidr='::/64')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_cidr_and_invalid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='10.10.10.0/24')])\n\n def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['::/64'], [dict(id=1,\n cidr='::/96')])\n\n def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='10.10.10.0/24'), dict(id=1, cidr=\n '192.168.0.0/24')])\n\n def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['::/127'], [dict(id=1,\n cidr='::/96'), dict(id=1, cidr='::/128')])\n\n\nclass TestQuarkEnsureDefaultPolicy(test_base.TestBase):\n\n def test_no_cidrs_no_subnets(self):\n cidrs = []\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [])\n self.assertEqual(subnets, [])\n\n def test_no_cidrs_v4(self):\n cidrs = []\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_no_subnets_v4(self):\n cidrs = 
['192.168.10.0/32', '192.168.10.255/32']\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v4(self):\n cidrs = ['192.168.10.20/32', '192.168.10.40/32']\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.20/32', '192.168.10.40/32',\n '192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_cidrs_with_default_cidrs_v4(self):\n cidrs = ['192.168.10.0/32', '192.168.10.255/32']\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_no_cidrs_v6(self):\n cidrs = []\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_no_subnets_v6(self):\n cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v6(self):\n cidrs = ['::10/128', '::20/128']\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::10/128', '::20/128', '::/128',\n '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_cidrs_with_default_cidrs_v6(self):\n cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n 
self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_no_duplicates_in_result_when_called_twice(self):\n cidrs = ['192.168.10.10/32']\n subnets = [dict(cidr='192.168.10.0/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',\n '192.168.10.255/32'])\n cidrs2 = ['192.168.10.10/32']\n self.assertIsNone(ippol.ensure_default_policy(cidrs2, subnets))\n self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',\n '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.0/24')])\n",
"step-3": "<mask token>\n\n\nclass TestQuarkCreateIpPolicies(test_quark_plugin.TestQuarkPlugin):\n <mask token>\n <mask token>\n <mask token>\n\n def test_create_ip_policy_invalid_body_missing_netsubnet(self):\n with self._stubs(None):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(exclude=['1.1.1.1/24'])))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_create_ip_policy_network(self):\n ipp = dict(subnet_id=None, network_id=1, exclude=['1.1.1.1/24'])\n with self._stubs(ipp, nets=[dict(id=1, ip_policy=dict(id=2),\n subnets=[dict(id=1, cidr='1.1.1.1/16')])]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(network_ids=[ipp['network_id']], exclude=ipp[\n 'exclude'])))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_create_ip_policy_only_called_once_with_multiple_networks(self):\n ipp = dict(subnets=[], networks=[dict(id=1, subnets=[dict(id=1,\n ip_policy=None, cidr='0.0.0.0/24')]), dict(id=2, subnets=[dict(\n id=2, ip_policy=None, cidr='0.0.0.0/24')])], id=1, tenant_id=1,\n exclude=[dict(cidr='0.0.0.1/32')], name='foo')\n with self._stubs(ipp, nets=ipp['networks']) as ip_policy_create:\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(network_ids=[1, 2], exclude=['0.0.0.1/32'])))\n exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.0.255/32']\n ip_policy_create.assert_called_once_with(self.context, exclude=\n exclude, networks=[{'subnets': [{'cidr': '0.0.0.0/24',\n 'ip_policy': None, 'id': 1}], 'id': 1}, {'subnets': [{\n 'cidr': '0.0.0.0/24', 'ip_policy': None, 'id': 2}], 'id': 2}])\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp['subnet_ids'], [])\n self.assertEqual(resp['network_ids'], [1, 2])\n self.assertEqual(resp['exclude'], ['0.0.0.1/32'])\n self.assertEqual(resp['name'], 'foo')\n self.assertEqual(resp['tenant_id'], 1)\n <mask 
token>\n\n\nclass TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, networks=None):\n if not subnets:\n subnets = []\n if not networks:\n networks = []\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.subnet_find' % db_mod), mock.patch(\n '%s.network_find' % db_mod), mock.patch('%s.ip_policy_update' %\n db_mod)) as (ip_policy_find, subnet_find, network_find,\n ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n network_find.return_value = networks\n yield ip_policy_update\n\n def test_update_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=None))\n\n def test_update_ip_policy_with_both_network_and_subnet_ids(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(network_ids=[1], subnet_ids=[1])))\n\n def test_update_ip_policy_subnets_not_found(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.SubnetNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[100])))\n\n def test_update_ip_policy_subnets_already_exists(self):\n ipp = dict(id=1, subnets=[dict()])\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[100])))\n\n def test_update_ip_policy_subnets(self):\n ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None)]\n ) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n 
dict(subnet_ids=[100])))\n self.assertEqual(ip_policy_update.called, 1)\n\n def test_update_ip_policy_subnets_empty_exclude(self):\n ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.40/32'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=[dict(id=1, cidr='0.0.0.0/16',\n ip_policy=None)]) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=[])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['0.0.0.0/32', '0.0.255.255/32'])\n\n def test_update_ip_policy_subnets_empty_exclude_without_subnet_ids(self):\n ipp = dict(id=1, subnets=[dict(cidr='0.0.0.0/16')], exclude=[\n '0.0.0.40/32'], name='foo', tenant_id=1)\n with self._stubs(ipp) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(exclude=[])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n exclude=['0.0.0.0/32', '0.0.255.255/32'])\n\n def test_update_ip_policy_networks_not_found(self):\n ipp = dict(id=1, networks=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.NetworkNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(network_ids=[100])))\n\n def test_update_ip_policy_networks(self):\n ipp = dict(id=1, networks=[dict()], exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, networks=[dict(id=1, ip_policy=None)]\n ) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(network_ids=[100])))\n self.assertEqual(ip_policy_update.called, 1)\n\n def test_update_ip_policy_exclude_v4(self):\n subnets = [dict(id=100, cidr='0.0.0.0/16')]\n ipp = dict(id=1, subnets=subnets, exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=subnets) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=['0.0.0.1/32'])))\n 
ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['0.0.0.1/32', '0.0.0.0/32',\n '0.0.255.255/32'])\n\n def test_update_ip_policy_exclude_v6(self):\n subnets = [dict(id=100, cidr='::/64')]\n ipp = dict(id=1, subnets=subnets, exclude=['::/128'], name='foo',\n tenant_id=1)\n with self._stubs(ipp, subnets=subnets) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=['::1/128'])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['::1/128', '::/128',\n '::ffff:ffff:ffff:ffff/128'])\n\n\nclass TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy):\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.ip_policy_delete' % db_mod)) as (ip_policy_find,\n ip_policy_delete):\n ip_policy_find.return_value = ip_policy\n yield ip_policy_find, ip_policy_delete\n\n def test_delete_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy_in_use(self):\n with self._stubs(dict(networks=True)):\n with self.assertRaises(q_exc.IPPolicyInUse):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy(self):\n ip_policy = dict(id=1, networks=[], subnets=[])\n with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):\n self.plugin.delete_ip_policy(self.context, 1)\n self.assertEqual(ip_policy_find.call_count, 1)\n self.assertEqual(ip_policy_delete.call_count, 1)\n\n\nclass TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, routes=None):\n subnets = subnets or []\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n 
mock.patch('%s.subnet_find' % db_mod), mock.patch(\n '%s.route_find' % db_mod), mock.patch('%s.ip_policy_update' %\n db_mod)) as (ip_policy_find, subnet_find, route_find,\n ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n route_find.return_value = routes\n yield ip_policy_update\n\n def test_update_ip_policy_has_route_conflict_raises(self):\n subnet = dict(id=1, cidr='192.168.0.0/24')\n ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name\n ='foo', tenant_id=1)\n route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n with self.assertRaises(n_exc_ext.GatewayConflictWithAllocationPools\n ):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[1], exclude=[])))\n\n def test_update_ip_policy_no_route_conflict(self):\n subnet = dict(id=1, cidr='192.168.0.0/24')\n ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name\n ='foo', tenant_id=1)\n route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n try:\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[1], exclude=['192.168.0.0/24']))\n )\n except Exception as e:\n self.fail(\"This shouldn't have raised: %s\" % e)\n\n\nclass TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):\n\n def test_normal_cidr_and_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='192.168.0.0/24')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='::/96')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], 
[dict(id=1,\n cidr='::/96')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='192.168.0.0/24')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_cidr_and_multiple_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='192.168.0.0/24'), dict(id=2, cidr=\n '192.168.0.0/16')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='::/96'), dict(id=2, cidr='::/64')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_cidr_and_invalid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='10.10.10.0/24')])\n\n def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['::/64'], [dict(id=1,\n cidr='::/96')])\n\n def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='10.10.10.0/24'), dict(id=1, cidr=\n '192.168.0.0/24')])\n\n def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['::/127'], [dict(id=1,\n cidr='::/96'), dict(id=1, cidr='::/128')])\n\n\nclass TestQuarkEnsureDefaultPolicy(test_base.TestBase):\n\n def test_no_cidrs_no_subnets(self):\n cidrs = []\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [])\n self.assertEqual(subnets, [])\n\n def test_no_cidrs_v4(self):\n cidrs = []\n subnets = 
[dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_no_subnets_v4(self):\n cidrs = ['192.168.10.0/32', '192.168.10.255/32']\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v4(self):\n cidrs = ['192.168.10.20/32', '192.168.10.40/32']\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.20/32', '192.168.10.40/32',\n '192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_cidrs_with_default_cidrs_v4(self):\n cidrs = ['192.168.10.0/32', '192.168.10.255/32']\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_no_cidrs_v6(self):\n cidrs = []\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_no_subnets_v6(self):\n cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v6(self):\n cidrs = ['::10/128', '::20/128']\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::10/128', '::20/128', '::/128',\n 
'::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_cidrs_with_default_cidrs_v6(self):\n cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_no_duplicates_in_result_when_called_twice(self):\n cidrs = ['192.168.10.10/32']\n subnets = [dict(cidr='192.168.10.0/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',\n '192.168.10.255/32'])\n cidrs2 = ['192.168.10.10/32']\n self.assertIsNone(ippol.ensure_default_policy(cidrs2, subnets))\n self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',\n '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.0/24')])\n",
"step-4": "<mask token>\n\n\nclass TestQuarkCreateIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, nets=None):\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.subnet_find' % db_mod), mock.\n patch('%s.network_find' % db_mod), mock.patch(\n '%s.ip_policy_create' % db_mod), mock.patch('%s.route_find' %\n db_mod)) as (subnet_find, net_find, ip_policy_create, route_find):\n subnet_find.return_value = subnets if subnets else None\n net_find.return_value = nets if nets else None\n ip_policy_create.return_value = ip_policy\n route_find.return_value = [{'nexthop': '1.2.3.4'}]\n yield ip_policy_create\n <mask token>\n\n def test_create_ip_policy_with_both_network_and_subnet_ids(self):\n with self._stubs(None):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(network_ids=[1], subnet_ids=[1])))\n\n def test_create_ip_policy_invalid_body_missing_netsubnet(self):\n with self._stubs(None):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(exclude=['1.1.1.1/24'])))\n <mask token>\n <mask token>\n\n def test_create_ip_policy_network_ip_policy_already_exists(self):\n with self._stubs(None, nets=[dict(id=1, ip_policy=dict(id=2),\n subnets=[dict(id=1, cidr='1.1.1.1/16')])]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(network_ids=[1], exclude=['1.1.1.1/24'])))\n\n def test_create_ip_policy_subnet_ip_policy_already_exists(self):\n with self._stubs(None, subnets=[dict(id=1, ip_policy=dict(id=2),\n cidr='1.1.1.1/16')]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(subnet_ids=[1], exclude=['1.1.1.1/24'])))\n\n def test_create_ip_policy_network(self):\n ipp = dict(subnet_id=None, network_id=1, exclude=['1.1.1.1/24'])\n 
with self._stubs(ipp, nets=[dict(id=1, ip_policy=dict(id=2),\n subnets=[dict(id=1, cidr='1.1.1.1/16')])]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(network_ids=[ipp['network_id']], exclude=ipp[\n 'exclude'])))\n\n def test_create_ip_policy_subnet(self):\n ipp = dict(subnet_id=1, network_id=None, exclude=['1.1.1.1/24'])\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=2),\n cidr='1.1.1.1/16')]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(subnet_ids=[ipp['subnet_id']], exclude=ipp[\n 'exclude'])))\n\n def test_create_ip_policy_with_cidr_that_does_not_fit_into_subnet(self):\n ipp = dict(subnets=[dict(id=1, version=4, cidr='192.168.1.1/24')],\n networks=[], id=1, tenant_id=1, exclude=['10.10.10.100/32'],\n name='foo')\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None, version=\n ipp['subnets'][0]['version'], cidr=ipp['subnets'][0]['cidr'])]):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.create_ip_policy(self.context, dict(ip_policy=\n dict(subnet_ids=[1], exclude=ipp['exclude'])))\n\n def test_create_ip_policy_with_ipv6_subnet_cidr(self):\n ipp = dict(subnets=[dict(id=1, version=6, cidr='::/64')], networks=\n [], id=1, tenant_id=1, exclude=[dict(cidr='::/128')], name='foo')\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None, version=\n ipp['subnets'][0]['version'], cidr=ipp['subnets'][0]['cidr'])]):\n exclude = [ippc['cidr'] for ippc in ipp['exclude']]\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[1], exclude=exclude)))\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp['subnet_ids'], [1])\n self.assertEqual(resp['network_ids'], [])\n self.assertEqual(resp['exclude'], ['::/128'])\n self.assertEqual(resp['name'], 'foo')\n self.assertEqual(resp['tenant_id'], 1)\n\n def test_create_ip_policy(self):\n ipp = 
dict(subnets=[dict(id=1, cidr='0.0.0.0/16')], networks=[], id\n =1, tenant_id=1, exclude=[dict(cidr='0.0.0.0/24')], name='foo')\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None, cidr=ipp[\n 'subnets'][0]['cidr'])]):\n exclude = [ippc['cidr'] for ippc in ipp['exclude']]\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[1], exclude=exclude)))\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp['subnet_ids'], [1])\n self.assertEqual(resp['network_ids'], [])\n self.assertEqual(resp['exclude'], ['0.0.0.0/24'])\n self.assertEqual(resp['name'], 'foo')\n self.assertEqual(resp['tenant_id'], 1)\n\n def test_create_ip_policy_only_called_once_with_multiple_networks(self):\n ipp = dict(subnets=[], networks=[dict(id=1, subnets=[dict(id=1,\n ip_policy=None, cidr='0.0.0.0/24')]), dict(id=2, subnets=[dict(\n id=2, ip_policy=None, cidr='0.0.0.0/24')])], id=1, tenant_id=1,\n exclude=[dict(cidr='0.0.0.1/32')], name='foo')\n with self._stubs(ipp, nets=ipp['networks']) as ip_policy_create:\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(network_ids=[1, 2], exclude=['0.0.0.1/32'])))\n exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.0.255/32']\n ip_policy_create.assert_called_once_with(self.context, exclude=\n exclude, networks=[{'subnets': [{'cidr': '0.0.0.0/24',\n 'ip_policy': None, 'id': 1}], 'id': 1}, {'subnets': [{\n 'cidr': '0.0.0.0/24', 'ip_policy': None, 'id': 2}], 'id': 2}])\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp['subnet_ids'], [])\n self.assertEqual(resp['network_ids'], [1, 2])\n self.assertEqual(resp['exclude'], ['0.0.0.1/32'])\n self.assertEqual(resp['name'], 'foo')\n self.assertEqual(resp['tenant_id'], 1)\n\n def test_create_ip_policy_only_called_once_with_multiple_subnets(self):\n ipp = dict(subnets=[dict(id=3, cidr='0.0.0.0/16'), dict(id=4, cidr=\n '0.0.0.0/16')], networks=[], id=1, tenant_id=1, exclude=[dict(\n cidr='0.0.0.1/32')], name='foo')\n with self._stubs(ipp, 
subnets=ipp['subnets']) as ip_policy_create:\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[3, 4], exclude=['0.0.0.1/32'])))\n exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.255.255/32']\n ip_policy_create.assert_called_once_with(self.context, exclude=\n exclude, subnets=[{'cidr': '0.0.0.0/16', 'id': 3}, {'cidr':\n '0.0.0.0/16', 'id': 4}])\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp['subnet_ids'], [3, 4])\n self.assertEqual(resp['network_ids'], [])\n self.assertEqual(resp['exclude'], ['0.0.0.1/32'])\n self.assertEqual(resp['name'], 'foo')\n self.assertEqual(resp['tenant_id'], 1)\n\n\nclass TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, networks=None):\n if not subnets:\n subnets = []\n if not networks:\n networks = []\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.subnet_find' % db_mod), mock.patch(\n '%s.network_find' % db_mod), mock.patch('%s.ip_policy_update' %\n db_mod)) as (ip_policy_find, subnet_find, network_find,\n ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n network_find.return_value = networks\n yield ip_policy_update\n\n def test_update_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=None))\n\n def test_update_ip_policy_with_both_network_and_subnet_ids(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(network_ids=[1], subnet_ids=[1])))\n\n def test_update_ip_policy_subnets_not_found(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.SubnetNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n 
ip_policy=dict(subnet_ids=[100])))\n\n def test_update_ip_policy_subnets_already_exists(self):\n ipp = dict(id=1, subnets=[dict()])\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[100])))\n\n def test_update_ip_policy_subnets(self):\n ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=None)]\n ) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100])))\n self.assertEqual(ip_policy_update.called, 1)\n\n def test_update_ip_policy_subnets_empty_exclude(self):\n ipp = dict(id=1, subnets=[dict()], exclude=['0.0.0.40/32'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=[dict(id=1, cidr='0.0.0.0/16',\n ip_policy=None)]) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=[])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['0.0.0.0/32', '0.0.255.255/32'])\n\n def test_update_ip_policy_subnets_empty_exclude_without_subnet_ids(self):\n ipp = dict(id=1, subnets=[dict(cidr='0.0.0.0/16')], exclude=[\n '0.0.0.40/32'], name='foo', tenant_id=1)\n with self._stubs(ipp) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(exclude=[])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n exclude=['0.0.0.0/32', '0.0.255.255/32'])\n\n def test_update_ip_policy_networks_not_found(self):\n ipp = dict(id=1, networks=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.NetworkNotFound):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(network_ids=[100])))\n\n def test_update_ip_policy_networks(self):\n ipp = dict(id=1, networks=[dict()], exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n 
with self._stubs(ipp, networks=[dict(id=1, ip_policy=None)]\n ) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(network_ids=[100])))\n self.assertEqual(ip_policy_update.called, 1)\n\n def test_update_ip_policy_exclude_v4(self):\n subnets = [dict(id=100, cidr='0.0.0.0/16')]\n ipp = dict(id=1, subnets=subnets, exclude=['0.0.0.0/24'], name=\n 'foo', tenant_id=1)\n with self._stubs(ipp, subnets=subnets) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=['0.0.0.1/32'])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['0.0.0.1/32', '0.0.0.0/32',\n '0.0.255.255/32'])\n\n def test_update_ip_policy_exclude_v6(self):\n subnets = [dict(id=100, cidr='::/64')]\n ipp = dict(id=1, subnets=subnets, exclude=['::/128'], name='foo',\n tenant_id=1)\n with self._stubs(ipp, subnets=subnets) as ip_policy_update:\n self.plugin.update_ip_policy(self.context, 1, dict(ip_policy=\n dict(subnet_ids=[100], exclude=['::1/128'])))\n ip_policy_update.assert_called_once_with(self.context, ipp,\n subnet_ids=[100], exclude=['::1/128', '::/128',\n '::ffff:ffff:ffff:ffff/128'])\n\n\nclass TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy):\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.ip_policy_delete' % db_mod)) as (ip_policy_find,\n ip_policy_delete):\n ip_policy_find.return_value = ip_policy\n yield ip_policy_find, ip_policy_delete\n\n def test_delete_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy_in_use(self):\n with self._stubs(dict(networks=True)):\n with self.assertRaises(q_exc.IPPolicyInUse):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def 
test_delete_ip_policy(self):\n ip_policy = dict(id=1, networks=[], subnets=[])\n with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):\n self.plugin.delete_ip_policy(self.context, 1)\n self.assertEqual(ip_policy_find.call_count, 1)\n self.assertEqual(ip_policy_delete.call_count, 1)\n\n\nclass TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):\n\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, routes=None):\n subnets = subnets or []\n db_mod = 'quark.db.api'\n with contextlib.nested(mock.patch('%s.ip_policy_find' % db_mod),\n mock.patch('%s.subnet_find' % db_mod), mock.patch(\n '%s.route_find' % db_mod), mock.patch('%s.ip_policy_update' %\n db_mod)) as (ip_policy_find, subnet_find, route_find,\n ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n route_find.return_value = routes\n yield ip_policy_update\n\n def test_update_ip_policy_has_route_conflict_raises(self):\n subnet = dict(id=1, cidr='192.168.0.0/24')\n ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name\n ='foo', tenant_id=1)\n route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n with self.assertRaises(n_exc_ext.GatewayConflictWithAllocationPools\n ):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[1], exclude=[])))\n\n def test_update_ip_policy_no_route_conflict(self):\n subnet = dict(id=1, cidr='192.168.0.0/24')\n ipp = dict(id=1, subnets=[subnet], exclude=['192.168.0.1/32'], name\n ='foo', tenant_id=1)\n route = {'gateway': '192.168.0.1', 'subnet_id': subnet['id']}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n try:\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(subnet_ids=[1], exclude=['192.168.0.0/24']))\n )\n except Exception as e:\n self.fail(\"This shouldn't have raised: %s\" % e)\n\n\nclass 
TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):\n\n def test_normal_cidr_and_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='192.168.0.0/24')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='::/96')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='::/96')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='192.168.0.0/24')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_cidr_and_multiple_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='192.168.0.0/24'), dict(id=2, cidr=\n '192.168.0.0/16')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(['::/128'], [dict(id=1,\n cidr='::/96'), dict(id=2, cidr='::/64')])\n except Exception:\n self.fail('Should not have failed')\n\n def test_normal_cidr_and_invalid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], [\n dict(id=1, cidr='10.10.10.0/24')])\n\n def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['::/64'], [dict(id=1,\n cidr='::/96')])\n\n def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['192.168.0.100/32'], 
[\n dict(id=1, cidr='10.10.10.0/24'), dict(id=1, cidr=\n '192.168.0.0/24')])\n\n def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(['::/127'], [dict(id=1,\n cidr='::/96'), dict(id=1, cidr='::/128')])\n\n\nclass TestQuarkEnsureDefaultPolicy(test_base.TestBase):\n\n def test_no_cidrs_no_subnets(self):\n cidrs = []\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [])\n self.assertEqual(subnets, [])\n\n def test_no_cidrs_v4(self):\n cidrs = []\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_no_subnets_v4(self):\n cidrs = ['192.168.10.0/32', '192.168.10.255/32']\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v4(self):\n cidrs = ['192.168.10.20/32', '192.168.10.40/32']\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.20/32', '192.168.10.40/32',\n '192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_cidrs_with_default_cidrs_v4(self):\n cidrs = ['192.168.10.0/32', '192.168.10.255/32']\n subnets = [dict(cidr='192.168.10.1/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.0/32', '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.1/24')])\n\n def test_no_cidrs_v6(self):\n cidrs = []\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, 
['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_no_subnets_v6(self):\n cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v6(self):\n cidrs = ['::10/128', '::20/128']\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::10/128', '::20/128', '::/128',\n '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_cidrs_with_default_cidrs_v6(self):\n cidrs = ['::/128', '::ffff:ffff:ffff:ffff/128']\n subnets = [dict(cidr='::/64')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['::/128', '::ffff:ffff:ffff:ffff/128'])\n self.assertEqual(subnets, [dict(cidr='::/64')])\n\n def test_no_duplicates_in_result_when_called_twice(self):\n cidrs = ['192.168.10.10/32']\n subnets = [dict(cidr='192.168.10.0/24')]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',\n '192.168.10.255/32'])\n cidrs2 = ['192.168.10.10/32']\n self.assertIsNone(ippol.ensure_default_policy(cidrs2, subnets))\n self.assertEqual(cidrs, ['192.168.10.10/32', '192.168.10.0/32',\n '192.168.10.255/32'])\n self.assertEqual(subnets, [dict(cidr='192.168.10.0/24')])\n",
"step-5": "# Copyright 2013 Rackspace Hosting Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport contextlib\n\nimport mock\nfrom neutron.common import exceptions as n_exc_ext\nfrom neutron_lib import exceptions as n_exc\n\nfrom quark import exceptions as q_exc\nfrom quark.plugin_modules import ip_policies as ippol\nfrom quark.tests import test_base\nfrom quark.tests import test_quark_plugin\n\n\nclass TestQuarkGetIpPolicies(test_quark_plugin.TestQuarkPlugin):\n @contextlib.contextmanager\n def _stubs(self, ip_policy):\n db_mod = \"quark.db.api\"\n with mock.patch(\"%s.ip_policy_find\" % db_mod) as ip_policy_find:\n ip_policy_find.return_value = ip_policy\n yield\n\n def test_get_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.get_ip_policy(self.context, 1)\n\n def test_get_ip_policy(self):\n ip_policy = dict(\n id=1,\n tenant_id=1,\n name=\"foo\",\n subnets=[dict(id=1)],\n networks=[dict(id=2)],\n exclude=[dict(cidr=\"0.0.0.0/32\")])\n with self._stubs(ip_policy):\n resp = self.plugin.get_ip_policy(self.context, 1)\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp[\"id\"], 1)\n self.assertEqual(resp[\"name\"], \"foo\")\n self.assertEqual(resp[\"subnet_ids\"], [1])\n self.assertEqual(resp[\"network_ids\"], [2])\n self.assertEqual(resp[\"exclude\"], [\"0.0.0.0/32\"])\n self.assertEqual(resp[\"tenant_id\"], 1)\n\n def test_get_ip_policies(self):\n 
ip_policy = dict(\n id=1,\n tenant_id=1,\n name=\"foo\",\n subnets=[dict(id=1)],\n networks=[dict(id=2)],\n exclude=[dict(cidr=\"0.0.0.0/32\")])\n with self._stubs([ip_policy]):\n resp = self.plugin.get_ip_policies(self.context)\n self.assertEqual(len(resp), 1)\n resp = resp[0]\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp[\"id\"], 1)\n self.assertEqual(resp[\"subnet_ids\"], [1])\n self.assertEqual(resp[\"network_ids\"], [2])\n self.assertEqual(resp[\"exclude\"], [\"0.0.0.0/32\"])\n self.assertEqual(resp[\"name\"], \"foo\")\n self.assertEqual(resp[\"tenant_id\"], 1)\n\n\nclass TestQuarkCreateIpPolicies(test_quark_plugin.TestQuarkPlugin):\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, nets=None):\n db_mod = \"quark.db.api\"\n with contextlib.nested(\n mock.patch(\"%s.subnet_find\" % db_mod),\n mock.patch(\"%s.network_find\" % db_mod),\n mock.patch(\"%s.ip_policy_create\" % db_mod),\n mock.patch(\"%s.route_find\" % db_mod)\n ) as (subnet_find, net_find, ip_policy_create, route_find):\n subnet_find.return_value = subnets if subnets else None\n net_find.return_value = nets if nets else None\n ip_policy_create.return_value = ip_policy\n route_find.return_value = [{\"nexthop\": \"1.2.3.4\"}]\n yield ip_policy_create\n\n def test_create_ip_policy_invalid_body_missing_exclude(self):\n with self._stubs(None):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict()))\n\n def test_create_ip_policy_with_both_network_and_subnet_ids(self):\n with self._stubs(None):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(network_ids=[1], subnet_ids=[1])))\n\n def test_create_ip_policy_invalid_body_missing_netsubnet(self):\n with self._stubs(None):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(exclude=[\"1.1.1.1/24\"])))\n\n def 
test_create_ip_policy_invalid_subnet(self):\n with self._stubs(None):\n with self.assertRaises(n_exc.SubnetNotFound):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[1],\n exclude=[\"1.1.1.1/24\"])))\n\n def test_create_ip_policy_invalid_network(self):\n with self._stubs(None):\n with self.assertRaises(n_exc.NetworkNotFound):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(network_ids=[1],\n exclude=[\"1.1.1.1/24\"])))\n\n def test_create_ip_policy_network_ip_policy_already_exists(self):\n with self._stubs(None, nets=[dict(id=1, ip_policy=dict(id=2),\n subnets=[dict(id=1,\n cidr=\"1.1.1.1/16\")])]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(network_ids=[1],\n exclude=[\"1.1.1.1/24\"])))\n\n def test_create_ip_policy_subnet_ip_policy_already_exists(self):\n with self._stubs(None, subnets=[dict(id=1, ip_policy=dict(id=2),\n cidr=\"1.1.1.1/16\")]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[1],\n exclude=[\"1.1.1.1/24\"])))\n\n def test_create_ip_policy_network(self):\n ipp = dict(subnet_id=None, network_id=1,\n exclude=[\"1.1.1.1/24\"])\n with self._stubs(ipp, nets=[dict(id=1, ip_policy=dict(id=2),\n subnets=[dict(id=1,\n cidr=\"1.1.1.1/16\")])]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(network_ids=[ipp[\"network_id\"]],\n exclude=ipp[\"exclude\"])))\n\n def test_create_ip_policy_subnet(self):\n ipp = dict(subnet_id=1, network_id=None,\n exclude=[\"1.1.1.1/24\"])\n with self._stubs(ipp, subnets=[dict(id=1, ip_policy=dict(id=2),\n cidr=\"1.1.1.1/16\")]):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[ipp[\"subnet_id\"]],\n exclude=ipp[\"exclude\"])))\n\n def 
test_create_ip_policy_with_cidr_that_does_not_fit_into_subnet(self):\n ipp = dict(\n subnets=[dict(id=1, version=4, cidr=\"192.168.1.1/24\")],\n networks=[],\n id=1,\n tenant_id=1,\n exclude=[\"10.10.10.100/32\"],\n name=\"foo\")\n with self._stubs(ipp,\n subnets=[dict(id=1, ip_policy=None,\n version=ipp[\"subnets\"][0][\"version\"],\n cidr=ipp[\"subnets\"][0][\"cidr\"])]):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[1],\n exclude=ipp[\"exclude\"])))\n\n def test_create_ip_policy_with_ipv6_subnet_cidr(self):\n ipp = dict(\n subnets=[dict(id=1, version=6, cidr='::/64')],\n networks=[],\n id=1,\n tenant_id=1,\n exclude=[dict(cidr=\"::/128\")],\n name=\"foo\")\n with self._stubs(ipp,\n subnets=[dict(id=1, ip_policy=None,\n version=ipp[\"subnets\"][0][\"version\"],\n cidr=ipp[\"subnets\"][0][\"cidr\"])]):\n exclude = [ippc[\"cidr\"] for ippc in ipp[\"exclude\"]]\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[1], exclude=exclude)))\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp[\"subnet_ids\"], [1])\n self.assertEqual(resp[\"network_ids\"], [])\n # NOTE(jmeridth): below is mocked that way, so it won't get\n # additional default policies in exclude\n # ippol.ensure_default_policy is tested below in this file\n self.assertEqual(resp[\"exclude\"], [\"::/128\"])\n self.assertEqual(resp[\"name\"], \"foo\")\n self.assertEqual(resp[\"tenant_id\"], 1)\n\n def test_create_ip_policy(self):\n ipp = dict(\n subnets=[dict(id=1, cidr='0.0.0.0/16')],\n networks=[],\n id=1,\n tenant_id=1,\n exclude=[dict(cidr=\"0.0.0.0/24\")],\n name=\"foo\")\n with self._stubs(ipp, subnets=[dict(\n id=1, ip_policy=None, cidr=ipp[\"subnets\"][0][\"cidr\"])]):\n exclude = [ippc[\"cidr\"] for ippc in ipp[\"exclude\"]]\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[1], exclude=exclude)))\n self.assertEqual(len(resp.keys()), 6)\n 
self.assertEqual(resp[\"subnet_ids\"], [1])\n self.assertEqual(resp[\"network_ids\"], [])\n # NOTE(jmeridth): below is mocked that way, so it won't get\n # additional default policies in exclude\n # ippol.ensure_default_policy is tested below in this file\n self.assertEqual(resp[\"exclude\"], [\"0.0.0.0/24\"])\n self.assertEqual(resp[\"name\"], \"foo\")\n self.assertEqual(resp[\"tenant_id\"], 1)\n\n def test_create_ip_policy_only_called_once_with_multiple_networks(self):\n ipp = dict(\n subnets=[],\n networks=[dict(id=1, subnets=[dict(id=1,\n ip_policy=None, cidr='0.0.0.0/24')]),\n dict(id=2, subnets=[dict(id=2,\n ip_policy=None, cidr='0.0.0.0/24')])],\n id=1,\n tenant_id=1,\n exclude=[dict(cidr=\"0.0.0.1/32\")],\n name=\"foo\")\n with self._stubs(ipp, nets=ipp[\"networks\"]) as (ip_policy_create):\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(network_ids=[1, 2], exclude=[\"0.0.0.1/32\"])))\n exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.0.255/32']\n ip_policy_create.assert_called_once_with(\n self.context, exclude=exclude,\n networks=[{'subnets':\n [{'cidr': '0.0.0.0/24', 'ip_policy': None,\n 'id': 1}], 'id': 1},\n {'subnets':\n [{'cidr': '0.0.0.0/24', 'ip_policy': None,\n 'id': 2}], 'id': 2}])\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp[\"subnet_ids\"], [])\n self.assertEqual(resp[\"network_ids\"], [1, 2])\n # NOTE(jmeridth): below is mocked that way, so it won't get\n # additional default policies in exclude\n # ippol.ensure_default_policy is tested below in this file\n self.assertEqual(resp[\"exclude\"], [\"0.0.0.1/32\"])\n self.assertEqual(resp[\"name\"], \"foo\")\n self.assertEqual(resp[\"tenant_id\"], 1)\n\n def test_create_ip_policy_only_called_once_with_multiple_subnets(self):\n ipp = dict(\n subnets=[dict(id=3, cidr='0.0.0.0/16'),\n dict(id=4, cidr='0.0.0.0/16')],\n networks=[],\n id=1,\n tenant_id=1,\n exclude=[dict(cidr=\"0.0.0.1/32\")],\n name=\"foo\")\n with self._stubs(ipp, subnets=ipp[\"subnets\"]) as 
(ip_policy_create):\n resp = self.plugin.create_ip_policy(self.context, dict(\n ip_policy=dict(subnet_ids=[3, 4], exclude=[\"0.0.0.1/32\"])))\n exclude = ['0.0.0.1/32', '0.0.0.0/32', '0.0.255.255/32']\n ip_policy_create.assert_called_once_with(\n self.context, exclude=exclude,\n subnets=[{'cidr': '0.0.0.0/16', 'id': 3},\n {'cidr': '0.0.0.0/16', 'id': 4}])\n self.assertEqual(len(resp.keys()), 6)\n self.assertEqual(resp[\"subnet_ids\"], [3, 4])\n self.assertEqual(resp[\"network_ids\"], [])\n # NOTE(jmeridth): below is mocked that way, so it won't get\n # additional default policies in exclude\n # ippol.ensure_default_policy is tested below in this file\n self.assertEqual(resp[\"exclude\"], [\"0.0.0.1/32\"])\n self.assertEqual(resp[\"name\"], \"foo\")\n self.assertEqual(resp[\"tenant_id\"], 1)\n\n\nclass TestQuarkUpdateIpPolicies(test_quark_plugin.TestQuarkPlugin):\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, networks=None):\n if not subnets:\n subnets = []\n if not networks:\n networks = []\n db_mod = \"quark.db.api\"\n with contextlib.nested(\n mock.patch(\"%s.ip_policy_find\" % db_mod),\n mock.patch(\"%s.subnet_find\" % db_mod),\n mock.patch(\"%s.network_find\" % db_mod),\n mock.patch(\"%s.ip_policy_update\" % db_mod),\n ) as (ip_policy_find, subnet_find, network_find, ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n network_find.return_value = networks\n yield ip_policy_update\n\n def test_update_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.update_ip_policy(self.context, 1,\n dict(ip_policy=None))\n\n def test_update_ip_policy_with_both_network_and_subnet_ids(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.BadRequest):\n self.plugin.update_ip_policy(self.context, 1, dict(\n ip_policy=dict(network_ids=[1], subnet_ids=[1])))\n\n def 
test_update_ip_policy_subnets_not_found(self):\n ipp = dict(id=1, subnets=[])\n with self._stubs(ipp):\n with self.assertRaises(n_exc.SubnetNotFound):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(subnet_ids=[100])))\n\n def test_update_ip_policy_subnets_already_exists(self):\n ipp = dict(id=1, subnets=[dict()])\n with self._stubs(\n ipp, subnets=[dict(id=1, ip_policy=dict(id=1))]\n ):\n with self.assertRaises(q_exc.IPPolicyAlreadyExists):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(subnet_ids=[100])))\n\n def test_update_ip_policy_subnets(self):\n ipp = dict(id=1, subnets=[dict()],\n exclude=[\"0.0.0.0/24\"],\n name=\"foo\", tenant_id=1)\n with self._stubs(\n ipp, subnets=[dict(id=1, ip_policy=None)]\n ) as (ip_policy_update):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(subnet_ids=[100])))\n self.assertEqual(ip_policy_update.called, 1)\n\n def test_update_ip_policy_subnets_empty_exclude(self):\n ipp = dict(id=1, subnets=[dict()],\n exclude=[\"0.0.0.40/32\"],\n name=\"foo\", tenant_id=1)\n with self._stubs(\n ipp, subnets=[dict(id=1, cidr=\"0.0.0.0/16\", ip_policy=None)]\n ) as (ip_policy_update):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(subnet_ids=[100], exclude=[])))\n ip_policy_update.assert_called_once_with(\n self.context, ipp, subnet_ids=[100], exclude=[\n \"0.0.0.0/32\", \"0.0.255.255/32\"])\n\n def test_update_ip_policy_subnets_empty_exclude_without_subnet_ids(self):\n ipp = dict(id=1, subnets=[dict(cidr=\"0.0.0.0/16\")],\n exclude=[\"0.0.0.40/32\"],\n name=\"foo\", tenant_id=1)\n with self._stubs(ipp) as (ip_policy_update):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(exclude=[])))\n ip_policy_update.assert_called_once_with(\n self.context, ipp, exclude=[\"0.0.0.0/32\", \"0.0.255.255/32\"])\n\n def test_update_ip_policy_networks_not_found(self):\n ipp = dict(id=1, networks=[])\n with self._stubs(ipp):\n 
with self.assertRaises(n_exc.NetworkNotFound):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(network_ids=[100])))\n\n def test_update_ip_policy_networks(self):\n ipp = dict(id=1, networks=[dict()],\n exclude=[\"0.0.0.0/24\"],\n name=\"foo\", tenant_id=1)\n with self._stubs(\n ipp, networks=[dict(id=1, ip_policy=None)]\n ) as (ip_policy_update):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(network_ids=[100])))\n self.assertEqual(ip_policy_update.called, 1)\n\n def test_update_ip_policy_exclude_v4(self):\n subnets = [dict(id=100, cidr=\"0.0.0.0/16\")]\n ipp = dict(id=1, subnets=subnets,\n exclude=[\"0.0.0.0/24\"],\n name=\"foo\", tenant_id=1)\n with self._stubs(ipp, subnets=subnets) as (ip_policy_update):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(subnet_ids=[100], exclude=[\"0.0.0.1/32\"])))\n ip_policy_update.assert_called_once_with(\n self.context,\n ipp,\n subnet_ids=[100],\n exclude=[\"0.0.0.1/32\", \"0.0.0.0/32\", \"0.0.255.255/32\"])\n\n def test_update_ip_policy_exclude_v6(self):\n subnets = [dict(id=100, cidr=\"::/64\")]\n ipp = dict(id=1, subnets=subnets,\n exclude=[\"::/128\"],\n name=\"foo\", tenant_id=1)\n with self._stubs(ipp, subnets=subnets) as (ip_policy_update):\n self.plugin.update_ip_policy(\n self.context,\n 1,\n dict(ip_policy=dict(subnet_ids=[100], exclude=[\"::1/128\"])))\n ip_policy_update.assert_called_once_with(\n self.context,\n ipp,\n subnet_ids=[100],\n exclude=[\"::1/128\", \"::/128\", \"::ffff:ffff:ffff:ffff/128\"])\n\n\nclass TestQuarkDeleteIpPolicies(test_quark_plugin.TestQuarkPlugin):\n @contextlib.contextmanager\n def _stubs(self, ip_policy):\n db_mod = \"quark.db.api\"\n with contextlib.nested(\n mock.patch(\"%s.ip_policy_find\" % db_mod),\n mock.patch(\"%s.ip_policy_delete\" % db_mod),\n ) as (ip_policy_find, ip_policy_delete):\n ip_policy_find.return_value = ip_policy\n yield ip_policy_find, ip_policy_delete\n\n def 
test_delete_ip_policy_not_found(self):\n with self._stubs(None):\n with self.assertRaises(q_exc.IPPolicyNotFound):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy_in_use(self):\n with self._stubs(dict(networks=True)):\n with self.assertRaises(q_exc.IPPolicyInUse):\n self.plugin.delete_ip_policy(self.context, 1)\n\n def test_delete_ip_policy(self):\n ip_policy = dict(\n id=1,\n networks=[],\n subnets=[])\n with self._stubs(ip_policy) as (ip_policy_find, ip_policy_delete):\n self.plugin.delete_ip_policy(self.context, 1)\n self.assertEqual(ip_policy_find.call_count, 1)\n self.assertEqual(ip_policy_delete.call_count, 1)\n\n\nclass TestQuarkUpdatePolicySubnetWithRoutes(test_quark_plugin.TestQuarkPlugin):\n @contextlib.contextmanager\n def _stubs(self, ip_policy, subnets=None, routes=None):\n subnets = subnets or []\n db_mod = \"quark.db.api\"\n with contextlib.nested(\n mock.patch(\"%s.ip_policy_find\" % db_mod),\n mock.patch(\"%s.subnet_find\" % db_mod),\n mock.patch(\"%s.route_find\" % db_mod),\n mock.patch(\"%s.ip_policy_update\" % db_mod),\n ) as (ip_policy_find, subnet_find, route_find, ip_policy_update):\n ip_policy_find.return_value = ip_policy\n subnet_find.return_value = subnets\n route_find.return_value = routes\n yield ip_policy_update\n\n def test_update_ip_policy_has_route_conflict_raises(self):\n subnet = dict(id=1, cidr=\"192.168.0.0/24\")\n ipp = dict(id=1, subnets=[subnet], exclude=[\"192.168.0.1/32\"],\n name=\"foo\", tenant_id=1)\n route = {\"gateway\": \"192.168.0.1\", \"subnet_id\": subnet[\"id\"]}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n with self.assertRaises(\n n_exc_ext.GatewayConflictWithAllocationPools):\n self.plugin.update_ip_policy(\n self.context, 1,\n dict(ip_policy=dict(subnet_ids=[1], exclude=[])))\n\n def test_update_ip_policy_no_route_conflict(self):\n subnet = dict(id=1, cidr=\"192.168.0.0/24\")\n ipp = dict(id=1, subnets=[subnet], exclude=[\"192.168.0.1/32\"],\n name=\"foo\", 
tenant_id=1)\n route = {\"gateway\": \"192.168.0.1\", \"subnet_id\": subnet[\"id\"]}\n with self._stubs(ipp, subnets=[subnet], routes=[route]):\n try:\n self.plugin.update_ip_policy(\n self.context, 1,\n dict(ip_policy=dict(subnet_ids=[1],\n exclude=[\"192.168.0.0/24\"])))\n except Exception as e:\n self.fail(\"This shouldn't have raised: %s\" % e)\n\n\nclass TestQuarkValidateCIDRsFitsIntoSubnets(test_quark_plugin.TestQuarkPlugin):\n def test_normal_cidr_and_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(\n [\"192.168.0.100/32\"],\n [dict(id=1, cidr=\"192.168.0.0/24\")])\n except Exception:\n self.fail(\"Should not have failed\")\n\n def test_normal_ipv4_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(\n [\"192.168.0.100/32\"], [dict(id=1, cidr=\"::/96\")])\n except Exception:\n self.fail(\"Should not have failed\")\n\n def test_normal_ipv6_cidr_and_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(\n [\"::/128\"], [dict(id=1, cidr=\"::/96\")])\n except Exception:\n self.fail(\"Should not have failed\")\n\n def test_normal_ipv6_cidr_and_valid_ipv4_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(\n [\"::/128\"], [dict(id=1, cidr=\"192.168.0.0/24\")])\n except Exception:\n self.fail(\"Should not have failed\")\n\n def test_normal_cidr_and_multiple_valid_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(\n [\"192.168.0.100/32\"],\n [dict(id=1, cidr=\"192.168.0.0/24\"),\n dict(id=2, cidr=\"192.168.0.0/16\")])\n except Exception:\n self.fail(\"Should not have failed\")\n\n def test_normal_ipv6_cidr_and_multiple_valid_ipv6_subnet(self):\n try:\n ippol._validate_cidrs_fit_into_subnets(\n [\"::/128\"],\n [dict(id=1, cidr=\"::/96\"),\n dict(id=2, cidr=\"::/64\")])\n except Exception:\n self.fail(\"Should not have failed\")\n\n def test_normal_cidr_and_invalid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(\n 
[\"192.168.0.100/32\"],\n [dict(id=1, cidr=\"10.10.10.0/24\")])\n\n def test_normal_ipv6_cidr_and_invalid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(\n [\"::/64\"], [dict(id=1, cidr=\"::/96\")])\n\n def test_normal_cidr_and_one_invalid_and_one_valid_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(\n [\"192.168.0.100/32\"],\n [dict(id=1, cidr=\"10.10.10.0/24\"),\n dict(id=1, cidr=\"192.168.0.0/24\")])\n\n def test_normal_ipv6_cidr_and_one_invalid_and_one_valid_ipv6_subnet(self):\n with self.assertRaises(n_exc.BadRequest):\n ippol._validate_cidrs_fit_into_subnets(\n [\"::/127\"],\n [dict(id=1, cidr=\"::/96\"),\n dict(id=1, cidr=\"::/128\")])\n\n\nclass TestQuarkEnsureDefaultPolicy(test_base.TestBase):\n def test_no_cidrs_no_subnets(self):\n cidrs = []\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [])\n self.assertEqual(subnets, [])\n\n def test_no_cidrs_v4(self):\n cidrs = []\n subnets = [dict(cidr=\"192.168.10.1/24\")]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"192.168.10.0/32\", \"192.168.10.255/32\"])\n self.assertEqual(subnets, [dict(cidr=\"192.168.10.1/24\")])\n\n def test_no_subnets_v4(self):\n cidrs = [\"192.168.10.0/32\", \"192.168.10.255/32\"]\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"192.168.10.0/32\", \"192.168.10.255/32\"])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v4(self):\n cidrs = [\"192.168.10.20/32\", \"192.168.10.40/32\"]\n subnets = [dict(cidr=\"192.168.10.1/24\")]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"192.168.10.20/32\", \"192.168.10.40/32\",\n \"192.168.10.0/32\", \"192.168.10.255/32\"])\n self.assertEqual(subnets, [dict(cidr=\"192.168.10.1/24\")])\n\n def 
test_cidrs_with_default_cidrs_v4(self):\n cidrs = [\"192.168.10.0/32\", \"192.168.10.255/32\"]\n subnets = [dict(cidr=\"192.168.10.1/24\")]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"192.168.10.0/32\", \"192.168.10.255/32\"])\n self.assertEqual(subnets, [dict(cidr=\"192.168.10.1/24\")])\n\n def test_no_cidrs_v6(self):\n cidrs = []\n subnets = [dict(cidr=\"::/64\")]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"::/128\", \"::ffff:ffff:ffff:ffff/128\"])\n self.assertEqual(subnets, [dict(cidr=\"::/64\")])\n\n def test_no_subnets_v6(self):\n cidrs = [\"::/128\", \"::ffff:ffff:ffff:ffff/128\"]\n subnets = []\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"::/128\", \"::ffff:ffff:ffff:ffff/128\"])\n self.assertEqual(subnets, [])\n\n def test_cidrs_without_default_cidrs_v6(self):\n cidrs = [\"::10/128\", \"::20/128\"]\n subnets = [dict(cidr=\"::/64\")]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"::10/128\", \"::20/128\",\n \"::/128\", \"::ffff:ffff:ffff:ffff/128\"])\n self.assertEqual(subnets, [dict(cidr=\"::/64\")])\n\n def test_cidrs_with_default_cidrs_v6(self):\n cidrs = [\"::/128\", \"::ffff:ffff:ffff:ffff/128\"]\n subnets = [dict(cidr=\"::/64\")]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"::/128\", \"::ffff:ffff:ffff:ffff/128\"])\n self.assertEqual(subnets, [dict(cidr=\"::/64\")])\n\n def test_no_duplicates_in_result_when_called_twice(self):\n cidrs = [\"192.168.10.10/32\"]\n subnets = [dict(cidr=\"192.168.10.0/24\")]\n self.assertIsNone(ippol.ensure_default_policy(cidrs, subnets))\n self.assertEqual(cidrs, [\"192.168.10.10/32\", \"192.168.10.0/32\",\n \"192.168.10.255/32\"])\n cidrs2 = [\"192.168.10.10/32\"]\n self.assertIsNone(ippol.ensure_default_policy(cidrs2, subnets))\n self.assertEqual(cidrs, [\"192.168.10.10/32\", 
\"192.168.10.0/32\",\n \"192.168.10.255/32\"])\n self.assertEqual(subnets, [dict(cidr=\"192.168.10.0/24\")])\n",
"step-ids": [
37,
43,
48,
57,
67
]
}
|
[
37,
43,
48,
57,
67
] |
from itertools import cycle  # imported but unused -- presumably for a later step; kept for compatibility

# Step size for the spinlock algorithm (Advent of Code 2017, day 17 style).
STEP_VAL = 376

# Circular buffer the algorithm would insert into; never populated here.
spinlock = []

# NOTE(review): this loop looks like unfinished scaffolding -- the modulo
# result is computed and immediately discarded, so the loop has no
# observable effect.  The original started at count=0, which raised
# ZeroDivisionError on the first iteration (len(spinlock) % 0); starting
# at 1 avoids the crash while leaving behavior otherwise unchanged.
for count in range(1, 2018):
    len(spinlock) % count
|
normal
|
{
"blob_id": "c3755ff5d4262dbf6eaf3df58a336f5e61531435",
"index": 5149,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor count in range(2018):\n len(spinlock) % count\n",
"step-3": "<mask token>\nSTEP_VAL = 376\nspinlock = []\nfor count in range(2018):\n len(spinlock) % count\n",
"step-4": "from itertools import cycle\nSTEP_VAL = 376\nspinlock = []\nfor count in range(2018):\n len(spinlock) % count\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.forms import ModelForm
from contactform.models import ContactRequest
class ContactRequestForm(ModelForm):
    """Model-backed form for creating a ``ContactRequest``."""

    class Meta:
        # Model the form fields are generated from.
        model = ContactRequest
        # Django >= 1.8 raises ImproperlyConfigured when a ModelForm's Meta
        # declares neither ``fields`` nor ``exclude``; ``__all__`` preserves
        # the original (pre-1.8) behavior of exposing every model field.
        fields = "__all__"
|
normal
|
{
"blob_id": "97637e2114254b41ef6e777e60b3ddab1d4622e8",
"index": 4606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ContactRequestForm(ModelForm):\n\n\n class Meta:\n model = ContactRequest\n",
"step-3": "from django.forms import ModelForm\nfrom contactform.models import ContactRequest\n\n\nclass ContactRequestForm(ModelForm):\n\n\n class Meta:\n model = ContactRequest\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def create_new_report(chrome_driver_inner, report_info_inner):
add_new_report = chrome_driver_inner.find_element_by_id(
'MainContent_MainActionCreate')
add_new_report.click()
next_button = chrome_driver_inner.find_element_by_id(
'MainContent_AAWiz__Next')
next_button.click()
name_text = chrome_driver_inner.find_element_by_id(
'MainContent_ClientProjectName')
name_text.clear()
name_text.send_keys('{} - {} - {}'.format(report_info_inner[
'new_report_string'], report_info_inner['start_date'],
report_info_inner['end_date']))
start_date_text = chrome_driver_inner.find_element_by_id(
'MainContent_StartDate_input')
start_date_text.clear()
start_date_text.send_keys(report_info_inner['start_date'])
end_date_text = chrome_driver_inner.find_element_by_id(
'MainContent_EndDate_input')
end_date_text.clear()
end_date_text.send_keys(report_info_inner['end_date'])
def execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):
if report_filename and not report_info:
with open(report_filename, 'r') as input_file:
report_info = json.load(input_file)
report_info['password'] = ''
report_info['user_name'] = ''
file_name = report_info['reconciliation_report_location']
excel_file = pandas.ExcelFile(file_name)
pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)
recon_df = pcard_df['PCard Reconciliation Report']
names = recon_df['Employee Name'].dropna().unique()
chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),
'chromedriver.exe'))
did_not_finish_list = []
finished_users = []
logon_website = report_info['logon_website']
chrome_driver.get(logon_website)
chrome_driver.find_element_by_id('userNameInput').send_keys(report_info
['email_address'])
chrome_driver.find_element_by_id('passwordInput').send_keys(report_info
['password'])
chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)
chosen_names = names
for current_id, the_name in enumerate(chosen_names):
chrome_driver.implicitly_wait(0)
print('Processing user {} of {}, {}'.format(current_id + 1, len(
chosen_names), the_name))
current_user_dropdown = Select(chrome_driver.find_element_by_id(
'CurrentUserDropdown'))
current_user_dropdown.select_by_visible_text(report_info['user_name'])
configuration_link = chrome_driver.find_element_by_id(
'topNavToolsConfigurationLink')
configuration_link.click()
view_and_edit_users = chrome_driver.find_element_by_id(
'MainContent_ctl69')
view_and_edit_users.click()
last_name = chrome_driver.find_element_by_id('MainContent_LName')
last_name_str = the_name.split()[1]
last_name.send_keys(last_name_str)
last_name.send_keys(Keys.ENTER)
user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = "{}"]'
.format(the_name))
edit_user = user_tag.find_elements_by_xpath(
"../..//img[@src='images/16_edit.png']")
edit_user[0].click()
switch_user = chrome_driver.find_element_by_link_text(
'Switch to this User')
switch_user.click()
more_items = chrome_driver.find_element_by_id(
'MainContent_lblWalletMoreItems')
more_items.click()
transaction_list = chrome_driver.find_elements_by_xpath(
"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']"
)
for i_val in transaction_list:
i_val.find_element_by_xpath("../..//input[@type='checkbox']"
).click()
try:
add_content = chrome_driver.find_element_by_id('MainContent_Add')
add_content.click()
except:
did_not_finish_list.append(the_name)
continue
chrome_driver.implicitly_wait(int(report_info['wait_time']))
try:
add_to_existing = chrome_driver.find_element_by_id(
'MainContent_MainActionAdd')
add_to_existing.click()
except NoSuchElementException:
did_not_finish_list.append(the_name)
continue
chrome_driver.implicitly_wait(0)
if add_to_existing.get_attribute('disabled') == 'true':
create_new_report(chrome_driver, report_info)
else:
next_button = chrome_driver.find_element_by_id(
'MainContent_AAWiz__Next')
next_button.click()
selected_report = Select(chrome_driver.find_element_by_id(
'MainContent_SelectedExpenseReport'))
try:
selected_report.select_by_visible_text('{} - {} - {}'.
format(report_info['report_executive_string'],
report_info['start_date'], report_info['end_date']))
except NoSuchElementException:
back_button = chrome_driver.find_element_by_id(
'MainContent_AAWiz__Back')
back_button.click()
create_new_report(chrome_driver, report_info)
next_button_2 = chrome_driver.find_element_by_id(
'MainContent_AAWiz__Next')
next_button_2.click()
finished_users.append(the_name)
current_user_dropdown = Select(chrome_driver.find_element_by_id(
'CurrentUserDropdown'))
current_user_dropdown.select_by_visible_text(report_info['user_name'])
print('Did not finish: {}'.format(did_not_finish_list))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_new_report(chrome_driver_inner, report_info_inner):
add_new_report = chrome_driver_inner.find_element_by_id(
'MainContent_MainActionCreate')
add_new_report.click()
next_button = chrome_driver_inner.find_element_by_id(
'MainContent_AAWiz__Next')
next_button.click()
name_text = chrome_driver_inner.find_element_by_id(
'MainContent_ClientProjectName')
name_text.clear()
name_text.send_keys('{} - {} - {}'.format(report_info_inner[
'new_report_string'], report_info_inner['start_date'],
report_info_inner['end_date']))
start_date_text = chrome_driver_inner.find_element_by_id(
'MainContent_StartDate_input')
start_date_text.clear()
start_date_text.send_keys(report_info_inner['start_date'])
end_date_text = chrome_driver_inner.find_element_by_id(
'MainContent_EndDate_input')
end_date_text.clear()
end_date_text.send_keys(report_info_inner['end_date'])
def execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):
if report_filename and not report_info:
with open(report_filename, 'r') as input_file:
report_info = json.load(input_file)
report_info['password'] = ''
report_info['user_name'] = ''
file_name = report_info['reconciliation_report_location']
excel_file = pandas.ExcelFile(file_name)
pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)
recon_df = pcard_df['PCard Reconciliation Report']
names = recon_df['Employee Name'].dropna().unique()
chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),
'chromedriver.exe'))
did_not_finish_list = []
finished_users = []
logon_website = report_info['logon_website']
chrome_driver.get(logon_website)
chrome_driver.find_element_by_id('userNameInput').send_keys(report_info
['email_address'])
chrome_driver.find_element_by_id('passwordInput').send_keys(report_info
['password'])
chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)
chosen_names = names
for current_id, the_name in enumerate(chosen_names):
chrome_driver.implicitly_wait(0)
print('Processing user {} of {}, {}'.format(current_id + 1, len(
chosen_names), the_name))
current_user_dropdown = Select(chrome_driver.find_element_by_id(
'CurrentUserDropdown'))
current_user_dropdown.select_by_visible_text(report_info['user_name'])
configuration_link = chrome_driver.find_element_by_id(
'topNavToolsConfigurationLink')
configuration_link.click()
view_and_edit_users = chrome_driver.find_element_by_id(
'MainContent_ctl69')
view_and_edit_users.click()
last_name = chrome_driver.find_element_by_id('MainContent_LName')
last_name_str = the_name.split()[1]
last_name.send_keys(last_name_str)
last_name.send_keys(Keys.ENTER)
user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = "{}"]'
.format(the_name))
edit_user = user_tag.find_elements_by_xpath(
"../..//img[@src='images/16_edit.png']")
edit_user[0].click()
switch_user = chrome_driver.find_element_by_link_text(
'Switch to this User')
switch_user.click()
more_items = chrome_driver.find_element_by_id(
'MainContent_lblWalletMoreItems')
more_items.click()
transaction_list = chrome_driver.find_elements_by_xpath(
"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']"
)
for i_val in transaction_list:
i_val.find_element_by_xpath("../..//input[@type='checkbox']"
).click()
try:
add_content = chrome_driver.find_element_by_id('MainContent_Add')
add_content.click()
except:
did_not_finish_list.append(the_name)
continue
chrome_driver.implicitly_wait(int(report_info['wait_time']))
try:
add_to_existing = chrome_driver.find_element_by_id(
'MainContent_MainActionAdd')
add_to_existing.click()
except NoSuchElementException:
did_not_finish_list.append(the_name)
continue
chrome_driver.implicitly_wait(0)
if add_to_existing.get_attribute('disabled') == 'true':
create_new_report(chrome_driver, report_info)
else:
next_button = chrome_driver.find_element_by_id(
'MainContent_AAWiz__Next')
next_button.click()
selected_report = Select(chrome_driver.find_element_by_id(
'MainContent_SelectedExpenseReport'))
try:
selected_report.select_by_visible_text('{} - {} - {}'.
format(report_info['report_executive_string'],
report_info['start_date'], report_info['end_date']))
except NoSuchElementException:
back_button = chrome_driver.find_element_by_id(
'MainContent_AAWiz__Back')
back_button.click()
create_new_report(chrome_driver, report_info)
next_button_2 = chrome_driver.find_element_by_id(
'MainContent_AAWiz__Next')
next_button_2.click()
finished_users.append(the_name)
current_user_dropdown = Select(chrome_driver.find_element_by_id(
'CurrentUserDropdown'))
current_user_dropdown.select_by_visible_text(report_info['user_name'])
print('Did not finish: {}'.format(did_not_finish_list))
if __name__ == '__main__':
execute_expense_report()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')
def create_new_report(chrome_driver_inner, report_info_inner):
    """Drive the expense-tracker wizard to create a brand-new report.

    Clicks the "create" action, advances the wizard one page, then fills in
    the report name (``"<new_report_string> - <start_date> - <end_date>"``)
    and the start/end date fields.  The wizard is left on the detail page;
    the caller presses "Next" afterwards.

    :param chrome_driver_inner: Selenium WebDriver already positioned on the
        add-transaction wizard page.
    :param report_info_inner: dict with ``new_report_string``,
        ``start_date`` and ``end_date`` keys (dates as display strings).
    """
    # Choose the "create new report" action and advance the wizard.
    add_new_report = chrome_driver_inner.find_element_by_id(
        'MainContent_MainActionCreate')
    add_new_report.click()
    next_button = chrome_driver_inner.find_element_by_id(
        'MainContent_AAWiz__Next')
    next_button.click()
    # Report name combines the configured label with the date range.
    name_text = chrome_driver_inner.find_element_by_id(
        'MainContent_ClientProjectName')
    name_text.clear()
    name_text.send_keys('{} - {} - {}'.format(report_info_inner[
        'new_report_string'], report_info_inner['start_date'],
        report_info_inner['end_date']))
    # Each date field is cleared before typing the configured value.
    start_date_text = chrome_driver_inner.find_element_by_id(
        'MainContent_StartDate_input')
    start_date_text.clear()
    start_date_text.send_keys(report_info_inner['start_date'])
    end_date_text = chrome_driver_inner.find_element_by_id(
        'MainContent_EndDate_input')
    end_date_text.clear()
    end_date_text.send_keys(report_info_inner['end_date'])
def execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):
    """Attach each employee's PCard transactions to an expense report.

    Reads the employee list from a PCard reconciliation spreadsheet, logs
    into the expense-tracker site with Selenium, then for every employee:
    switches into that user, ticks every credit-card wallet item, and adds
    them to the report named
    ``"<report_executive_string> - <start_date> - <end_date>"`` -- creating
    a new report via ``create_new_report`` when none exists or the
    "add to existing" action is disabled.  Users for whom a UI step failed
    are printed at the end.

    :param report_filename: path to a JSON config file; only read when
        *report_info* is not supplied.
    :param report_info: pre-loaded config dict (takes precedence).
    """
    if report_filename and not report_info:
        with open(report_filename, 'r') as input_file:
            report_info = json.load(input_file)
    # NOTE(review): these two lines blank out the credentials loaded from
    # the config -- presumably a redaction left in before sharing.  As
    # written, the login below sends an empty password and the admin
    # dropdown selects an empty user name; confirm intended values.
    report_info['password'] = ''
    report_info['user_name'] = ''
    # Employee names come from the reconciliation workbook; the sheet's
    # first 8 rows are header material and are skipped.
    file_name = report_info['reconciliation_report_location']
    excel_file = pandas.ExcelFile(file_name)
    pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)
    recon_df = pcard_df['PCard Reconciliation Report']
    names = recon_df['Employee Name'].dropna().unique()
    # chromedriver.exe is expected to sit next to this script.
    chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),
        'chromedriver.exe'))
    did_not_finish_list = []  # users skipped because a UI step failed
    finished_users = []  # NOTE(review): collected but never reported
    logon_website = report_info['logon_website']
    chrome_driver.get(logon_website)
    chrome_driver.find_element_by_id('userNameInput').send_keys(report_info
        ['email_address'])
    chrome_driver.find_element_by_id('passwordInput').send_keys(report_info
        ['password'])
    chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)
    chosen_names = names
    for current_id, the_name in enumerate(chosen_names):
        # Disable implicit waits while navigating pages that should
        # already be loaded; re-enabled below around the slow wizard step.
        chrome_driver.implicitly_wait(0)
        print('Processing user {} of {}, {}'.format(current_id + 1, len(
            chosen_names), the_name))
        # Act as the admin account before looking up the next employee.
        current_user_dropdown = Select(chrome_driver.find_element_by_id(
            'CurrentUserDropdown'))
        current_user_dropdown.select_by_visible_text(report_info['user_name'])
        configuration_link = chrome_driver.find_element_by_id(
            'topNavToolsConfigurationLink')
        configuration_link.click()
        view_and_edit_users = chrome_driver.find_element_by_id(
            'MainContent_ctl69')
        view_and_edit_users.click()
        # Search the user list by last name (assumes names are
        # "First Last"; a middle name would break the [1] index -- TODO
        # confirm against the spreadsheet data).
        last_name = chrome_driver.find_element_by_id('MainContent_LName')
        last_name_str = the_name.split()[1]
        last_name.send_keys(last_name_str)
        last_name.send_keys(Keys.ENTER)
        # Locate the exact-name row and open its edit (pencil) icon.
        user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = "{}"]'
            .format(the_name))
        edit_user = user_tag.find_elements_by_xpath(
            "../..//img[@src='images/16_edit.png']")
        edit_user[0].click()
        switch_user = chrome_driver.find_element_by_link_text(
            'Switch to this User')
        switch_user.click()
        # Expand the wallet feed and tick every credit-card transaction.
        more_items = chrome_driver.find_element_by_id(
            'MainContent_lblWalletMoreItems')
        more_items.click()
        transaction_list = chrome_driver.find_elements_by_xpath(
            "//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']"
            )
        for i_val in transaction_list:
            i_val.find_element_by_xpath("../..//input[@type='checkbox']"
                ).click()
        try:
            add_content = chrome_driver.find_element_by_id('MainContent_Add')
            add_content.click()
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # narrowing to NoSuchElementException would be safer.
        except:
            did_not_finish_list.append(the_name)
            continue
        # The add wizard can take a while to render; allow up to the
        # configured number of seconds for its elements to appear.
        chrome_driver.implicitly_wait(int(report_info['wait_time']))
        try:
            add_to_existing = chrome_driver.find_element_by_id(
                'MainContent_MainActionAdd')
            add_to_existing.click()
        except NoSuchElementException:
            did_not_finish_list.append(the_name)
            continue
        chrome_driver.implicitly_wait(0)
        if add_to_existing.get_attribute('disabled') == 'true':
            # "Add to existing" is greyed out: no report exists yet.
            create_new_report(chrome_driver, report_info)
        else:
            next_button = chrome_driver.find_element_by_id(
                'MainContent_AAWiz__Next')
            next_button.click()
            selected_report = Select(chrome_driver.find_element_by_id(
                'MainContent_SelectedExpenseReport'))
            try:
                selected_report.select_by_visible_text('{} - {} - {}'.
                    format(report_info['report_executive_string'],
                    report_info['start_date'], report_info['end_date']))
            except NoSuchElementException:
                # Expected report name missing from the dropdown: back out
                # of this wizard page and create the report instead.
                back_button = chrome_driver.find_element_by_id(
                    'MainContent_AAWiz__Back')
                back_button.click()
                create_new_report(chrome_driver, report_info)
        next_button_2 = chrome_driver.find_element_by_id(
            'MainContent_AAWiz__Next')
        next_button_2.click()
        finished_users.append(the_name)
        # Switch back to the admin account for the next iteration.
        current_user_dropdown = Select(chrome_driver.find_element_by_id(
            'CurrentUserDropdown'))
        current_user_dropdown.select_by_visible_text(report_info['user_name'])
    print('Did not finish: {}'.format(did_not_finish_list))
if __name__ == '__main__':
execute_expense_report()
<|reserved_special_token_1|>
import pandas
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import json
CONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')
def create_new_report(chrome_driver_inner, report_info_inner):
    """Create a brand-new expense report through the creation wizard.

    Clicks the "create" action and the wizard's Next button, then fills in
    the report name and the start/end date fields from the supplied
    configuration mapping.

    :param chrome_driver_inner: Selenium WebDriver positioned on the
        expense page.
    :param report_info_inner: dict providing 'new_report_string',
        'start_date' and 'end_date' values for the form.
    """
    # Kick off the creation wizard and advance past its first page.
    chrome_driver_inner.find_element_by_id(
        'MainContent_MainActionCreate').click()
    chrome_driver_inner.find_element_by_id('MainContent_AAWiz__Next').click()
    # The report title follows the "<label> - <start> - <end>" convention.
    title = '{} - {} - {}'.format(report_info_inner['new_report_string'],
                                  report_info_inner['start_date'],
                                  report_info_inner['end_date'])
    name_field = chrome_driver_inner.find_element_by_id(
        'MainContent_ClientProjectName')
    name_field.clear()
    name_field.send_keys(title)
    # Both date inputs are populated the same way: clear, then type.
    for field_id, value in (
            ('MainContent_StartDate_input', report_info_inner['start_date']),
            ('MainContent_EndDate_input', report_info_inner['end_date'])):
        date_field = chrome_driver_inner.find_element_by_id(field_id)
        date_field.clear()
        date_field.send_keys(value)
def execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):
    """Attach credit-card transactions to expense reports for every employee.

    Reads the employee list from the PCard reconciliation spreadsheet, logs
    in to the expense site with Selenium, impersonates each employee in
    turn, selects their credit-card wallet items and adds them to the
    period's expense report (creating the report when it does not exist).

    :param report_filename: path to a JSON configuration file; read only
        when ``report_info`` is not supplied.
    :param report_info: optional pre-built configuration dict; when given,
        ``report_filename`` is not read.
    """
    if report_filename and not report_info:
        with open(report_filename, 'r') as input_file:
            report_info = json.load(input_file)
        # NOTE(review): credentials are blanked right after loading, so the
        # logon below types an empty user name/password -- confirm this is
        # intentional redaction rather than a leftover.
        report_info['password'] = ''
        report_info['user_name'] = ''
    file_name = report_info['reconciliation_report_location']
    excel_file = pandas.ExcelFile(file_name)
    # Data rows begin below an 8-row header block in the workbook.
    pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)
    recon_df = pcard_df['PCard Reconciliation Report']
    names = recon_df['Employee Name'].dropna().unique()
    chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),
        'chromedriver.exe'))
    did_not_finish_list = []
    finished_users = []
    logon_website = report_info['logon_website']
    chrome_driver.get(logon_website)
    chrome_driver.find_element_by_id('userNameInput').send_keys(report_info
        ['email_address'])
    chrome_driver.find_element_by_id('passwordInput').send_keys(report_info
        ['password'])
    chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)
    chosen_names = names
    for current_id, the_name in enumerate(chosen_names):
        chrome_driver.implicitly_wait(0)
        print('Processing user {} of {}, {}'.format(current_id + 1, len(
            chosen_names), the_name))
        # Return to the admin account before impersonating the next user.
        current_user_dropdown = Select(chrome_driver.find_element_by_id(
            'CurrentUserDropdown'))
        current_user_dropdown.select_by_visible_text(report_info['user_name'])
        configuration_link = chrome_driver.find_element_by_id(
            'topNavToolsConfigurationLink')
        configuration_link.click()
        view_and_edit_users = chrome_driver.find_element_by_id(
            'MainContent_ctl69')
        view_and_edit_users.click()
        # Search the user list by last name, then open the user's edit page.
        last_name = chrome_driver.find_element_by_id('MainContent_LName')
        last_name_str = the_name.split()[1]
        last_name.send_keys(last_name_str)
        last_name.send_keys(Keys.ENTER)
        user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = "{}"]'
            .format(the_name))
        edit_user = user_tag.find_elements_by_xpath(
            "../..//img[@src='images/16_edit.png']")
        edit_user[0].click()
        switch_user = chrome_driver.find_element_by_link_text(
            'Switch to this User')
        switch_user.click()
        more_items = chrome_driver.find_element_by_id(
            'MainContent_lblWalletMoreItems')
        more_items.click()
        # Tick every credit-card transaction in the wallet feed.
        transaction_list = chrome_driver.find_elements_by_xpath(
            "//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']"
            )
        for i_val in transaction_list:
            i_val.find_element_by_xpath("../..//input[@type='checkbox']"
                ).click()
        try:
            add_content = chrome_driver.find_element_by_id('MainContent_Add')
            add_content.click()
        except NoSuchElementException:
            # Was a bare ``except:``, which also swallowed KeyboardInterrupt
            # and unrelated errors; only the missing-element case should
            # mark this user as unfinished.
            did_not_finish_list.append(the_name)
            continue
        chrome_driver.implicitly_wait(int(report_info['wait_time']))
        try:
            add_to_existing = chrome_driver.find_element_by_id(
                'MainContent_MainActionAdd')
            add_to_existing.click()
        except NoSuchElementException:
            did_not_finish_list.append(the_name)
            continue
        chrome_driver.implicitly_wait(0)
        if add_to_existing.get_attribute('disabled') == 'true':
            # No existing report to add to -- create one from scratch.
            create_new_report(chrome_driver, report_info)
        else:
            next_button = chrome_driver.find_element_by_id(
                'MainContent_AAWiz__Next')
            next_button.click()
            selected_report = Select(chrome_driver.find_element_by_id(
                'MainContent_SelectedExpenseReport'))
            try:
                selected_report.select_by_visible_text('{} - {} - {}'.
                    format(report_info['report_executive_string'],
                    report_info['start_date'], report_info['end_date']))
            except NoSuchElementException:
                # The period's report is not in the dropdown: back out of
                # the wizard and create the report instead.
                back_button = chrome_driver.find_element_by_id(
                    'MainContent_AAWiz__Back')
                back_button.click()
                create_new_report(chrome_driver, report_info)
        next_button_2 = chrome_driver.find_element_by_id(
            'MainContent_AAWiz__Next')
        next_button_2.click()
        finished_users.append(the_name)
    # Switch back to the admin account before reporting results.
    current_user_dropdown = Select(chrome_driver.find_element_by_id(
        'CurrentUserDropdown'))
    current_user_dropdown.select_by_visible_text(report_info['user_name'])
    print('Did not finish: {}'.format(did_not_finish_list))
# Script entry point: run the full expense-report automation when executed
# directly (no effect when the module is imported).
if __name__ == '__main__':
    execute_expense_report()
<|reserved_special_token_1|>
import pandas
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import json
# Default path of the JSON configuration file, resolved relative to this script.
CONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')
def create_new_report(chrome_driver_inner, report_info_inner):
    """Create a brand-new expense report through the creation wizard.

    Clicks the "create" action and the wizard's Next button, then fills in
    the report name and the start/end date fields from the supplied
    configuration mapping.

    :param chrome_driver_inner: Selenium WebDriver positioned on the
        expense page.
    :param report_info_inner: dict providing 'new_report_string',
        'start_date' and 'end_date' values for the form.
    """
    # Kick off the creation wizard and advance past its first page.
    chrome_driver_inner.find_element_by_id(
        'MainContent_MainActionCreate').click()
    chrome_driver_inner.find_element_by_id('MainContent_AAWiz__Next').click()
    # The report title follows the "<label> - <start> - <end>" convention.
    title = '{} - {} - {}'.format(report_info_inner['new_report_string'],
                                  report_info_inner['start_date'],
                                  report_info_inner['end_date'])
    name_field = chrome_driver_inner.find_element_by_id(
        'MainContent_ClientProjectName')
    name_field.clear()
    name_field.send_keys(title)
    # Both date inputs are populated the same way: clear, then type.
    for field_id, value in (
            ('MainContent_StartDate_input', report_info_inner['start_date']),
            ('MainContent_EndDate_input', report_info_inner['end_date'])):
        date_field = chrome_driver_inner.find_element_by_id(field_id)
        date_field.clear()
        date_field.send_keys(value)
def execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):
    """Attach credit-card transactions to expense reports for every employee.

    Reads the employee list from the PCard reconciliation spreadsheet, logs
    in to the expense site with Selenium, impersonates each employee in
    turn, selects their credit-card wallet items and adds them to the
    period's expense report (creating the report when it does not exist).

    :param report_filename: path to a JSON configuration file; read only
        when ``report_info`` is not supplied.
    :param report_info: optional pre-built configuration dict; when given,
        ``report_filename`` is not read.
    """
    if report_filename and not report_info:
        with open(report_filename, 'r') as input_file:
            report_info = json.load(input_file)
        # NOTE(review): credentials are blanked right after loading, so the
        # logon below types an empty user name/password -- confirm this is
        # intentional redaction rather than a leftover.
        report_info['password'] = ''
        report_info['user_name'] = ''
    file_name = report_info['reconciliation_report_location']
    excel_file = pandas.ExcelFile(file_name)
    # Data rows begin below an 8-row header block in the workbook.
    pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)
    recon_df = pcard_df['PCard Reconciliation Report']
    names = recon_df['Employee Name'].dropna().unique()
    chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),
        'chromedriver.exe'))
    did_not_finish_list = []
    finished_users = []
    logon_website = report_info['logon_website']
    chrome_driver.get(logon_website)
    chrome_driver.find_element_by_id('userNameInput').send_keys(report_info
        ['email_address'])
    chrome_driver.find_element_by_id('passwordInput').send_keys(report_info
        ['password'])
    chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)
    chosen_names = names
    for current_id, the_name in enumerate(chosen_names):
        chrome_driver.implicitly_wait(0)
        print('Processing user {} of {}, {}'.format(current_id + 1, len(
            chosen_names), the_name))
        # Return to the admin account before impersonating the next user.
        current_user_dropdown = Select(chrome_driver.find_element_by_id(
            'CurrentUserDropdown'))
        current_user_dropdown.select_by_visible_text(report_info['user_name'])
        configuration_link = chrome_driver.find_element_by_id(
            'topNavToolsConfigurationLink')
        configuration_link.click()
        view_and_edit_users = chrome_driver.find_element_by_id(
            'MainContent_ctl69')
        view_and_edit_users.click()
        # Search the user list by last name, then open the user's edit page.
        last_name = chrome_driver.find_element_by_id('MainContent_LName')
        last_name_str = the_name.split()[1]
        last_name.send_keys(last_name_str)
        last_name.send_keys(Keys.ENTER)
        user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = "{}"]'
            .format(the_name))
        edit_user = user_tag.find_elements_by_xpath(
            "../..//img[@src='images/16_edit.png']")
        edit_user[0].click()
        switch_user = chrome_driver.find_element_by_link_text(
            'Switch to this User')
        switch_user.click()
        more_items = chrome_driver.find_element_by_id(
            'MainContent_lblWalletMoreItems')
        more_items.click()
        # Tick every credit-card transaction in the wallet feed.
        transaction_list = chrome_driver.find_elements_by_xpath(
            "//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']"
            )
        for i_val in transaction_list:
            i_val.find_element_by_xpath("../..//input[@type='checkbox']"
                ).click()
        try:
            add_content = chrome_driver.find_element_by_id('MainContent_Add')
            add_content.click()
        except NoSuchElementException:
            # Was a bare ``except:``, which also swallowed KeyboardInterrupt
            # and unrelated errors; only the missing-element case should
            # mark this user as unfinished.
            did_not_finish_list.append(the_name)
            continue
        chrome_driver.implicitly_wait(int(report_info['wait_time']))
        try:
            add_to_existing = chrome_driver.find_element_by_id(
                'MainContent_MainActionAdd')
            add_to_existing.click()
        except NoSuchElementException:
            did_not_finish_list.append(the_name)
            continue
        chrome_driver.implicitly_wait(0)
        if add_to_existing.get_attribute('disabled') == 'true':
            # No existing report to add to -- create one from scratch.
            create_new_report(chrome_driver, report_info)
        else:
            next_button = chrome_driver.find_element_by_id(
                'MainContent_AAWiz__Next')
            next_button.click()
            selected_report = Select(chrome_driver.find_element_by_id(
                'MainContent_SelectedExpenseReport'))
            try:
                selected_report.select_by_visible_text('{} - {} - {}'.
                    format(report_info['report_executive_string'],
                    report_info['start_date'], report_info['end_date']))
            except NoSuchElementException:
                # The period's report is not in the dropdown: back out of
                # the wizard and create the report instead.
                back_button = chrome_driver.find_element_by_id(
                    'MainContent_AAWiz__Back')
                back_button.click()
                create_new_report(chrome_driver, report_info)
        next_button_2 = chrome_driver.find_element_by_id(
            'MainContent_AAWiz__Next')
        next_button_2.click()
        finished_users.append(the_name)
    # Switch back to the admin account before reporting results.
    current_user_dropdown = Select(chrome_driver.find_element_by_id(
        'CurrentUserDropdown'))
    current_user_dropdown.select_by_visible_text(report_info['user_name'])
    print('Did not finish: {}'.format(did_not_finish_list))
# Script entry point: run the full expense-report automation when executed
# directly (no effect when the module is imported).
if __name__ == '__main__':
    execute_expense_report()
|
flexible
|
{
"blob_id": "14cb702054b8caaa8899a2a3d8b65aae9b063cb6",
"index": 5600,
"step-1": "<mask token>\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n add_new_report = chrome_driver_inner.find_element_by_id(\n 'MainContent_MainActionCreate')\n add_new_report.click()\n next_button = chrome_driver_inner.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n name_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner[\n 'new_report_string'], report_info_inner['start_date'],\n report_info_inner['end_date']))\n start_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n end_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n recon_df = pcard_df['PCard Reconciliation Report']\n names = recon_df['Employee Name'].dropna().unique()\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),\n 'chromedriver.exe'))\n did_not_finish_list = []\n finished_users = []\n logon_website = report_info['logon_website']\n chrome_driver.get(logon_website)\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info\n ['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info\n ['password'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n chosen_names = names\n for 
current_id, the_name in enumerate(chosen_names):\n chrome_driver.implicitly_wait(0)\n print('Processing user {} of {}, {}'.format(current_id + 1, len(\n chosen_names), the_name))\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n configuration_link = chrome_driver.find_element_by_id(\n 'topNavToolsConfigurationLink')\n configuration_link.click()\n view_and_edit_users = chrome_driver.find_element_by_id(\n 'MainContent_ctl69')\n view_and_edit_users.click()\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = \"{}\"]'\n .format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\n \"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n switch_user = chrome_driver.find_element_by_link_text(\n 'Switch to this User')\n switch_user.click()\n more_items = chrome_driver.find_element_by_id(\n 'MainContent_lblWalletMoreItems')\n more_items.click()\n transaction_list = chrome_driver.find_elements_by_xpath(\n \"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\"\n )\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\"\n ).click()\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n try:\n add_to_existing = chrome_driver.find_element_by_id(\n 'MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(0)\n if add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n 
next_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n selected_report = Select(chrome_driver.find_element_by_id(\n 'MainContent_SelectedExpenseReport'))\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.\n format(report_info['report_executive_string'],\n report_info['start_date'], report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Back')\n back_button.click()\n create_new_report(chrome_driver, report_info)\n next_button_2 = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button_2.click()\n finished_users.append(the_name)\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n add_new_report = chrome_driver_inner.find_element_by_id(\n 'MainContent_MainActionCreate')\n add_new_report.click()\n next_button = chrome_driver_inner.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n name_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner[\n 'new_report_string'], report_info_inner['start_date'],\n report_info_inner['end_date']))\n start_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n end_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n recon_df = pcard_df['PCard Reconciliation Report']\n names = recon_df['Employee Name'].dropna().unique()\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),\n 'chromedriver.exe'))\n did_not_finish_list = []\n finished_users = []\n logon_website = report_info['logon_website']\n chrome_driver.get(logon_website)\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info\n ['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info\n ['password'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n chosen_names = names\n for 
current_id, the_name in enumerate(chosen_names):\n chrome_driver.implicitly_wait(0)\n print('Processing user {} of {}, {}'.format(current_id + 1, len(\n chosen_names), the_name))\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n configuration_link = chrome_driver.find_element_by_id(\n 'topNavToolsConfigurationLink')\n configuration_link.click()\n view_and_edit_users = chrome_driver.find_element_by_id(\n 'MainContent_ctl69')\n view_and_edit_users.click()\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = \"{}\"]'\n .format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\n \"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n switch_user = chrome_driver.find_element_by_link_text(\n 'Switch to this User')\n switch_user.click()\n more_items = chrome_driver.find_element_by_id(\n 'MainContent_lblWalletMoreItems')\n more_items.click()\n transaction_list = chrome_driver.find_elements_by_xpath(\n \"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\"\n )\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\"\n ).click()\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n try:\n add_to_existing = chrome_driver.find_element_by_id(\n 'MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(0)\n if add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n 
next_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n selected_report = Select(chrome_driver.find_element_by_id(\n 'MainContent_SelectedExpenseReport'))\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.\n format(report_info['report_executive_string'],\n report_info['start_date'], report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Back')\n back_button.click()\n create_new_report(chrome_driver, report_info)\n next_button_2 = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button_2.click()\n finished_users.append(the_name)\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\nif __name__ == '__main__':\n execute_expense_report()\n",
"step-3": "<mask token>\nCONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n add_new_report = chrome_driver_inner.find_element_by_id(\n 'MainContent_MainActionCreate')\n add_new_report.click()\n next_button = chrome_driver_inner.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n name_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner[\n 'new_report_string'], report_info_inner['start_date'],\n report_info_inner['end_date']))\n start_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n end_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n recon_df = pcard_df['PCard Reconciliation Report']\n names = recon_df['Employee Name'].dropna().unique()\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),\n 'chromedriver.exe'))\n did_not_finish_list = []\n finished_users = []\n logon_website = report_info['logon_website']\n chrome_driver.get(logon_website)\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info\n ['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info\n ['password'])\n 
chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n chosen_names = names\n for current_id, the_name in enumerate(chosen_names):\n chrome_driver.implicitly_wait(0)\n print('Processing user {} of {}, {}'.format(current_id + 1, len(\n chosen_names), the_name))\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n configuration_link = chrome_driver.find_element_by_id(\n 'topNavToolsConfigurationLink')\n configuration_link.click()\n view_and_edit_users = chrome_driver.find_element_by_id(\n 'MainContent_ctl69')\n view_and_edit_users.click()\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = \"{}\"]'\n .format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\n \"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n switch_user = chrome_driver.find_element_by_link_text(\n 'Switch to this User')\n switch_user.click()\n more_items = chrome_driver.find_element_by_id(\n 'MainContent_lblWalletMoreItems')\n more_items.click()\n transaction_list = chrome_driver.find_elements_by_xpath(\n \"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\"\n )\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\"\n ).click()\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n try:\n add_to_existing = chrome_driver.find_element_by_id(\n 'MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(0)\n if 
add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n next_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n selected_report = Select(chrome_driver.find_element_by_id(\n 'MainContent_SelectedExpenseReport'))\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.\n format(report_info['report_executive_string'],\n report_info['start_date'], report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Back')\n back_button.click()\n create_new_report(chrome_driver, report_info)\n next_button_2 = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button_2.click()\n finished_users.append(the_name)\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\nif __name__ == '__main__':\n execute_expense_report()\n",
"step-4": "import pandas\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nimport json\nCONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n add_new_report = chrome_driver_inner.find_element_by_id(\n 'MainContent_MainActionCreate')\n add_new_report.click()\n next_button = chrome_driver_inner.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n name_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner[\n 'new_report_string'], report_info_inner['start_date'],\n report_info_inner['end_date']))\n start_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n end_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n recon_df = pcard_df['PCard Reconciliation Report']\n names = recon_df['Employee Name'].dropna().unique()\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),\n 'chromedriver.exe'))\n did_not_finish_list = []\n finished_users = []\n logon_website = report_info['logon_website']\n 
chrome_driver.get(logon_website)\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info\n ['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info\n ['password'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n chosen_names = names\n for current_id, the_name in enumerate(chosen_names):\n chrome_driver.implicitly_wait(0)\n print('Processing user {} of {}, {}'.format(current_id + 1, len(\n chosen_names), the_name))\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n configuration_link = chrome_driver.find_element_by_id(\n 'topNavToolsConfigurationLink')\n configuration_link.click()\n view_and_edit_users = chrome_driver.find_element_by_id(\n 'MainContent_ctl69')\n view_and_edit_users.click()\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = \"{}\"]'\n .format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\n \"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n switch_user = chrome_driver.find_element_by_link_text(\n 'Switch to this User')\n switch_user.click()\n more_items = chrome_driver.find_element_by_id(\n 'MainContent_lblWalletMoreItems')\n more_items.click()\n transaction_list = chrome_driver.find_elements_by_xpath(\n \"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\"\n )\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\"\n ).click()\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n try:\n add_to_existing = 
chrome_driver.find_element_by_id(\n 'MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(0)\n if add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n next_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n selected_report = Select(chrome_driver.find_element_by_id(\n 'MainContent_SelectedExpenseReport'))\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.\n format(report_info['report_executive_string'],\n report_info['start_date'], report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Back')\n back_button.click()\n create_new_report(chrome_driver, report_info)\n next_button_2 = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button_2.click()\n finished_users.append(the_name)\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\nif __name__ == '__main__':\n execute_expense_report()\n",
"step-5": "import pandas\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nimport json\n\nCONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n\n add_new_report = chrome_driver_inner.find_element_by_id('MainContent_MainActionCreate')\n add_new_report.click()\n\n next_button = chrome_driver_inner.find_element_by_id('MainContent_AAWiz__Next')\n next_button.click()\n\n name_text = chrome_driver_inner.find_element_by_id('MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner['new_report_string'],\n report_info_inner['start_date'],\n report_info_inner['end_date']))\n\n start_date_text = chrome_driver_inner.find_element_by_id('MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n\n end_date_text = chrome_driver_inner.find_element_by_id('MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME,\n report_info=None):\n\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n \n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n\n recon_df = pcard_df['PCard Reconciliation Report']\n\n names = recon_df['Employee Name'].dropna().unique()\n\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__), 'chromedriver.exe'))\n\n did_not_finish_list= []\n finished_users =[]\n\n logon_website = report_info['logon_website']\n\n 
chrome_driver.get(logon_website)\n\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info['password'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n\n chosen_names = names\n\n for current_id, the_name in enumerate(chosen_names):\n\n chrome_driver.implicitly_wait(0)\n\n print('Processing user {} of {}, {}'.format(current_id+1, len(chosen_names), the_name))\n\n current_user_dropdown = Select(chrome_driver.find_element_by_id('CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n\n configuration_link = chrome_driver.find_element_by_id('topNavToolsConfigurationLink')\n configuration_link.click()\n\n view_and_edit_users = chrome_driver.find_element_by_id('MainContent_ctl69')\n view_and_edit_users.click()\n\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n\n user_tag = chrome_driver.find_element_by_xpath(\"//nobr[text() = \\\"{}\\\"]\".format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n\n switch_user = chrome_driver.find_element_by_link_text('Switch to this User')\n switch_user.click()\n\n more_items = chrome_driver.find_element_by_id('MainContent_lblWalletMoreItems')\n more_items.click()\n\n transaction_list = chrome_driver.find_elements_by_xpath(\"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\")\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\").click()\n\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n\n #time.sleep(3)\n\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n\n try:\n 
add_to_existing = chrome_driver.find_element_by_id('MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n\n chrome_driver.implicitly_wait(0)\n\n if add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n next_button = chrome_driver.find_element_by_id('MainContent_AAWiz__Next')\n next_button.click()\n\n selected_report = Select(chrome_driver.find_element_by_id('MainContent_SelectedExpenseReport'))\n\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.format(report_info['report_executive_string'], \n report_info['start_date'],\n report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id('MainContent_AAWiz__Back')\n back_button.click()\n\n create_new_report(chrome_driver, report_info)\n\n\n next_button_2= chrome_driver.find_element_by_id('MainContent_AAWiz__Next')\n next_button_2.click()\n\n finished_users.append(the_name)\n\n current_user_dropdown = Select(chrome_driver.find_element_by_id('CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\nif __name__ == '__main__':\n execute_expense_report()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from Distributions import UniformDistribution
from EventGenerator import Generator
from Processor import Processor
class Modeller:
    """Event-driven simulation of a small queueing network.

    Topology (wired up in ``event_mode``): one request generator feeds
    three operators; operators 0 and 1 forward to computer 0, operator 2
    forwards to computer 1.
    """

    def __init__(self, generator, operators, computers):
        # generator: request source; expected to expose num_requests,
        #   next / next_time() and generate_request().
        # operators: list of exactly three operator blocks.
        # computers: list of exactly two Processor blocks.
        self._generator = generator
        self._operators = operators
        self._computers = computers

    def event_mode(self):
        """Run the event loop until the generator runs out of requests.

        Returns:
            dict with keys ``refusal_percentage``, ``refusals`` and
            ``processed``.
        """
        refusals = 0
        processed = 0
        generated_requests = self._generator.num_requests
        generator = self._generator
        # Wire the fixed topology described in the class docstring.
        generator.receivers = self._operators.copy()
        self._operators[0].receivers = [self._computers[0]]
        self._operators[1].receivers = [self._computers[0]]
        self._operators[2].receivers = [self._computers[1]]
        # Seed the first event times (a block with next == 0 is treated
        # as idle by the minimum-time scan below).
        generator.next = generator.next_time()
        self._operators[0].next = self._operators[0].next_time()
        blocks = [
            generator,
            self._operators[0],
            self._operators[1],
            self._operators[2],
            self._computers[0],
            self._computers[1],
        ]
        # Loop presumably ends once generate_request() has exhausted
        # generator.num_requests — TODO confirm against Generator.
        while generator.num_requests >= 0:
            # Find the smallest scheduled event time (next == 0 means
            # "nothing scheduled" and is skipped).
            current_time = generator.next
            for block in blocks:
                if 0 < block.next < current_time:
                    current_time = block.next
            # For each block ...
            for block in blocks:
                # ... whose event fires at this instant:
                if current_time == block.next:
                    if not isinstance(block, Processor):
                        # Non-Processor branch (i.e. the generator —
                        # operators/computers are assumed to be Processor
                        # instances; confirm). Try to hand the request to
                        # an operator; None means every operator refused.
                        next_generator = generator.generate_request()
                        if next_generator is not None:
                            next_generator.next = \
                                current_time + next_generator.next_time()
                            processed += 1
                        else:
                            refusals += 1
                        generator.next = current_time + generator.next_time()
                    else:
                        # Processor event: finish the current request and
                        # schedule the next one if the queue is non-empty.
                        block.process_request()
                        if block.current_queue_size == 0:
                            block.next = 0
                        else:
                            block.next = current_time + block.next_time()
        return {"refusal_percentage": refusals / generated_requests * 100,
                "refusals": refusals,
                "processed": processed,
                }
|
normal
|
{
"blob_id": "11ed7550c25ca9944ce7073d9655cb9af7bdeae9",
"index": 1324,
"step-1": "<mask token>\n\n\nclass Modeller:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Modeller:\n <mask token>\n\n def event_mode(self):\n refusals = 0\n processed = 0\n generated_requests = self._generator.num_requests\n generator = self._generator\n generator.receivers = self._operators.copy()\n self._operators[0].receivers = [self._computers[0]]\n self._operators[1].receivers = [self._computers[0]]\n self._operators[2].receivers = [self._computers[1]]\n generator.next = generator.next_time()\n self._operators[0].next = self._operators[0].next_time()\n blocks = [generator, self._operators[0], self._operators[1], self.\n _operators[2], self._computers[0], self._computers[1]]\n while generator.num_requests >= 0:\n current_time = generator.next\n for block in blocks:\n if 0 < block.next < current_time:\n current_time = block.next\n for block in blocks:\n if current_time == block.next:\n if not isinstance(block, Processor):\n next_generator = generator.generate_request()\n if next_generator is not None:\n next_generator.next = (current_time +\n next_generator.next_time())\n processed += 1\n else:\n refusals += 1\n generator.next = current_time + generator.next_time()\n else:\n block.process_request()\n if block.current_queue_size == 0:\n block.next = 0\n else:\n block.next = current_time + block.next_time()\n return {'refusal_percentage': refusals / generated_requests * 100,\n 'refusals': refusals, 'processed': processed}\n",
"step-3": "<mask token>\n\n\nclass Modeller:\n\n def __init__(self, generator, operators, computers):\n self._generator = generator\n self._operators = operators\n self._computers = computers\n\n def event_mode(self):\n refusals = 0\n processed = 0\n generated_requests = self._generator.num_requests\n generator = self._generator\n generator.receivers = self._operators.copy()\n self._operators[0].receivers = [self._computers[0]]\n self._operators[1].receivers = [self._computers[0]]\n self._operators[2].receivers = [self._computers[1]]\n generator.next = generator.next_time()\n self._operators[0].next = self._operators[0].next_time()\n blocks = [generator, self._operators[0], self._operators[1], self.\n _operators[2], self._computers[0], self._computers[1]]\n while generator.num_requests >= 0:\n current_time = generator.next\n for block in blocks:\n if 0 < block.next < current_time:\n current_time = block.next\n for block in blocks:\n if current_time == block.next:\n if not isinstance(block, Processor):\n next_generator = generator.generate_request()\n if next_generator is not None:\n next_generator.next = (current_time +\n next_generator.next_time())\n processed += 1\n else:\n refusals += 1\n generator.next = current_time + generator.next_time()\n else:\n block.process_request()\n if block.current_queue_size == 0:\n block.next = 0\n else:\n block.next = current_time + block.next_time()\n return {'refusal_percentage': refusals / generated_requests * 100,\n 'refusals': refusals, 'processed': processed}\n",
"step-4": "from Distributions import UniformDistribution\nfrom EventGenerator import Generator\nfrom Processor import Processor\n\n\nclass Modeller:\n\n def __init__(self, generator, operators, computers):\n self._generator = generator\n self._operators = operators\n self._computers = computers\n\n def event_mode(self):\n refusals = 0\n processed = 0\n generated_requests = self._generator.num_requests\n generator = self._generator\n generator.receivers = self._operators.copy()\n self._operators[0].receivers = [self._computers[0]]\n self._operators[1].receivers = [self._computers[0]]\n self._operators[2].receivers = [self._computers[1]]\n generator.next = generator.next_time()\n self._operators[0].next = self._operators[0].next_time()\n blocks = [generator, self._operators[0], self._operators[1], self.\n _operators[2], self._computers[0], self._computers[1]]\n while generator.num_requests >= 0:\n current_time = generator.next\n for block in blocks:\n if 0 < block.next < current_time:\n current_time = block.next\n for block in blocks:\n if current_time == block.next:\n if not isinstance(block, Processor):\n next_generator = generator.generate_request()\n if next_generator is not None:\n next_generator.next = (current_time +\n next_generator.next_time())\n processed += 1\n else:\n refusals += 1\n generator.next = current_time + generator.next_time()\n else:\n block.process_request()\n if block.current_queue_size == 0:\n block.next = 0\n else:\n block.next = current_time + block.next_time()\n return {'refusal_percentage': refusals / generated_requests * 100,\n 'refusals': refusals, 'processed': processed}\n",
"step-5": " \nfrom Distributions import UniformDistribution\nfrom EventGenerator import Generator\nfrom Processor import Processor\n\n\nclass Modeller:\n def __init__(self, generator, operators, computers):\n self._generator = generator\n self._operators = operators\n self._computers = computers\n\n def event_mode(self):\n refusals = 0\n processed = 0\n generated_requests = self._generator.num_requests\n generator = self._generator\n\n generator.receivers = self._operators.copy()\n self._operators[0].receivers = [self._computers[0]]\n self._operators[1].receivers = [self._computers[0]]\n self._operators[2].receivers = [self._computers[1]]\n\n generator.next = generator.next_time()\n self._operators[0].next = self._operators[0].next_time()\n\n blocks = [\n generator,\n self._operators[0],\n self._operators[1],\n self._operators[2],\n self._computers[0],\n self._computers[1],\n ]\n\n while generator.num_requests >= 0:\n # находим наименьшее время\n current_time = generator.next\n for block in blocks:\n if 0 < block.next < current_time:\n current_time = block.next\n\n # для каждого из блоков\n for block in blocks:\n # если событие наступило для этого блока\n if current_time == block.next:\n if not isinstance(block, Processor):\n # для генератора \n # проверяем, может ли оператор обработать\n next_generator = generator.generate_request()\n if next_generator is not None:\n next_generator.next = \\\n current_time + next_generator.next_time()\n processed += 1\n else:\n refusals += 1\n generator.next = current_time + generator.next_time()\n else:\n block.process_request()\n if block.current_queue_size == 0:\n block.next = 0\n else:\n block.next = current_time + block.next_time()\n\n return {\"refusal_percentage\": refusals / generated_requests * 100,\n \"refusals\": refusals,\n \"processed\": processed,\n }",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import cv2
import random
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from torchvision import transforms
class ShanghaiTechPartA(Dataset):
    """PyTorch Dataset for the ShanghaiTech Part A crowd-counting data.

    ``root`` must point at an ``images`` directory; matching ground-truth
    density maps are loaded from the sibling ``density_maps`` directory
    (same file names with a ``.npy`` extension).
    """

    def __init__(self, root, shuffle=False, transform=None, downsample=1):
        """
        Args:
            root: path to the ``images`` directory.
            shuffle: randomize the sample order once, at construction time.
            transform: optional transform applied to the image tensor only.
            downsample: factor by which the density map is shrunk relative
                to the image (to match the model's output stride).
        """
        self.root = root
        self.shuffle = shuffle
        self.transform = transform
        self.downsample = downsample

        # os.listdir already returns a list; no comprehension needed.
        self.image_names = os.listdir(self.root)
        self.n_samples = len(self.image_names)

        if self.shuffle:
            random.shuffle(self.image_names)

    def __len__(self):
        return self.n_samples

    def __getitem__(self, index):
        """Return ``(image_tensor, density_map_tensor)`` for sample ``index``."""
        # Was ``index <= len(self)``: off-by-one that let index == len(self)
        # slip past the guard and fail later with a raw IndexError.
        assert index < len(self), 'index range error'
        img_name = self.image_names[index]
        # Read image and normalize its pixels to [0, 1].
        img = plt.imread(os.path.join(self.root, img_name)) / 255
        # Expand a grayscale image to three identical channels.
        if img.ndim == 2:
            img = img[:, :, np.newaxis]
            img = np.concatenate((img, img, img), 2)

        # Read the ground-truth density map stored alongside the images.
        density_map = np.load(os.path.join(
            self.root.replace('images', 'density_maps'),
            img_name.replace('.jpg', '.npy')))

        # Downsample the density map (and snap the image to a multiple of
        # the factor) so shapes match the model's input/output stride.
        if self.downsample > 1:
            rows = int(img.shape[0] // self.downsample)
            cols = int(img.shape[1] // self.downsample)
            img = cv2.resize(img, (cols * self.downsample,
                                   rows * self.downsample))
            img = img.transpose((2, 0, 1))  # HWC -> CHW
            density_map = cv2.resize(density_map, (cols, rows))
            # Rescale so the head count (map sum) survives the resize.
            density_map = (density_map[np.newaxis, :, :]
                           * self.downsample * self.downsample)
        # NOTE(review): when downsample == 1 the image stays in HWC order
        # and the density map gains no channel axis — confirm callers never
        # use that configuration, or hoist the transpose out of the branch.

        # Convert image and density map to float tensors.
        img_tensor = torch.tensor(img, dtype=torch.float)
        density_map_tensor = torch.tensor(density_map, dtype=torch.float)
        # Apply any extra transformation (image only).
        if self.transform is not None:
            img_tensor = self.transform(img_tensor)

        return img_tensor, density_map_tensor
# Test code: load one random sample and report its tensor shapes.
if __name__ == "__main__":
    root = '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'
    dataset = ShanghaiTechPartA(root,
                                transform=transforms.Normalize(
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
                                downsample=8)
    # random.randint is inclusive on both ends; the original upper bound
    # len(dataset) could index one past the last sample.
    index = random.randint(0, len(dataset) - 1)
    img, dmap = dataset[index]
    print(index, img.shape, dmap.shape)
|
normal
|
{
"blob_id": "8a0a98ab072e46463d80d8638c830e6db0032a77",
"index": 8101,
"step-1": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n <mask token>\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\nif __name__ == '__main__':\n root = (\n '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n )\n dataset = ShanghaiTechPartA(root, transform=transforms.Normalize(mean=[\n 0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n",
"step-4": "import cv2\nimport random\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\nif __name__ == '__main__':\n root = (\n '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n )\n dataset = ShanghaiTechPartA(root, transform=transforms.Normalize(mean=[\n 0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n",
"step-5": "import cv2\nimport random\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass ShanghaiTechPartA(Dataset):\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n # Read image and normalize its pixels to [0,1]\n img = plt.imread(os.path.join(self.root,img_name)) / 255\n # Expand grayscale image to three channel.\n if len(img.shape) == 2:\n img = img[:,:,np.newaxis]\n img = np.concatenate((img,img,img),2)\n\n # Read ground truth density-map\n density_map = np.load(os.path.join(self.root.replace('images','density_maps'),img_name.replace('.jpg','.npy')))\n\n # Downsample image and density-map to match model's input\n if self.downsample >1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img,(cols*self.downsample, rows*self.downsample))\n img = img.transpose((2,0,1)) # convert to order (channel,rows,cols)\n density_map = cv2.resize(density_map, (cols,rows))\n density_map = density_map[np.newaxis,:,:] * self.downsample * self.downsample\n # transform image and density_map to tensors\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n # Apply any other transformation\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n\n return img_tensor, density_map_tensor\n\n\n# Test code\nif __name__== \"__main__\":\n root = 
'/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n dataset = ShanghaiTechPartA(root,\n transform=transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#finding postgresql info
import re
import subprocess
def get_postgre_version():
    """Return the installed PostgreSQL client version string (e.g. '14.2').

    Runs ``psql --version`` and parses the ``psql (PostgreSQL) X.Y`` banner.

    Raises:
        RuntimeError: if the banner cannot be parsed.
        subprocess.CalledProcessError: if ``psql`` exits non-zero.
    """
    # check_output (Python 2.7+/3) waits for and reaps the child, unlike the
    # original bare Popen; an argv list avoids shell=True quoting pitfalls.
    output = subprocess.check_output(['psql', '--version'])
    if not isinstance(output, str):
        # Python 3 returns bytes; decode so the regex works on str.
        output = output.decode('utf-8', 'replace')
    match = re.search(r'psql\s+\(PostgreSQL\)\s+(.*)', output)
    if match is None:
        raise RuntimeError('could not parse psql version output: %r' % output)
    return match.group(1)
# Script entry: report the locally-installed psql version.
version = get_postgre_version()
print(version)  # parenthesised form works on both Python 2 and 3
|
normal
|
{
"blob_id": "e6b84a2190a84c871e7191ef49fb7ee8b8148c9a",
"index": 7062,
"step-1": "#finding postgresql info\nimport re\nimport subprocess\ndef get_postgre_version():\n p = subprocess.Popen(\"psql --version\",stdout=subprocess.PIPE,shell=True)\n k = re.findall(r'psql\\s+\\(PostgreSQL\\)\\s+(.*)',p.stdout.read())\n postgre_version = k[0]\n return postgre_version\n\n\nversion=get_postgre_version()\nprint version\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class UF(object):
<|reserved_special_token_0|>
def find(self, i):
while i != self.parents[i]:
self.parents[i] = self.parents[self.parents[i]]
i = self.parents[i]
return i
def union(self, p, q):
i = self.find(p)
j = self.find(q)
if i == j:
return
if self.weights[i] < self.weights[j]:
self.parents[i] = j
self.weights[j] += self.weights[i]
else:
self.parents[j] = i
self.weights[i] += self.weights[j]
self.n -= 1
def is_connected(self, p, q):
i = self.find(p)
j = self.find(q)
return i == j
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class UF(object):
<|reserved_special_token_0|>
def find(self, i):
while i != self.parents[i]:
self.parents[i] = self.parents[self.parents[i]]
i = self.parents[i]
return i
def union(self, p, q):
i = self.find(p)
j = self.find(q)
if i == j:
return
if self.weights[i] < self.weights[j]:
self.parents[i] = j
self.weights[j] += self.weights[i]
else:
self.parents[j] = i
self.weights[i] += self.weights[j]
self.n -= 1
def is_connected(self, p, q):
i = self.find(p)
j = self.find(q)
return i == j
def __len__(self):
return self.n
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class UF(object):
def __init__(self, n):
self.parents = [i for i in range(n)]
self.weights = [(1) for i in range(n)]
self.n = n
def find(self, i):
while i != self.parents[i]:
self.parents[i] = self.parents[self.parents[i]]
i = self.parents[i]
return i
def union(self, p, q):
i = self.find(p)
j = self.find(q)
if i == j:
return
if self.weights[i] < self.weights[j]:
self.parents[i] = j
self.weights[j] += self.weights[i]
else:
self.parents[j] = i
self.weights[i] += self.weights[j]
self.n -= 1
def is_connected(self, p, q):
i = self.find(p)
j = self.find(q)
return i == j
def __len__(self):
return self.n
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class UF(object):
def __init__(self, n):
self.parents = [i for i in range(n)]
self.weights = [(1) for i in range(n)]
self.n = n
def find(self, i):
while i != self.parents[i]:
self.parents[i] = self.parents[self.parents[i]]
i = self.parents[i]
return i
def union(self, p, q):
i = self.find(p)
j = self.find(q)
if i == j:
return
if self.weights[i] < self.weights[j]:
self.parents[i] = j
self.weights[j] += self.weights[i]
else:
self.parents[j] = i
self.weights[i] += self.weights[j]
self.n -= 1
def is_connected(self, p, q):
i = self.find(p)
j = self.find(q)
return i == j
def __len__(self):
return self.n
if __name__ == '__main__':
uf = UF(10)
uf.union(1, 2)
uf.union(3, 4)
uf.union(2, 4)
assert len(uf) == 7
assert uf.is_connected(1, 4)
assert not uf.is_connected(1, 5)
<|reserved_special_token_1|>
class UF(object):
    """Weighted quick-union (union-find) with path halving.

    ``self.n`` tracks the number of disjoint components; ``len(uf)``
    reports it.
    """

    def __init__(self, n):
        # Every element starts as its own root with component size 1.
        self.parents = list(range(n))
        self.weights = [1] * n
        self.n = n

    def find(self, i):
        """Return the root of ``i``'s component, halving the path on the way up."""
        while i != self.parents[i]:
            # Path halving: point i at its grandparent before stepping up.
            self.parents[i] = self.parents[self.parents[i]]
            i = self.parents[i]
        return i

    def union(self, p, q):
        """Merge the components containing ``p`` and ``q`` (no-op if joined)."""
        root_p, root_q = self.find(p), self.find(q)
        if root_p == root_q:
            return
        # Attach the lighter tree under the heavier one to keep trees shallow.
        if self.weights[root_p] < self.weights[root_q]:
            root_p, root_q = root_q, root_p
        self.parents[root_q] = root_p
        self.weights[root_p] += self.weights[root_q]
        self.n -= 1

    def is_connected(self, p, q):
        """True if ``p`` and ``q`` share a root."""
        return self.find(p) == self.find(q)

    def __len__(self):
        return self.n
if __name__ == '__main__':
    # Smoke test: three unions over ten elements leave seven components.
    finder = UF(10)
    for left, right in ((1, 2), (3, 4), (2, 4)):
        finder.union(left, right)

    assert len(finder) == 7
    assert finder.is_connected(1, 4)
    assert not finder.is_connected(1, 5)
|
flexible
|
{
"blob_id": "c8d5b8515a468190d14311118e12a7d414908be6",
"index": 8109,
"step-1": "class UF(object):\n <mask token>\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n <mask token>\n\n\n<mask token>\n",
"step-2": "class UF(object):\n <mask token>\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\n<mask token>\n",
"step-3": "class UF(object):\n\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [(1) for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\n<mask token>\n",
"step-4": "class UF(object):\n\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [(1) for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\nif __name__ == '__main__':\n uf = UF(10)\n uf.union(1, 2)\n uf.union(3, 4)\n uf.union(2, 4)\n assert len(uf) == 7\n assert uf.is_connected(1, 4)\n assert not uf.is_connected(1, 5)\n",
"step-5": "class UF(object):\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [1 for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i== j\n\n def __len__(self):\n return self.n\n\n\nif __name__ == '__main__':\n uf = UF(10)\n uf.union(1, 2)\n uf.union(3, 4)\n uf.union(2, 4)\n\n assert len(uf) == 7\n\n assert uf.is_connected(1, 4)\n assert not uf.is_connected(1, 5)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from abc import ABC, abstractmethod
from raspberry_home.view.geometry import *
from raspberry_home.view.renderable import Renderable
class View(Renderable, ABC):
    """Abstract base for renderable views that can report their content size."""

    @abstractmethod
    def content_size(self, container_size: Size) ->Size:
        """Return the size this view's content occupies when laid out
        inside a container of ``container_size``."""
        pass
|
normal
|
{
"blob_id": "913ff9b811d3abbe43bda0554e40a6a2c87053be",
"index": 4449,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass View(Renderable, ABC):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass View(Renderable, ABC):\n\n @abstractmethod\n def content_size(self, container_size: Size) ->Size:\n pass\n",
"step-4": "from abc import ABC, abstractmethod\nfrom raspberry_home.view.geometry import *\nfrom raspberry_home.view.renderable import Renderable\n\n\nclass View(Renderable, ABC):\n\n @abstractmethod\n def content_size(self, container_size: Size) ->Size:\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class GAME:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
<|reserved_special_token_0|>
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect
):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0
].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,
self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GAME:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def rem_rockets(self):
for rocket in self.rockets:
if not rocket.out_of_frame():
self.rockets.remove(rocket)
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
def draw_elements(self, screen):
if self.mode == 0:
screen.blit(bg, (0, 0))
elif self.mode == 1:
screen.fill((155, 199, 167))
self.coin.draw_coin(screen)
self.snake.draw_snake(screen)
self.check_timer()
if self.count >= self.condition:
self.bombs.insert(0, Bomb(self.mode))
self.condition = self.condition * 2
for rocket in self.rockets:
rocket.draw_rocket(screen)
for bomb in self.bombs:
bomb.draw_bomb(screen)
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect
):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0
].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,
self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GAME:
def __init__(self, mode) ->None:
self.playing = 0
self.mode = mode
self.coin = Coin(self.mode)
self.moving_coin = pygame.sprite.Group()
self.moving_coin.add(self.coin)
self.snake = Snake(self.mode)
self.bombs = [Bomb(self.mode)]
self.rockets = []
self.condition = 4
self.crowd = 2
self.count = 0
self.anim_pos = [Vector2(-1, -1), Vector2(-1, -1), Vector2(-1, -1)]
self.game_timer = 0
self.game_over = False
def refresh(self, mode):
self.__init__(mode)
return 1, 1
<|reserved_special_token_0|>
def rem_rockets(self):
for rocket in self.rockets:
if not rocket.out_of_frame():
self.rockets.remove(rocket)
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
def draw_elements(self, screen):
if self.mode == 0:
screen.blit(bg, (0, 0))
elif self.mode == 1:
screen.fill((155, 199, 167))
self.coin.draw_coin(screen)
self.snake.draw_snake(screen)
self.check_timer()
if self.count >= self.condition:
self.bombs.insert(0, Bomb(self.mode))
self.condition = self.condition * 2
for rocket in self.rockets:
rocket.draw_rocket(screen)
for bomb in self.bombs:
bomb.draw_bomb(screen)
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect
):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0
].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,
self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GAME:
def __init__(self, mode) ->None:
self.playing = 0
self.mode = mode
self.coin = Coin(self.mode)
self.moving_coin = pygame.sprite.Group()
self.moving_coin.add(self.coin)
self.snake = Snake(self.mode)
self.bombs = [Bomb(self.mode)]
self.rockets = []
self.condition = 4
self.crowd = 2
self.count = 0
self.anim_pos = [Vector2(-1, -1), Vector2(-1, -1), Vector2(-1, -1)]
self.game_timer = 0
self.game_over = False
def refresh(self, mode):
self.__init__(mode)
return 1, 1
def update(self):
self.snake.move_snake()
self.check_collision()
self.check_fail()
self.rem_rockets()
def rem_rockets(self):
for rocket in self.rockets:
if not rocket.out_of_frame():
self.rockets.remove(rocket)
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
def draw_elements(self, screen):
if self.mode == 0:
screen.blit(bg, (0, 0))
elif self.mode == 1:
screen.fill((155, 199, 167))
self.coin.draw_coin(screen)
self.snake.draw_snake(screen)
self.check_timer()
if self.count >= self.condition:
self.bombs.insert(0, Bomb(self.mode))
self.condition = self.condition * 2
for rocket in self.rockets:
rocket.draw_rocket(screen)
for bomb in self.bombs:
bomb.draw_bomb(screen)
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect
):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0
].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,
self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
<|reserved_special_token_1|>
import pygame
from .Coin import Coin
from .Snake import Snake, Block
from .Bomb import Bomb
from .Rocket import Rocket
from pygame.math import Vector2
cell_size = 16
cell_number = 30
sprite_cell = pygame.image.load("Assets/Cell.png")
bg = pygame.image.load("Assets/BG.png")
bg2 = pygame.image.load("Assets/BG2.png")
class GAME():
def __init__(self, mode) -> None:
self.playing = 0
self.mode = mode
# Classic mode
# Colorfull mode with assets etc
self.coin = Coin(self.mode)
self.moving_coin = pygame.sprite.Group()
self.moving_coin.add(self.coin)
self.snake = Snake(self.mode)
self.bombs = [Bomb(self.mode)]
self.rockets = []
self.condition = 4
self.crowd = 2
self.count = 0
self.anim_pos = [Vector2(-1,-1), Vector2(-1,-1), Vector2(-1,-1)]
self.game_timer = 0
self.game_over = False
# self.acc = 0.1
# self.difficulty = 0
def refresh(self, mode):
self.__init__(mode)
return 1, 1
def update(self):
self.snake.move_snake()
self.check_collision()
self.check_fail()
self.rem_rockets()
def rem_rockets(self):
for rocket in self.rockets:
if not rocket.out_of_frame():
self.rockets.remove(rocket)
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
def draw_elements(self, screen):
if self.mode == 0:
screen.blit(bg, (0, 0))
elif self.mode == 1:
screen.fill((155, 199, 167))
self.coin.draw_coin(screen)
self.snake.draw_snake(screen)
self.check_timer()
if self.count >= self.condition:
self.bombs.insert(0, Bomb(self.mode))
self.condition = self.condition * 2
for rocket in self.rockets:
rocket.draw_rocket(screen)
for bomb in self.bombs:
bomb.draw_bomb(screen)
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1 :
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:] :
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x, self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
|
flexible
|
{
"blob_id": "2b14607aa2527f5da57284917d06ea60e89f784c",
"index": 1659,
"step-1": "<mask token>\n\n\nclass GAME:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n <mask token>\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect\n ):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1:\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n def check_fail(self):\n if not 0 <= self.snake.body[0\n ].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n for block in self.snake.body[1:]:\n if block == self.snake.body[0]:\n self.game_over = 1\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,\n self.snake.body[0].y).rect):\n self.game_over = 1\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1\n",
"step-2": "<mask token>\n\n\nclass GAME:\n <mask token>\n <mask token>\n <mask token>\n\n def rem_rockets(self):\n for rocket in self.rockets:\n if not rocket.out_of_frame():\n self.rockets.remove(rocket)\n\n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n\n def draw_elements(self, screen):\n if self.mode == 0:\n screen.blit(bg, (0, 0))\n elif self.mode == 1:\n screen.fill((155, 199, 167))\n self.coin.draw_coin(screen)\n self.snake.draw_snake(screen)\n self.check_timer()\n if self.count >= self.condition:\n self.bombs.insert(0, Bomb(self.mode))\n self.condition = self.condition * 2\n for rocket in self.rockets:\n rocket.draw_rocket(screen)\n for bomb in self.bombs:\n bomb.draw_bomb(screen)\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect\n ):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1:\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n def check_fail(self):\n if not 0 <= self.snake.body[0\n ].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n for block in self.snake.body[1:]:\n if block == self.snake.body[0]:\n self.game_over = 1\n for rocket in self.rockets:\n if 
rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,\n self.snake.body[0].y).rect):\n self.game_over = 1\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1\n",
"step-3": "<mask token>\n\n\nclass GAME:\n\n def __init__(self, mode) ->None:\n self.playing = 0\n self.mode = mode\n self.coin = Coin(self.mode)\n self.moving_coin = pygame.sprite.Group()\n self.moving_coin.add(self.coin)\n self.snake = Snake(self.mode)\n self.bombs = [Bomb(self.mode)]\n self.rockets = []\n self.condition = 4\n self.crowd = 2\n self.count = 0\n self.anim_pos = [Vector2(-1, -1), Vector2(-1, -1), Vector2(-1, -1)]\n self.game_timer = 0\n self.game_over = False\n\n def refresh(self, mode):\n self.__init__(mode)\n return 1, 1\n <mask token>\n\n def rem_rockets(self):\n for rocket in self.rockets:\n if not rocket.out_of_frame():\n self.rockets.remove(rocket)\n\n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n\n def draw_elements(self, screen):\n if self.mode == 0:\n screen.blit(bg, (0, 0))\n elif self.mode == 1:\n screen.fill((155, 199, 167))\n self.coin.draw_coin(screen)\n self.snake.draw_snake(screen)\n self.check_timer()\n if self.count >= self.condition:\n self.bombs.insert(0, Bomb(self.mode))\n self.condition = self.condition * 2\n for rocket in self.rockets:\n rocket.draw_rocket(screen)\n for bomb in self.bombs:\n bomb.draw_bomb(screen)\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect\n ):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1:\n self.bombs.remove(bomb)\n else:\n 
bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n def check_fail(self):\n if not 0 <= self.snake.body[0\n ].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n for block in self.snake.body[1:]:\n if block == self.snake.body[0]:\n self.game_over = 1\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,\n self.snake.body[0].y).rect):\n self.game_over = 1\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1\n",
"step-4": "<mask token>\n\n\nclass GAME:\n\n def __init__(self, mode) ->None:\n self.playing = 0\n self.mode = mode\n self.coin = Coin(self.mode)\n self.moving_coin = pygame.sprite.Group()\n self.moving_coin.add(self.coin)\n self.snake = Snake(self.mode)\n self.bombs = [Bomb(self.mode)]\n self.rockets = []\n self.condition = 4\n self.crowd = 2\n self.count = 0\n self.anim_pos = [Vector2(-1, -1), Vector2(-1, -1), Vector2(-1, -1)]\n self.game_timer = 0\n self.game_over = False\n\n def refresh(self, mode):\n self.__init__(mode)\n return 1, 1\n\n def update(self):\n self.snake.move_snake()\n self.check_collision()\n self.check_fail()\n self.rem_rockets()\n\n def rem_rockets(self):\n for rocket in self.rockets:\n if not rocket.out_of_frame():\n self.rockets.remove(rocket)\n\n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n\n def draw_elements(self, screen):\n if self.mode == 0:\n screen.blit(bg, (0, 0))\n elif self.mode == 1:\n screen.fill((155, 199, 167))\n self.coin.draw_coin(screen)\n self.snake.draw_snake(screen)\n self.check_timer()\n if self.count >= self.condition:\n self.bombs.insert(0, Bomb(self.mode))\n self.condition = self.condition * 2\n for rocket in self.rockets:\n rocket.draw_rocket(screen)\n for bomb in self.bombs:\n bomb.draw_bomb(screen)\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect\n ):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n 
self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1:\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n def check_fail(self):\n if not 0 <= self.snake.body[0\n ].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n for block in self.snake.body[1:]:\n if block == self.snake.body[0]:\n self.game_over = 1\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,\n self.snake.body[0].y).rect):\n self.game_over = 1\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1\n",
"step-5": "import pygame\nfrom .Coin import Coin\nfrom .Snake import Snake, Block\nfrom .Bomb import Bomb\nfrom .Rocket import Rocket\nfrom pygame.math import Vector2\n\ncell_size = 16\ncell_number = 30\n\nsprite_cell = pygame.image.load(\"Assets/Cell.png\")\nbg = pygame.image.load(\"Assets/BG.png\")\nbg2 = pygame.image.load(\"Assets/BG2.png\")\n\nclass GAME():\n def __init__(self, mode) -> None:\n self.playing = 0\n\n self.mode = mode\n # Classic mode \n # Colorfull mode with assets etc\n\n self.coin = Coin(self.mode)\n\n self.moving_coin = pygame.sprite.Group()\n self.moving_coin.add(self.coin)\n\n self.snake = Snake(self.mode)\n self.bombs = [Bomb(self.mode)]\n self.rockets = []\n\n self.condition = 4\n self.crowd = 2\n self.count = 0\n\n self.anim_pos = [Vector2(-1,-1), Vector2(-1,-1), Vector2(-1,-1)]\n\n self.game_timer = 0\n\n self.game_over = False\n\n # self.acc = 0.1\n # self.difficulty = 0\n\n def refresh(self, mode):\n self.__init__(mode)\n return 1, 1\n\n def update(self):\n self.snake.move_snake()\n self.check_collision()\n self.check_fail()\n self.rem_rockets()\n \n def rem_rockets(self):\n for rocket in self.rockets:\n if not rocket.out_of_frame():\n self.rockets.remove(rocket)\n \n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n\n def draw_elements(self, screen):\n if self.mode == 0:\n screen.blit(bg, (0, 0))\n elif self.mode == 1:\n screen.fill((155, 199, 167))\n self.coin.draw_coin(screen)\n self.snake.draw_snake(screen)\n self.check_timer()\n\n if self.count >= self.condition:\n self.bombs.insert(0, Bomb(self.mode))\n self.condition = self.condition * 2\n\n for rocket in self.rockets:\n rocket.draw_rocket(screen)\n\n for bomb in self.bombs:\n bomb.draw_bomb(screen)\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def 
check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n \n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1 :\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n\n def check_fail(self):\n if not 0 <= self.snake.body[0].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n \n for block in self.snake.body[1:] :\n if block == self.snake.body[0]:\n self.game_over = 1\n\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x, self.snake.body[0].y).rect):\n self.game_over = 1\n\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1",
"step-ids": [
5,
7,
9,
10,
13
]
}
|
[
5,
7,
9,
10,
13
] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 20 14:48:56 2020
@author: dhk1349
"""
n = int(input()) #목표채널
m = int(input())
broken=[int(i) for i in input().split()] #망가진 버튼
normal=[i for i in range(10)] #사용가능한 버튼
ans=abs(n-100) #시작 시 정답
for i in broken:
normal.remove(i)
tempnum=0
iternum=1
def solve(lst, target):
#가장 유사한 숫자를 뱉
while n!=0:
val=n%10
n=n/10
if val not in normal:
tempnum+=(iternum*val)
iternum*=10
|
normal
|
{
"blob_id": "2a6ae615b427a7c970aacf9804865ea7952d065f",
"index": 5888,
"step-1": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 20 14:48:56 2020\n\n@author: dhk1349\n\"\"\"\n\nn = int(input()) #목표채널 \nm = int(input())\nbroken=[int(i) for i in input().split()] #망가진 버튼 \nnormal=[i for i in range(10)] #사용가능한 버튼 \nans=abs(n-100) #시작 시 정답 \n\n\nfor i in broken:\n normal.remove(i)\n\n\ntempnum=0\niternum=1\n\ndef solve(lst, target):\n #가장 유사한 숫자를 뱉\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\nwhile n!=0:\n val=n%10\n n=n/10\n \n if val not in normal:\n \n \n tempnum+=(iternum*val)\n iternum*=10\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('newuser/', NewUserView.as_view(), name='newuser'), url(
'login/', LoginView.as_view(), name='login'), url('logout/',
logout_user, name='logout'), url('delete/$', delete_user, name=
'deleteuser')]
<|reserved_special_token_1|>
from django.conf.urls import url
from .views import LoginView, logout_user, delete_user
from .views import NewUserView
urlpatterns = [url('newuser/', NewUserView.as_view(), name='newuser'), url(
'login/', LoginView.as_view(), name='login'), url('logout/',
logout_user, name='logout'), url('delete/$', delete_user, name=
'deleteuser')]
<|reserved_special_token_1|>
from django.conf.urls import url
from .views import LoginView, logout_user, delete_user
from .views import NewUserView
urlpatterns = [
url(r'newuser/', NewUserView.as_view(), name='newuser'),
url(r'login/', LoginView.as_view(), name='login'),
url(r'logout/', logout_user, name='logout'),
url(r'delete/$', delete_user, name='deleteuser'),
]
|
flexible
|
{
"blob_id": "9b4bc7f8f9c96f503a5ed79827430963e21718c4",
"index": 3733,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('newuser/', NewUserView.as_view(), name='newuser'), url(\n 'login/', LoginView.as_view(), name='login'), url('logout/',\n logout_user, name='logout'), url('delete/$', delete_user, name=\n 'deleteuser')]\n",
"step-3": "from django.conf.urls import url\nfrom .views import LoginView, logout_user, delete_user\nfrom .views import NewUserView\nurlpatterns = [url('newuser/', NewUserView.as_view(), name='newuser'), url(\n 'login/', LoginView.as_view(), name='login'), url('logout/',\n logout_user, name='logout'), url('delete/$', delete_user, name=\n 'deleteuser')]\n",
"step-4": "from django.conf.urls import url\nfrom .views import LoginView, logout_user, delete_user\nfrom .views import NewUserView\n\nurlpatterns = [\n url(r'newuser/', NewUserView.as_view(), name='newuser'),\n url(r'login/', LoginView.as_view(), name='login'),\n url(r'logout/', logout_user, name='logout'),\n url(r'delete/$', delete_user, name='deleteuser'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
print("Hi Tom")
|
normal
|
{
"blob_id": "e838a52fecbf69719acc6de38b5f045e792e1408",
"index": 9232,
"step-1": "<mask token>\n",
"step-2": "print('Hi Tom')\n",
"step-3": "print(\"Hi Tom\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2011 Lionel Bergeret
#
# ----------------------------------------------------------------
# The contents of this file are distributed under the CC0 license.
# See http://creativecommons.org/publicdomain/zero/1.0/
# ----------------------------------------------------------------
import os
import re
import cPickle
from optparse import OptionParser
# Import shapefile informations
from shapelib import ShapeFile
import dbflib
# Shapely
from shapely.geometry import Polygon
from shapely.ops import cascaded_union
# Numpy and matplotlib
import numpy as np
from matplotlib.nxutils import points_inside_poly
try:
import psyco
psyco.full()
except ImportError:
print "Psyco plugin missing, will run slower"
pass
def main(shapefile, picklefile):
if picklefile:
[npts, x, y, z, zraw, xil, yil, grid, missing] = cPickle.load(open(picklefile,'rb'))
points = np.vstack((x,y)).T
# Load administrative area
shp = ShapeFile(shapefile)
dbf = dbflib.open(shapefile)
coastline = []
# Process every shape from the ShapeFile
print "Processing shapes ..."
for npoly in range(shp.info()[0]):
shp_object = shp.read_object(npoly)
shp_dict = dbf.read_record(npoly)
verts = shp_object.vertices()
if "NAME_1" in shp_dict:
name = "%s" % (shp_dict["NAME_1"])
else:
name = "Unknown"
print "Processing %s" % (name)
# Extract city polygon vertices (ring per ring)
for ring in verts:
vx = []
vy = []
for point in ring:
vx.append(point[0])
vy.append(point[1])
# Only process big enough rings
if len(vx) > 256: # big enough
poly_verts = zip(vx,vy)
if picklefile:
# Compute intersections with the city
intersection = points_inside_poly(points, poly_verts)
npts = sum(1 for x in points_inside_poly(points, poly_verts) if x)
else:
npts = 1 # Add this polygon
# Add the ring to the coastine if measures inside
if npts > 0:
polygon = Polygon(poly_verts)
if not polygon.is_empty and polygon.is_valid:
print "- Add polygon (%d)" % (len(vx))
coastline.append(polygon)
else:
print "- Skip polygon (%d)" % (len(vx))
print "Union of %d polygons" % len(coastline)
coast = cascaded_union(coastline)
cPickle.dump(coast,open('coastline.pickle','wb'),-1)
print "Done."
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == '__main__':
parser = OptionParser("Usage: safecastCoastline <shapefile>")
parser.add_option("-s", "--safecast", dest="scfilename",
help="provice the safecast.pickle file for intersections.", metavar="FILE")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Wrong number of arguments")
main(args[0], options.scfilename)
|
normal
|
{
"blob_id": "b7aa99e9e4af3bef4b2b3e7d8ab9bf159a093af6",
"index": 574,
"step-1": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# Copyright (C) 2011 Lionel Bergeret\n#\n# ----------------------------------------------------------------\n# The contents of this file are distributed under the CC0 license.\n# See http://creativecommons.org/publicdomain/zero/1.0/\n# ----------------------------------------------------------------\n\nimport os\nimport re\nimport cPickle\nfrom optparse import OptionParser\n\n# Import shapefile informations\nfrom shapelib import ShapeFile\nimport dbflib\n\n# Shapely\nfrom shapely.geometry import Polygon\nfrom shapely.ops import cascaded_union\n\n# Numpy and matplotlib\nimport numpy as np\nfrom matplotlib.nxutils import points_inside_poly\n\ntry:\n import psyco\n psyco.full()\nexcept ImportError:\n print \"Psyco plugin missing, will run slower\"\n pass\n\ndef main(shapefile, picklefile):\n if picklefile:\n [npts, x, y, z, zraw, xil, yil, grid, missing] = cPickle.load(open(picklefile,'rb'))\n points = np.vstack((x,y)).T\n\n # Load administrative area\n shp = ShapeFile(shapefile)\n dbf = dbflib.open(shapefile)\n\n coastline = []\n\n # Process every shape from the ShapeFile\n print \"Processing shapes ...\"\n for npoly in range(shp.info()[0]):\n shp_object = shp.read_object(npoly)\n shp_dict = dbf.read_record(npoly)\n verts = shp_object.vertices()\n\n if \"NAME_1\" in shp_dict:\n name = \"%s\" % (shp_dict[\"NAME_1\"])\n else:\n name = \"Unknown\"\n\n print \"Processing %s\" % (name)\n # Extract city polygon vertices (ring per ring)\n for ring in verts:\n vx = []\n vy = []\n for point in ring:\n vx.append(point[0])\n vy.append(point[1])\n\n # Only process big enough rings\n if len(vx) > 256: # big enough\n poly_verts = zip(vx,vy)\n\n if picklefile:\n # Compute intersections with the city\n intersection = points_inside_poly(points, poly_verts)\n npts = sum(1 for x in points_inside_poly(points, poly_verts) if x)\n else:\n npts = 1 # Add this polygon\n\n # Add the ring to the coastine if measures inside\n if 
npts > 0:\n polygon = Polygon(poly_verts)\n if not polygon.is_empty and polygon.is_valid:\n print \"- Add polygon (%d)\" % (len(vx))\n coastline.append(polygon)\n else:\n print \"- Skip polygon (%d)\" % (len(vx))\n \n print \"Union of %d polygons\" % len(coastline)\n coast = cascaded_union(coastline)\n cPickle.dump(coast,open('coastline.pickle','wb'),-1)\n print \"Done.\"\n\n# -----------------------------------------------------------------------------\n# Main\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n parser = OptionParser(\"Usage: safecastCoastline <shapefile>\")\n parser.add_option(\"-s\", \"--safecast\", dest=\"scfilename\",\n help=\"provice the safecast.pickle file for intersections.\", metavar=\"FILE\")\n\n (options, args) = parser.parse_args()\n \n if len(args) != 1:\n parser.error(\"Wrong number of arguments\")\n\n main(args[0], options.scfilename)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class ArticleTools(Articles):
<|reserved_special_token_0|>
@staticmethod
def add_article(**kwargs):
ca = Articles(**kwargs)
ca.insert()
@staticmethod
def update_article(**kwargs):
ca = Articles(**kwargs)
ca.update_data()
@staticmethod
def delete_article(**kwargs):
ca = Articles()
ca = ca.select_expression(id_art=kwargs['id_art'])[0]
ca.delete_data()
class WarehouseTools(Warehouse):
"""
Работа со справочником МХ
"""
def set_new_name(self, id_ws, name):
"""
Переименовывает переданный МХ
"""
ws = super().select_expression(id_ws=id_ws)[0]
ws.name.set_value(name)
ws.update_data()
return True
def delete_warehouse(self, id_ws, name):
ws = super().select_expression(id_ws=id_ws)[0]
for child in super().select_expression(id_higher=id_ws):
for child_child in super().select_expression(id_higher=child.
id_ws.value):
child_child.delete_data()
child.delete_data()
ws.delete_data()
return True
@staticmethod
def add_warehouse(id_higher, name):
ws = Warehouse(name=name, id_higher=id_higher)
try:
ws_parent = ws.select_expression(id_ws=id_higher)[0]
parent_level = ws_parent.level.value + 1
except IndexError:
parent_level = 1
id_higher = None
ws.level.set_value(parent_level)
ws.id_higher.set_value(id_higher)
ws.insert()
return True
@staticmethod
def move_warehouse(id_ws, id_higher):
if id_higher == '':
id_higher = None
ws = Warehouse()
ws = ws.select_expression(id_ws=id_ws)[0]
ws.id_higher.set_value(id_higher)
ws.update_data()
return True
@staticmethod
def get_warehouses():
warehouse = Warehouse()
warehouses = warehouse.select_expression()
warehouses = warehouse.db_obj_to_dict(*warehouses)
return dict(warehouses=warehouses)
@staticmethod
def get_ws_tree():
warehouse = Warehouse()
return warehouse.get_full_tree()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ContractorTools(Contractor):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def update_contractor(**kwargs):
ca = Contractor(**kwargs)
ca.update_data()
<|reserved_special_token_0|>
class ArticleTools(Articles):
    """
    Operations on the article ("ТП") reference book.
    """
    @staticmethod
    def add_article(**kwargs):
        # Build a row object from the given fields and insert it.
        ca = Articles(**kwargs)
        ca.insert()
    @staticmethod
    def update_article(**kwargs):
        # Persist changed field values of an existing article.
        ca = Articles(**kwargs)
        ca.update_data()
    @staticmethod
    def delete_article(**kwargs):
        # Look the row up by primary key, then delete it.
        ca = Articles()
        ca = ca.select_expression(id_art=kwargs['id_art'])[0]
        ca.delete_data()
class WarehouseTools(Warehouse):
"""
Работа со справочником МХ
"""
def set_new_name(self, id_ws, name):
"""
Переименовывает переданный МХ
"""
ws = super().select_expression(id_ws=id_ws)[0]
ws.name.set_value(name)
ws.update_data()
return True
def delete_warehouse(self, id_ws, name):
ws = super().select_expression(id_ws=id_ws)[0]
for child in super().select_expression(id_higher=id_ws):
for child_child in super().select_expression(id_higher=child.
id_ws.value):
child_child.delete_data()
child.delete_data()
ws.delete_data()
return True
@staticmethod
def add_warehouse(id_higher, name):
ws = Warehouse(name=name, id_higher=id_higher)
try:
ws_parent = ws.select_expression(id_ws=id_higher)[0]
parent_level = ws_parent.level.value + 1
except IndexError:
parent_level = 1
id_higher = None
ws.level.set_value(parent_level)
ws.id_higher.set_value(id_higher)
ws.insert()
return True
@staticmethod
def move_warehouse(id_ws, id_higher):
if id_higher == '':
id_higher = None
ws = Warehouse()
ws = ws.select_expression(id_ws=id_ws)[0]
ws.id_higher.set_value(id_higher)
ws.update_data()
return True
@staticmethod
def get_warehouses():
warehouse = Warehouse()
warehouses = warehouse.select_expression()
warehouses = warehouse.db_obj_to_dict(*warehouses)
return dict(warehouses=warehouses)
@staticmethod
def get_ws_tree():
warehouse = Warehouse()
return warehouse.get_full_tree()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ContractorTools(Contractor):
"""
Работа со справочником КА
"""
@staticmethod
def add_contractor(**kwargs):
ca = Contractor(**kwargs)
ca.insert()
@staticmethod
def delete_contractor(**kwargs):
ca = Contractor()
ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]
ca.delete_data()
@staticmethod
def update_contractor(**kwargs):
ca = Contractor(**kwargs)
ca.update_data()
@staticmethod
def get_contractors():
contr = Contractor()
contrs = contr.select_expression()
contrs = contr.db_obj_to_dict(*contrs)
return dict(contractors=contrs)
class ArticleTools(Articles):
"""
Работа со справочником ТП
"""
@staticmethod
def add_article(**kwargs):
ca = Articles(**kwargs)
ca.insert()
@staticmethod
def update_article(**kwargs):
ca = Articles(**kwargs)
ca.update_data()
@staticmethod
def delete_article(**kwargs):
ca = Articles()
ca = ca.select_expression(id_art=kwargs['id_art'])[0]
ca.delete_data()
class WarehouseTools(Warehouse):
"""
Работа со справочником МХ
"""
def set_new_name(self, id_ws, name):
"""
Переименовывает переданный МХ
"""
ws = super().select_expression(id_ws=id_ws)[0]
ws.name.set_value(name)
ws.update_data()
return True
def delete_warehouse(self, id_ws, name):
ws = super().select_expression(id_ws=id_ws)[0]
for child in super().select_expression(id_higher=id_ws):
for child_child in super().select_expression(id_higher=child.
id_ws.value):
child_child.delete_data()
child.delete_data()
ws.delete_data()
return True
@staticmethod
def add_warehouse(id_higher, name):
ws = Warehouse(name=name, id_higher=id_higher)
try:
ws_parent = ws.select_expression(id_ws=id_higher)[0]
parent_level = ws_parent.level.value + 1
except IndexError:
parent_level = 1
id_higher = None
ws.level.set_value(parent_level)
ws.id_higher.set_value(id_higher)
ws.insert()
return True
@staticmethod
def move_warehouse(id_ws, id_higher):
if id_higher == '':
id_higher = None
ws = Warehouse()
ws = ws.select_expression(id_ws=id_ws)[0]
ws.id_higher.set_value(id_higher)
ws.update_data()
return True
@staticmethod
def get_warehouses():
warehouse = Warehouse()
warehouses = warehouse.select_expression()
warehouses = warehouse.db_obj_to_dict(*warehouses)
return dict(warehouses=warehouses)
@staticmethod
def get_ws_tree():
warehouse = Warehouse()
return warehouse.get_full_tree()
<|reserved_special_token_1|>
from app.models.tables import Warehouse, Contractor, Articles
class ContractorTools(Contractor):
"""
Работа со справочником КА
"""
@staticmethod
def add_contractor(**kwargs):
ca = Contractor(**kwargs)
ca.insert()
@staticmethod
def delete_contractor(**kwargs):
ca = Contractor()
ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]
ca.delete_data()
@staticmethod
def update_contractor(**kwargs):
ca = Contractor(**kwargs)
ca.update_data()
@staticmethod
def get_contractors():
contr = Contractor()
contrs = contr.select_expression()
contrs = contr.db_obj_to_dict(*contrs)
return dict(contractors=contrs)
class ArticleTools(Articles):
"""
Работа со справочником ТП
"""
@staticmethod
def add_article(**kwargs):
ca = Articles(**kwargs)
ca.insert()
@staticmethod
def update_article(**kwargs):
ca = Articles(**kwargs)
ca.update_data()
@staticmethod
def delete_article(**kwargs):
ca = Articles()
ca = ca.select_expression(id_art=kwargs['id_art'])[0]
ca.delete_data()
class WarehouseTools(Warehouse):
"""
Работа со справочником МХ
"""
def set_new_name(self, id_ws, name):
"""
Переименовывает переданный МХ
"""
ws = super().select_expression(id_ws=id_ws)[0]
ws.name.set_value(name)
ws.update_data()
return True
def delete_warehouse(self, id_ws, name):
ws = super().select_expression(id_ws=id_ws)[0]
for child in super().select_expression(id_higher=id_ws):
for child_child in super().select_expression(id_higher=child.
id_ws.value):
child_child.delete_data()
child.delete_data()
ws.delete_data()
return True
@staticmethod
def add_warehouse(id_higher, name):
ws = Warehouse(name=name, id_higher=id_higher)
try:
ws_parent = ws.select_expression(id_ws=id_higher)[0]
parent_level = ws_parent.level.value + 1
except IndexError:
parent_level = 1
id_higher = None
ws.level.set_value(parent_level)
ws.id_higher.set_value(id_higher)
ws.insert()
return True
@staticmethod
def move_warehouse(id_ws, id_higher):
if id_higher == '':
id_higher = None
ws = Warehouse()
ws = ws.select_expression(id_ws=id_ws)[0]
ws.id_higher.set_value(id_higher)
ws.update_data()
return True
@staticmethod
def get_warehouses():
warehouse = Warehouse()
warehouses = warehouse.select_expression()
warehouses = warehouse.db_obj_to_dict(*warehouses)
return dict(warehouses=warehouses)
@staticmethod
def get_ws_tree():
warehouse = Warehouse()
return warehouse.get_full_tree()
<|reserved_special_token_1|>
from app.models.tables import Warehouse, Contractor, Articles
class ContractorTools(Contractor):
    """
    Operations on the contractor ("КА") reference book.
    """

    @staticmethod
    def add_contractor(**kwargs):
        """Insert a new contractor built from the given field values."""
        ca = Contractor(**kwargs)
        ca.insert()

    @staticmethod
    def delete_contractor(**kwargs):
        """Delete the contractor identified by ``kwargs['id_contr']``."""
        ca = Contractor()
        ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]
        ca.delete_data()

    @staticmethod
    def update_contractor(**kwargs):
        """Persist changed field values of an existing contractor."""
        # Dead commented-out lookup removed: Contractor(**kwargs) already
        # carries the primary key needed by update_data().
        ca = Contractor(**kwargs)
        ca.update_data()

    @staticmethod
    def get_contractors():
        """Return every contractor row as a JSON-serialisable dict."""
        contr = Contractor()
        contrs = contr.select_expression()
        contrs = contr.db_obj_to_dict(*contrs)
        return dict(contractors=contrs)
class ArticleTools(Articles):
    """
    Operations on the article ("ТП") reference book.
    """

    @staticmethod
    def add_article(**kwargs):
        """Insert a new article built from the given field values."""
        ca = Articles(**kwargs)
        ca.insert()

    @staticmethod
    def update_article(**kwargs):
        """Persist changed field values of an existing article."""
        # Dead commented-out lookup removed: Articles(**kwargs) already
        # carries the primary key needed by update_data().
        ca = Articles(**kwargs)
        ca.update_data()

    @staticmethod
    def delete_article(**kwargs):
        """Delete the article identified by ``kwargs['id_art']``."""
        ca = Articles().select_expression(id_art=kwargs['id_art'])[0]
        ca.delete_data()
class WarehouseTools(Warehouse):
    """
    Operations on the warehouse / storage-location ("МХ") reference book.
    """

    def set_new_name(self, id_ws, name):
        """
        Rename the warehouse identified by ``id_ws`` to ``name``.
        """
        ws = super().select_expression(id_ws=id_ws)[0]
        ws.name.set_value(name)
        ws.update_data()
        return True

    def delete_warehouse(self, id_ws, name):
        """
        Delete warehouse ``id_ws`` together with its whole subtree.

        ``name`` is accepted for interface compatibility but is unused.
        """
        ws = super().select_expression(id_ws=id_ws)[0]
        # Fixed: the previous implementation removed only two levels of
        # descendants, silently orphaning deeper nodes.
        self._delete_subtree(id_ws)
        ws.delete_data()
        return True

    def _delete_subtree(self, id_ws):
        # Depth-first removal of every descendant of ``id_ws``.
        for child in super().select_expression(id_higher=id_ws):
            self._delete_subtree(child.id_ws.value)
            child.delete_data()

    @staticmethod
    def add_warehouse(id_higher, name):
        """
        Create a warehouse under ``id_higher``.

        When the parent does not exist, the node becomes a level-1 root
        (``id_higher`` is stored as NULL).
        """
        ws = Warehouse(name=name, id_higher=id_higher)
        try:
            ws_parent = ws.select_expression(id_ws=id_higher)[0]
            parent_level = ws_parent.level.value + 1
        except IndexError:
            # No such parent row: fall back to a root node.
            parent_level = 1
            id_higher = None
        ws.level.set_value(parent_level)
        ws.id_higher.set_value(id_higher)
        ws.insert()
        return True

    @staticmethod
    def move_warehouse(id_ws, id_higher):
        """
        Re-parent warehouse ``id_ws``; an empty string means "make root".
        """
        if id_higher == '':
            id_higher = None
        ws = Warehouse().select_expression(id_ws=id_ws)[0]
        ws.id_higher.set_value(id_higher)
        ws.update_data()
        return True

    @staticmethod
    def get_warehouses():
        """Return every warehouse row as a JSON-serialisable dict."""
        warehouse = Warehouse()
        rows = warehouse.select_expression()
        return dict(warehouses=warehouse.db_obj_to_dict(*rows))

    @staticmethod
    def get_ws_tree():
        """Return the full hierarchical warehouse tree."""
        return Warehouse().get_full_tree()
|
flexible
|
{
"blob_id": "79c4a2d4503c2639950675b398e000aae367ff4a",
"index": 8117,
"step-1": "<mask token>\n\n\nclass ArticleTools(Articles):\n <mask token>\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.\n id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n ws.id_higher.set_value(id_higher)\n ws.insert()\n return True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-2": "<mask token>\n\n\nclass ContractorTools(Contractor):\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def update_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.update_data()\n <mask token>\n\n\nclass ArticleTools(Articles):\n \"\"\"\n Работа со справочником ТП\n \"\"\"\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.\n id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n ws.id_higher.set_value(id_higher)\n ws.insert()\n return True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n 
return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-3": "<mask token>\n\n\nclass ContractorTools(Contractor):\n \"\"\"\n Работа со справочником КА\n \"\"\"\n\n @staticmethod\n def add_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.insert()\n\n @staticmethod\n def delete_contractor(**kwargs):\n ca = Contractor()\n ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.delete_data()\n\n @staticmethod\n def update_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.update_data()\n\n @staticmethod\n def get_contractors():\n contr = Contractor()\n contrs = contr.select_expression()\n contrs = contr.db_obj_to_dict(*contrs)\n return dict(contractors=contrs)\n\n\nclass ArticleTools(Articles):\n \"\"\"\n Работа со справочником ТП\n \"\"\"\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.\n id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n ws.id_higher.set_value(id_higher)\n ws.insert()\n return 
True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-4": "from app.models.tables import Warehouse, Contractor, Articles\n\n\nclass ContractorTools(Contractor):\n \"\"\"\n Работа со справочником КА\n \"\"\"\n\n @staticmethod\n def add_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.insert()\n\n @staticmethod\n def delete_contractor(**kwargs):\n ca = Contractor()\n ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.delete_data()\n\n @staticmethod\n def update_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.update_data()\n\n @staticmethod\n def get_contractors():\n contr = Contractor()\n contrs = contr.select_expression()\n contrs = contr.db_obj_to_dict(*contrs)\n return dict(contractors=contrs)\n\n\nclass ArticleTools(Articles):\n \"\"\"\n Работа со справочником ТП\n \"\"\"\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.\n id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n 
ws.id_higher.set_value(id_higher)\n ws.insert()\n return True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-5": "from app.models.tables import Warehouse, Contractor, Articles\n\n\nclass ContractorTools(Contractor):\n \"\"\"\n Работа со справочником КА\n \"\"\"\n\n @staticmethod\n def add_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.insert()\n\n @staticmethod\n def delete_contractor(**kwargs):\n ca = Contractor()\n ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.delete_data()\n\n @staticmethod\n def update_contractor(**kwargs):\n ca = Contractor(**kwargs)\n # ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.update_data()\n\n @staticmethod\n def get_contractors():\n contr = Contractor()\n contrs = contr.select_expression()\n contrs = contr.db_obj_to_dict(*contrs)\n return dict(contractors=contrs)\n\n\nclass ArticleTools(Articles):\n \"\"\"\n Работа со справочником ТП\n \"\"\"\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n # ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = 
ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n ws.id_higher.set_value(id_higher)\n ws.insert()\n return True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-ids": [
12,
15,
19,
20,
21
]
}
|
[
12,
15,
19,
20,
21
] |
class Solution:

    def uniquePaths(self, A, B):
        """Count monotone lattice paths in an A x B grid.

        A is the number of rows, B the number of columns; a path moves
        only right or down from the top-left to the bottom-right cell.
        Returns 0 for an empty grid.
        """
        if A == 0 or B == 0:
            return 0
        # grid[i][j] = ways to reach cell (i, j); the first row and the
        # first column each have exactly one path.
        grid = [[1] * B for _ in range(A)]
        for i in range(1, A):
            for j in range(1, B):
                grid[i][j] = grid[i - 1][j] + grid[i][j - 1]
        return grid[A - 1][B - 1]


if __name__ == '__main__':
    # Fixed: was the Python 2 statement ``print s.uniquePath(2, 2)`` — a
    # SyntaxError on Python 3 and a typo (the method is ``uniquePaths``).
    s = Solution()
    print(s.uniquePaths(2, 2))
|
normal
|
{
"blob_id": "027e53d69cfece0672556e34fa901412e483bc3e",
"index": 8805,
"step-1": "class Solution:\n\n def uniquePaths(self, A, B):\n # A - rows\n # B - columns\n if A == 0 or B == 0:\n return 0\n\n grid = [[1 for _ in range(B)] for _ in range(A)]\n\n for i in range(1, A):\n for j in range(1, B):\n grid[i][j] = grid[i-1][j] + grid[i][j-1]\n\n return grid[A-1][B-1]\n\n\ns = Solution()\n\nprint s.uniquePath(2, 2)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding: utf-8
import os
import factory
import datetime
from journalmanager import models
from django.contrib.auth.models import Group
from django.core.files.base import File
# Absolute directory of this module; fixture paths resolve relative to it.
_HERE = os.path.dirname(os.path.abspath(__file__))

# Raw SPS article XML used as the default payload of ArticleFactory.
with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')) as xml_file:
    SAMPLE_XML = xml_file.read()

# NOTE(review): opened in text mode and never closed; a TIFF is binary
# data, so mode 'rb' looks intended — confirm before changing, since
# ArticleAssetFactory below wraps this open handle in a Django File.
SAMPLE_TIFF_IMAGE = open(
    os.path.join(_HERE, 'image_test', 'sample_tif_image.tif'))

# Variant of the sample article that carries related-article links.
with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:
    SAMPLE_XML_RELATED = xml_file.read()
class UserFactory(factory.Factory):
    """Builds auth ``User`` rows with unique sequence-based identifiers."""
    FACTORY_FOR = models.User

    @classmethod
    def _setup_next_sequence(cls):
        # Continue the sequence after the highest existing primary key so
        # generated usernames/emails never collide with rows already in
        # the database; start at 0 on an empty table.
        try:
            return cls._associated_class.objects.values_list(
                'id', flat=True).order_by('-id')[0] + 1
        except IndexError:
            return 0

    username = factory.Sequence(lambda n: "jmanager_username%s" % n)
    first_name = factory.Sequence(lambda n: "jmanager_first_name%s" % n)
    last_name = factory.Sequence(lambda n: "jmanager_last_name%s" % n)
    email = factory.Sequence(lambda n: "jmanager_email%s@example.com" % n)
    # Pre-hashed password shared by every generated test user.
    password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'
    is_staff = False
    is_active = True
    is_superuser = False
    last_login = datetime.datetime(2000, 1, 1)
    date_joined = datetime.datetime(1999, 1, 1)
class GroupFactory(factory.Factory):
    """Auth groups with a unique sequential name."""
    FACTORY_FOR = Group

    name = factory.Sequence(lambda n: "Group #%s" % n)


class SubjectCategoryFactory(factory.Factory):
    """Subject category with a fixed sample term."""
    FACTORY_FOR = models.SubjectCategory

    term = 'Acoustics'


class StudyAreaFactory(factory.Factory):
    """Study area with a fixed sample value."""
    FACTORY_FOR = models.StudyArea

    study_area = 'Health Sciences'


class SponsorFactory(factory.Factory):
    """Sponsor fixture modelled on FAPESP."""
    FACTORY_FOR = models.Sponsor

    name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
    address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
    Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
    email = 'fapesp@scielo.org'
    complement = ''


class UseLicenseFactory(factory.Factory):
    """Creative Commons use licenses with unique license codes."""
    FACTORY_FOR = models.UseLicense

    license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
    reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
    disclaimer = u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'


class CollectionFactory(factory.Factory):
    """SciELO collection with unique name and slug per sequence step."""
    FACTORY_FOR = models.Collection

    url = u'http://www.scielo.br/'
    name = factory.Sequence(lambda n: 'scielo%s' % n)
    address_number = u'430'
    country = u'Brasil'
    address = u'Rua Machado Bittencourt'
    email = u'fapesp@scielo.org'
    name_slug = factory.Sequence(lambda n: 'scl%s' % n)
class JournalFactory(factory.Factory):
    """Journal fixture with unique ISSNs and acronym per sequence step."""
    FACTORY_FOR = models.Journal

    ctrl_vocabulary = u'decs'
    frequency = u'Q'
    scielo_issn = u'print'
    print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
    eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))
    init_vol = u'1'
    title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
    title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
    short_title = u'ABCD.(São Paulo)'
    editorial_standard = u'vancouv'
    secs_code = u'6633'
    init_year = u'1986'
    acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
    pub_level = u'CT'
    # Fixed: a stray trailing comma previously made this a 1-tuple
    # (u'1',) instead of a string, unlike init_vol / init_year.
    init_num = u'1'
    subject_descriptors = u"""
        MEDICINA
        CIRURGIA
        GASTROENTEROLOGIA
        GASTROENTEROLOGIA""".strip()
    publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
    publisher_country = u'BR'
    publisher_state = u'SP'
    publication_city = u'São Paulo'
    editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
    editor_email = u'cbcd@cbcd.org.br'

    creator = factory.SubFactory(UserFactory)
    use_license = factory.SubFactory(UseLicenseFactory)
class SectionFactory(factory.Factory):
    """Journal section with a unique code; owns its own JournalFactory."""
    FACTORY_FOR = models.Section

    code = factory.Sequence(lambda n: 'BJCE%s' % n)
    journal = factory.SubFactory(JournalFactory)


class LanguageFactory(factory.Factory):
    """Fixed Portuguese language row."""
    FACTORY_FOR = models.Language

    iso_code = 'pt'
    name = 'portuguese'


class IssueTitleFactory(factory.Factory):
    """
    ``issue`` must be provided
    """
    FACTORY_FOR = models.IssueTitle

    language = factory.SubFactory(LanguageFactory)
    title = u'Bla'
class IssueFactory(factory.Factory):
    """Issue fixture; every instance gets one freshly-built Section."""
    FACTORY_FOR = models.Issue

    total_documents = 16
    number = factory.Sequence(lambda n: '%s' % n)
    volume = factory.Sequence(lambda n: '%s' % n)
    is_trashed = False
    publication_start_month = 9
    publication_end_month = 11
    publication_year = 2012
    is_marked_up = False
    suppl_text = '1'

    journal = factory.SubFactory(JournalFactory)

    @classmethod
    def _prepare(cls, create, **kwargs):
        # Attach a new Section to the issue's M2M after the base class has
        # built (and possibly saved) the instance.
        section = SectionFactory()
        issue = super(IssueFactory, cls)._prepare(create, **kwargs)
        issue.section.add(section)
        return issue
class UserProfileFactory(factory.Factory):
    """Profile attached to a fresh UserFactory user."""
    FACTORY_FOR = models.UserProfile

    user = factory.SubFactory(UserFactory)
    email_notifications = True


class SectionTitleFactory(factory.Factory):
    """Localized section title bound to its own section and language."""
    FACTORY_FOR = models.SectionTitle

    title = u'Artigos Originais'

    language = factory.SubFactory(LanguageFactory)
    section = factory.SubFactory(SectionFactory)


class RegularPressReleaseFactory(factory.Factory):
    """Press release bound to an issue, with a unique DOI."""
    FACTORY_FOR = models.RegularPressRelease

    issue = factory.SubFactory(IssueFactory)
    doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)


class AheadPressReleaseFactory(factory.Factory):
    """Ahead-of-print press release bound to a journal, unique DOI."""
    FACTORY_FOR = models.AheadPressRelease

    journal = factory.SubFactory(JournalFactory)
    doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)


class PressReleaseTranslationFactory(factory.Factory):
    """Translated title/content for a regular press release."""
    FACTORY_FOR = models.PressReleaseTranslation

    language = factory.SubFactory(LanguageFactory)
    press_release = factory.SubFactory(RegularPressReleaseFactory)
    title = u'Yeah, this issue is amazing!'
    content = u'Want to read more about...'


class PressReleaseArticleFactory(factory.Factory):
    """Link between a press release and an article PID (unique per step)."""
    FACTORY_FOR = models.PressReleaseArticle

    press_release = factory.SubFactory(RegularPressReleaseFactory)
    article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
xml = SAMPLE_XML
is_aop = False
domain_key = factory.Sequence(
lambda n: 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
journal_title = u'Revista de Saúde Pública'
issn_ppub = u'0034-8910'
issn_epub = u'1518-8787'
xml_version = u'sps-1.2'
article_type = u'research-article'
doi = u'10.1590/S0034-8910.2014048004965'
class ArticleAssetFactory(factory.Factory):
    """Factory for ``models.ArticleAsset`` wrapping the sample TIFF image."""

    FACTORY_FOR = models.ArticleAsset

    article = factory.SubFactory(ArticleFactory)
    # NOTE(review): all instances share the single module-level file handle
    # SAMPLE_TIFF_IMAGE — confirm this is intended for the test suite.
    file = File(SAMPLE_TIFF_IMAGE)
    owner = u'SciELO'
    use_license = u'Creative Commons - BY'
|
normal
|
{
"blob_id": "44d87f112ab60a202e4c8d64d7aec6f4f0d10578",
"index": 31,
"step-1": "<mask token>\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = 
models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n",
"step-2": "<mask token>\n\n\nclass GroupFactory(factory.Factory):\n <mask token>\n <mask token>\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = 'fapesp@scielo.org'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'fapesp@scielo.org'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. 
Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n )\n editor_email = u'cbcd@cbcd.org.br'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = 
models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n",
"step-3": "<mask token>\n\n\nclass UserFactory(factory.Factory):\n <mask token>\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except IndexError:\n return 0\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. 
/ Fax: +55 11 3091-3047'\n )\n email = 'fapesp@scielo.org'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'fapesp@scielo.org'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. 
(São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n )\n editor_email = u'cbcd@cbcd.org.br'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass 
SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n",
"step-4": "<mask token>\n_HERE = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')\n ) as xml_file:\n SAMPLE_XML = xml_file.read()\nSAMPLE_TIFF_IMAGE = open(os.path.join(_HERE, 'image_test',\n 'sample_tif_image.tif'))\nwith open(os.path.join(_HERE, 'xml_samples',\n '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:\n SAMPLE_XML_RELATED = xml_file.read()\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except IndexError:\n return 0\n username = factory.Sequence(lambda n: 'jmanager_username%s' % n)\n first_name = factory.Sequence(lambda n: 'jmanager_first_name%s' % n)\n last_name = factory.Sequence(lambda n: 'jmanager_last_name%s' % n)\n email = factory.Sequence(lambda n: 'jmanager_email%s@example.com' % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. 
/ Fax: +55 11 3091-3047'\n )\n email = 'fapesp@scielo.org'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'fapesp@scielo.org'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. 
(São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n )\n editor_email = u'cbcd@cbcd.org.br'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass 
SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n",
"step-5": "# coding: utf-8\nimport os\n\nimport factory\nimport datetime\n\nfrom journalmanager import models\nfrom django.contrib.auth.models import Group\nfrom django.core.files.base import File\n\n\n_HERE = os.path.dirname(os.path.abspath(__file__))\n\n\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')) as xml_file:\n SAMPLE_XML = xml_file.read()\n\n\nSAMPLE_TIFF_IMAGE = open(\n os.path.join(_HERE, 'image_test', 'sample_tif_image.tif'))\n\n\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:\n SAMPLE_XML_RELATED = xml_file.read()\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list(\n 'id', flat=True).order_by('-id')[0] + 1\n except IndexError:\n return 0\n\n username = factory.Sequence(lambda n: \"jmanager_username%s\" % n)\n first_name = factory.Sequence(lambda n: \"jmanager_first_name%s\" % n)\n last_name = factory.Sequence(lambda n: \"jmanager_last_name%s\" % n)\n email = factory.Sequence(lambda n: \"jmanager_email%s@example.com\" % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n\n name = factory.Sequence(lambda n: \"Group #%s\" % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \\\n Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. 
/ Fax: +55 11 3091-3047'\n email = 'fapesp@scielo.org'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'fapesp@scielo.org'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. 
(São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\".strip()\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n editor_email = u'cbcd@cbcd.org.br'\n\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass 
SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n\n title = u'Artigos Originais'\n\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(\n lambda n: 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n\n\n",
"step-ids": [
22,
39,
42,
45,
47
]
}
|
[
22,
39,
42,
45,
47
] |
# Greedy stock trading: starting with 1000 yen, whenever tomorrow's price is
# higher than today's, convert all cash into shares today and sell tomorrow.
n = int(input())  # day count; read to consume the line (length is implied by the price list)
prices = list(map(int, input().split()))
money = 1000
for today_price, next_price in zip(prices, prices[1:]):
    if today_price < next_price:
        # Buy as many whole shares as the current cash allows.
        shares = money // today_price
        money += shares * (next_price - today_price)
print(money)
|
normal
|
{
"blob_id": "da903409d75ba2a07443317e30bce568444fbca5",
"index": 9956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor s1, s2 in zip(A[:-1], A[1:]):\n if s1 < s2:\n stockNum = g // s1\n g += stockNum * (s2 - s1)\nprint(g)\n",
"step-3": "n = int(input())\nA = list(map(int, input().split()))\ng = 1000\nfor s1, s2 in zip(A[:-1], A[1:]):\n if s1 < s2:\n stockNum = g // s1\n g += stockNum * (s2 - s1)\nprint(g)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv('.env')
<|reserved_special_token_0|>
print(response.text)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv('.env')
USERNAME = os.getenv('USER')
TOKEN = os.getenv('TOKEN')
pixela_endpoint = 'https://pixe.la/v1/users'
user_params = {'token': TOKEN, 'username': USERNAME, 'agreeTermsOfService':
'yes', 'notMinor': 'yes'}
graph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'
graph_config = {'id': 'graph1', 'name': 'Reading Graph', 'unit': 'hours',
'type': 'int', 'color': 'shibafu'}
headers = {'X-USER-TOKEN': TOKEN}
post_pixel_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1'
today = datetime.now()
formatted_date = today.strftime('%Y%m%d')
pixel_config = {'date': today.strftime('%Y%m%d'), 'quantity': input(
'How many hours did you spend reading today? ')}
response = requests.post(url=post_pixel_endpoint, headers=headers, json=
pixel_config)
print(response.text)
update_endpoint = (
f'{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}')
updated_pixel = {'quantity': '3'}
<|reserved_special_token_1|>
import requests
import os
from dotenv import load_dotenv
from datetime import datetime

# Credentials are kept out of the source tree in a local .env file.
load_dotenv('.env')
USERNAME = os.getenv('USER')
TOKEN = os.getenv('TOKEN')

# Base endpoint of the Pixela habit-tracking API.
pixela_endpoint = 'https://pixe.la/v1/users'

# One-time account-creation payload; no request is sent with it in this run.
user_params = {
    'token': TOKEN,
    'username': USERNAME,
    'agreeTermsOfService': 'yes',
    'notMinor': 'yes',
}

# One-time graph-creation endpoint/payload; likewise not posted in this run.
graph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'
graph_config = {
    'id': 'graph1',
    'name': 'Reading Graph',
    'unit': 'hours',
    'type': 'int',
    'color': 'shibafu',
}

# Authentication header required by every Pixela call below.
headers = {'X-USER-TOKEN': TOKEN}

# Record today's reading hours as a single pixel on graph 'graph1'.
post_pixel_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1'
today = datetime.now()
formatted_date = today.strftime('%Y%m%d')
pixel_config = {
    'date': formatted_date,
    'quantity': input('How many hours did you spend reading today? '),
}
response = requests.post(url=post_pixel_endpoint, headers=headers, json=pixel_config)
# Echo the API's JSON reply so failures (e.g. bad token) are visible.
print(response.text)

# Endpoint/payload for correcting today's pixel; no PUT request is issued here.
update_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}'
updated_pixel = {'quantity': '3'}
<|reserved_special_token_1|>
import requests
import os
from dotenv import load_dotenv
from datetime import datetime
# Pixe.la habit tracker: one-off account/graph setup (kept below, commented
# out) plus the daily step of posting today's reading hours as a pixel.
load_dotenv(".env") # load USER/TOKEN credentials from the local .env file
USERNAME = os.getenv("USER")
TOKEN = os.getenv("TOKEN")
pixela_endpoint = "https://pixe.la/v1/users"
# MAKING AN ACCOUNT (one-off setup; the POST is commented out after first run)
user_params = {
    "token": TOKEN,
    "username": USERNAME,
    "agreeTermsOfService": "yes",
    "notMinor": "yes",
}
# response = requests.post(url=pixela_endpoint, json=user_params) # sends the user_params as json
# print(response.text) # gives the response as a piece of text
# CREATING A GRAPH (one-off setup; the POST is commented out after first run)
graph_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs" # endpoint for the graph creation
graph_config = {
    "id": "graph1",
    "name": "Reading Graph",
    "unit": "hours",
    "type": "int",
    "color": "shibafu"
}
# Every authenticated Pixela call identifies the user via this token header.
headers = {
    "X-USER-TOKEN": TOKEN
}
# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers) These lines were used to create the graph
# print(response.text)
# POST A PIXEL: record today's reading time on graph "graph1"
post_pixel_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1"
# today = datetime(year=2020, month=12, day=25) custom date
today = datetime.now()
formatted_date = today.strftime("%Y%m%d") # Pixela expects dates as yyyyMMdd
pixel_config = {
    "date": today.strftime("%Y%m%d"),
    "quantity": input("How many hours did you spend reading today? "),
}
response = requests.post(url=post_pixel_endpoint, headers=headers, json=pixel_config) # post a new pixel
print(response.text)
# UPDATING A PIXEL (example; the PUT below is commented out)
update_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}"
updated_pixel = {
    "quantity": "3"
}
# response = requests.put(url=update_endpoint, headers=headers, json=updated_pixel)
# print(response.text)
# DELETING A PIXEL (example; commented out)
# delete_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}"
# response = requests.delete(url=delete_endpoint,headers=headers)
|
flexible
|
{
"blob_id": "ba34dfcad0cb9bac9c462bdf60e55dee6ba9d58d",
"index": 9255,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nload_dotenv('.env')\n<mask token>\nprint(response.text)\n<mask token>\n",
"step-3": "<mask token>\nload_dotenv('.env')\nUSERNAME = os.getenv('USER')\nTOKEN = os.getenv('TOKEN')\npixela_endpoint = 'https://pixe.la/v1/users'\nuser_params = {'token': TOKEN, 'username': USERNAME, 'agreeTermsOfService':\n 'yes', 'notMinor': 'yes'}\ngraph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'\ngraph_config = {'id': 'graph1', 'name': 'Reading Graph', 'unit': 'hours',\n 'type': 'int', 'color': 'shibafu'}\nheaders = {'X-USER-TOKEN': TOKEN}\npost_pixel_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1'\ntoday = datetime.now()\nformatted_date = today.strftime('%Y%m%d')\npixel_config = {'date': today.strftime('%Y%m%d'), 'quantity': input(\n 'How many hours did you spend reading today? ')}\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=\n pixel_config)\nprint(response.text)\nupdate_endpoint = (\n f'{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}')\nupdated_pixel = {'quantity': '3'}\n",
"step-4": "import requests\nimport os\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nload_dotenv('.env')\nUSERNAME = os.getenv('USER')\nTOKEN = os.getenv('TOKEN')\npixela_endpoint = 'https://pixe.la/v1/users'\nuser_params = {'token': TOKEN, 'username': USERNAME, 'agreeTermsOfService':\n 'yes', 'notMinor': 'yes'}\ngraph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'\ngraph_config = {'id': 'graph1', 'name': 'Reading Graph', 'unit': 'hours',\n 'type': 'int', 'color': 'shibafu'}\nheaders = {'X-USER-TOKEN': TOKEN}\npost_pixel_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1'\ntoday = datetime.now()\nformatted_date = today.strftime('%Y%m%d')\npixel_config = {'date': today.strftime('%Y%m%d'), 'quantity': input(\n 'How many hours did you spend reading today? ')}\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=\n pixel_config)\nprint(response.text)\nupdate_endpoint = (\n f'{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}')\nupdated_pixel = {'quantity': '3'}\n",
"step-5": "import requests\r\nimport os\r\nfrom dotenv import load_dotenv\r\nfrom datetime import datetime\r\n\r\nload_dotenv(\".env\") # loads the environment file\r\n\r\n\r\nUSERNAME = os.getenv(\"USER\")\r\nTOKEN = os.getenv(\"TOKEN\")\r\npixela_endpoint = \"https://pixe.la/v1/users\"\r\n\r\n\r\n\r\n# MAKING AN ACCOUNT\r\nuser_params = {\r\n \"token\": TOKEN,\r\n \"username\": USERNAME,\r\n \"agreeTermsOfService\": \"yes\",\r\n \"notMinor\": \"yes\",\r\n\r\n}\r\n\r\n# response = requests.post(url=pixela_endpoint, json=user_params) # sends the user_params as json\r\n# print(response.text) # gives the response as a piece of text\r\n\r\n\r\n# CREATING A GRAPH\r\ngraph_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs\" # endpoint for the graph creation\r\n\r\ngraph_config = {\r\n \"id\": \"graph1\",\r\n \"name\": \"Reading Graph\",\r\n \"unit\": \"hours\",\r\n \"type\": \"int\",\r\n \"color\": \"shibafu\"\r\n\r\n}\r\n\r\nheaders = {\r\n \"X-USER-TOKEN\": TOKEN\r\n}\r\n\r\n# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers) These lines were use to create graph\r\n# print(response.text)\r\n\r\n\r\n# POST A PIXEL\r\npost_pixel_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1\"\r\n\r\n\r\n# today = datetime(year=2020, month=12, day=25) custom date\r\ntoday = datetime.now()\r\nformatted_date = today.strftime(\"%Y%m%d\")\r\npixel_config = {\r\n \"date\": today.strftime(\"%Y%m%d\"),\r\n \"quantity\": input(\"How many hours did you spend reading today? 
\"),\r\n\r\n}\r\n\r\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=pixel_config) # post a new pixel\r\nprint(response.text)\r\n\r\n\r\n# UPDATING A PIXEL\r\n\r\nupdate_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}\"\r\nupdated_pixel = {\r\n \"quantity\": \"3\"\r\n}\r\n\r\n# response = requests.put(url=update_endpoint, headers=headers, json=updated_pixel)\r\n# print(response.text)\r\n\r\n\r\n# DELETING A PIXEL\r\n\r\n# delete_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}\"\r\n# response = requests.delete(url=delete_endpoint,headers=headers)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Human Nav 1.7 from Vasylyev Waxman """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
hinf = alpha_h / (alpha_h + beta_h)
htau = 1 / (alpha_h + beta_h)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
return [dm, dh]
def nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Rat? Nav 1.7 from Choi Waxman 2011 """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))
beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))
beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))
hinf = alpha_h / (alpha_h + beta_h)
htau = 1 / (alpha_h + beta_h)
alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))
beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))
sinf = alpha_s / (alpha_s + beta_s)
stau = 1 / (alpha_s + beta_s)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
ds = (sinf - s) / stau
return [dm, dh, ds]
<|reserved_special_token_0|>
def nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Rat? Nav 1.8 used in Tigerholm model """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
u = Y[3]
alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
hinf = 1 / (1 + np.exp((v + 32.2) / 4))
htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))
beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))
sinf = 1 / (1 + np.exp((v + 45.0) / 8))
stau = 1 / (alpha_s + beta_s)
alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))
beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))
uinf = 1 / (1 + np.exp((v + 51.0) / 8))
utau = 1.0 / (alpha_u + beta_u)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
ds = (sinf - s) / stau
du = (uinf - u) / utau
return [dm, dh, ds, du]
def nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Nav 1.8 model used in Choi Waxman 2011 """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
hinf = 1 / (1 + np.exp((v + 32.2) / 4))
htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
dm = (minf - m) / mtau
dh = (hinf - h) / htau
return [dm, dh]
<|reserved_special_token_0|>
def nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Nav 1.9 model from Huang Waxman 2014"""
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t, voltage_clamp_params)
alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))
beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))
beta_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))
hinf = alpha_h / (alpha_h + beta_h)
htau = 1 / (alpha_h + beta_h)
alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))
beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))
sinf = alpha_s / (alpha_s + beta_s)
stau = 1 / (alpha_s + beta_s)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
ds = (sinf - s) / stau
return [dm, dh, ds]
<|reserved_special_token_0|>
def nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Nav 1.6 model from Zach Mainen 1994 """
m = Y[0]
h = Y[1]
v = voltage_clamp_func(t, voltage_clamp_params)
vhalf = -43.0
a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))
b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))
m_inf = a_m / (a_m + b_m)
m_tau = 1.0 / (a_m + b_m)
vhalf_ha = -50.0
vhalf_hb = -75.0
q_h = 5.0
vhalf_inf = -72.0
qinf = 6.2
rate_ha = 0.0091
rate_hb = 0.024
a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))
b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))
h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))
h_tau = 1.0 / (a_h + b_h)
dm = (m_inf - m) / m_tau
dh = (h_inf - h) / h_tau
return [dm, dh]
<|reserved_special_token_0|>
def kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Tigerholm version of the Sheets et al. IKdr model """
""" Model was developed from data recorded at 21 oC """
v = voltage_clamp_func(t, voltage_clamp_params)
n = Y[0]
q10 = 1.0
if v > -31.0:
tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))
else:
tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -
131.5) / 34.8)))
ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))
ntau = tau / q10
dn = (ninf - n) / ntau
return [dn]
def km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Tigerholm version of the IM current. Current is from multiple sources:
The voltage dependence of steady-state activation forthe KM current is from
Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow
time constant as described by Passmore et al. (2003). To account for the
two time constants, weimplemented one fast (nf) and one slow (ns) gate,
combined as follows.
"""
v = voltage_clamp_func(t, voltage_clamp_params)
ns = Y[0]
nf = Y[1]
q10 = 1.0
if v < -60.0:
nstau = 219.0 * q10
else:
nstau = 13.0 * v + 1000.0 * q10
nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)
nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10
nftau = 1.0 / (nftau_alpha + nftau_beta)
ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))
dns = (ninf - ns) / nstau
dnf = (ninf - nf) / nftau
return [dns, dnf]
<|reserved_special_token_0|>
def cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):
"""
Jaffe et al. 1994 ICaL model.
"""
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
tfa = 1.0
ki = 0.001
cao = 2.5
""" To do: make cai variable as an input like voltage """
cai = 0.0001
celsius = 37.0
def alpha(v):
return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)
def beta(v):
return 0.29 * np.exp(-v / 10.86)
def KTF(celsius):
return 25.0 / 293.15 * (celsius + 273.15)
def efun(z):
return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for
i in z])
def calc_ghk(v, cai, cao):
f = KTF(celsius) / 2
nu = v / f
return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)
a = alpha(v)
b = beta(v)
tau = 1.0 / (tfa * (a + b))
minf = a / (a + b)
dm = (minf - m) / tau
""" Calculating the current
# h gate
h2 = ki/(ki+cai)
gcalbar = 0.003
ghk = calc_ghk(v,cai,cao)
ical = gcalbar*m*m*h2*ghk
"""
return [dm]
def can_mi():
"""
Model of N-type Ca current from Migliore 95
"""
pass
<|reserved_special_token_0|>
def hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):
"""
Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV
"""
v = voltage_clamp_func(t, voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
dns = (ninf_s - n_s) / tau_ns
dnf = (ninf_f - n_f) / tau_nf
return [dns, dnf]
def hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
"""
Tigerholm version of the Kouranova Ih model which is identical except
that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50.
"""
v = voltage_clamp_func(t, voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
dns = (ninf_s - n_s) / tau_ns
dnf = (ninf_f - n_f) / tau_nf
return [dns, dnf]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Human Nav 1.7 from Vasylyev Waxman """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
hinf = alpha_h / (alpha_h + beta_h)
htau = 1 / (alpha_h + beta_h)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
return [dm, dh]
def nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Rat? Nav 1.7 from Choi Waxman 2011 """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))
beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))
beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))
hinf = alpha_h / (alpha_h + beta_h)
htau = 1 / (alpha_h + beta_h)
alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))
beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))
sinf = alpha_s / (alpha_s + beta_s)
stau = 1 / (alpha_s + beta_s)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
ds = (sinf - s) / stau
return [dm, dh, ds]
<|reserved_special_token_0|>
def nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Rat? Nav 1.8 used in Tigerholm model """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
u = Y[3]
alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
hinf = 1 / (1 + np.exp((v + 32.2) / 4))
htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))
beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))
sinf = 1 / (1 + np.exp((v + 45.0) / 8))
stau = 1 / (alpha_s + beta_s)
alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))
beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))
uinf = 1 / (1 + np.exp((v + 51.0) / 8))
utau = 1.0 / (alpha_u + beta_u)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
ds = (sinf - s) / stau
du = (uinf - u) / utau
return [dm, dh, ds, du]
def nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Nav 1.8 model used in Choi Waxman 2011 """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
hinf = 1 / (1 + np.exp((v + 32.2) / 4))
htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
dm = (minf - m) / mtau
dh = (hinf - h) / htau
return [dm, dh]
<|reserved_special_token_0|>
def nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Nav 1.9 model from Huang Waxman 2014"""
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t, voltage_clamp_params)
alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))
beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))
beta_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))
hinf = alpha_h / (alpha_h + beta_h)
htau = 1 / (alpha_h + beta_h)
alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))
beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))
sinf = alpha_s / (alpha_s + beta_s)
stau = 1 / (alpha_s + beta_s)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
ds = (sinf - s) / stau
return [dm, dh, ds]
<|reserved_special_token_0|>
def nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Nav 1.6 model from Zach Mainen 1994 """
m = Y[0]
h = Y[1]
v = voltage_clamp_func(t, voltage_clamp_params)
vhalf = -43.0
a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))
b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))
m_inf = a_m / (a_m + b_m)
m_tau = 1.0 / (a_m + b_m)
vhalf_ha = -50.0
vhalf_hb = -75.0
q_h = 5.0
vhalf_inf = -72.0
qinf = 6.2
rate_ha = 0.0091
rate_hb = 0.024
a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))
b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))
h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))
h_tau = 1.0 / (a_h + b_h)
dm = (m_inf - m) / m_tau
dh = (h_inf - h) / h_tau
return [dm, dh]
<|reserved_special_token_0|>
def kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Tigerholm version of the Sheets et al. IKdr model """
""" Model was developed from data recorded at 21 oC """
v = voltage_clamp_func(t, voltage_clamp_params)
n = Y[0]
q10 = 1.0
if v > -31.0:
tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))
else:
tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -
131.5) / 34.8)))
ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))
ntau = tau / q10
dn = (ninf - n) / ntau
return [dn]
def km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Tigerholm version of the IM current. Current is from multiple sources:
The voltage dependence of steady-state activation forthe KM current is from
Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow
time constant as described by Passmore et al. (2003). To account for the
two time constants, weimplemented one fast (nf) and one slow (ns) gate,
combined as follows.
"""
v = voltage_clamp_func(t, voltage_clamp_params)
ns = Y[0]
nf = Y[1]
q10 = 1.0
if v < -60.0:
nstau = 219.0 * q10
else:
nstau = 13.0 * v + 1000.0 * q10
nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)
nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10
nftau = 1.0 / (nftau_alpha + nftau_beta)
ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))
dns = (ninf - ns) / nstau
dnf = (ninf - nf) / nftau
return [dns, dnf]
<|reserved_special_token_0|>
def cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):
"""
Jaffe et al. 1994 ICaL model.
"""
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
tfa = 1.0
ki = 0.001
cao = 2.5
""" To do: make cai variable as an input like voltage """
cai = 0.0001
celsius = 37.0
def alpha(v):
return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)
def beta(v):
return 0.29 * np.exp(-v / 10.86)
def KTF(celsius):
return 25.0 / 293.15 * (celsius + 273.15)
def efun(z):
return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for
i in z])
def calc_ghk(v, cai, cao):
f = KTF(celsius) / 2
nu = v / f
return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)
a = alpha(v)
b = beta(v)
tau = 1.0 / (tfa * (a + b))
minf = a / (a + b)
dm = (minf - m) / tau
""" Calculating the current
# h gate
h2 = ki/(ki+cai)
gcalbar = 0.003
ghk = calc_ghk(v,cai,cao)
ical = gcalbar*m*m*h2*ghk
"""
return [dm]
def can_mi():
"""
Model of N-type Ca current from Migliore 95
"""
pass
<|reserved_special_token_0|>
def hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):
"""
Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV
"""
v = voltage_clamp_func(t, voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
dns = (ninf_s - n_s) / tau_ns
dnf = (ninf_f - n_f) / tau_nf
return [dns, dnf]
def hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
"""
Tigerholm version of the Kouranova Ih model which is identical except
that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50.
"""
v = voltage_clamp_func(t, voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
dns = (ninf_s - n_s) / tau_ns
dnf = (ninf_f - n_f) / tau_nf
return [dns, dnf]
<|reserved_special_token_0|>
def nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):
""" Human Nav 1.7 from Vasylyev Waxman """
v = voltage_clamp_func(t, voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m / (alpha_m + beta_m)
mtau = 1 / (alpha_m + beta_m)
alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
hinf = alpha_h / (alpha_h + beta_h)
htau = 1 / (alpha_h + beta_h)
dm = (minf - m) / mtau
dh = (hinf - h) / htau
return [dm, dh]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.7 gating kinetics (Vasylyev & Waxman).

    Y = [m, h]: activation and inactivation gate states.
    Returns [dm/dt, dh/dt] at time t for the clamped potential
    v = voltage_clamp_func(t, voltage_clamp_params) (mV).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # Activation (m) forward/backward rates.
    a_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)

    # Inactivation (h) forward/backward rates.
    a_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)

    # First-order relaxation of each gate toward its steady state.
    return [(m_inf - m) / m_tau, (h_inf - h) / h_tau]
def nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.7 gating kinetics from Choi & Waxman (2011) (rat).

    Y = [m, h, s]: activation, fast inactivation and slow inactivation.
    Returns [dm/dt, dh/dt, ds/dt] for the clamped potential
    v = voltage_clamp_func(t, voltage_clamp_params).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)

    def relax(gate, alpha, beta):
        # First-order kinetics: d(gate)/dt = (inf - gate) / tau.
        inf = alpha / (alpha + beta)
        tau = 1 / (alpha + beta)
        return (inf - gate) / tau

    dm = relax(Y[0],
               15.5 / (1 + np.exp(-(v - 5) / 12.08)),
               35.2 / (1 + np.exp((v + 72.7) / 16.7)))
    dh = relax(Y[1],
               0.38685 / (1 + np.exp((v + 122.35) / 15.29)),
               -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195)))
    ds = relax(Y[2],
               3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6)),
               132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5)))
    return [dm, dh, ds]
<|reserved_special_token_0|>
def nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.8 gating kinetics (Huang & Waxman).

    Y = [m, h]; returns [dm/dt, dh/dt] for the clamped potential
    v = voltage_clamp_func(t, voltage_clamp_params).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # Activation (m) rates.
    a_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))
    b_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))
    # Inactivation (h) rates.
    a_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))
    b_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))

    m_inf, m_tau = a_m / (a_m + b_m), 1 / (a_m + b_m)
    h_inf, h_tau = a_h / (a_h + b_h), 1 / (a_h + b_h)
    # First-order relaxation toward steady state.
    return [(m_inf - m) / m_tau, (h_inf - h) / h_tau]
def nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.8 gating kinetics as used in the Tigerholm model.

    Y = [m, h, s, u]: activation, fast inactivation and two slow
    inactivation gates. Returns [dm, dh, ds, du] for the clamped
    potential v = voltage_clamp_func(t, voltage_clamp_params).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h, s, u = Y[0], Y[1], Y[2], Y[3]

    # Activation (m): steady state and tau from forward/backward rates.
    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)

    # Fast inactivation (h): Boltzmann steady state, Gaussian tau.
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    h_tau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))

    # Slow inactivation (s): Boltzmann steady state, tau from rates.
    a_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))
    b_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))
    s_inf = 1 / (1 + np.exp((v + 45.0) / 8))
    s_tau = 1 / (a_s + b_s)

    # Ultra-slow inactivation (u): same structure as s.
    a_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))
    b_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))
    u_inf = 1 / (1 + np.exp((v + 51.0) / 8))
    u_tau = 1.0 / (a_u + b_u)

    return [(m_inf - m) / m_tau,
            (h_inf - h) / h_tau,
            (s_inf - s) / s_tau,
            (u_inf - u) / u_tau]
def nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.8 gating kinetics from Choi & Waxman (2011).

    Y = [m, h]; returns [dm/dt, dh/dt] for the clamped potential
    v = voltage_clamp_func(t, voltage_clamp_params).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # Activation (m): rates -> steady state and time constant.
    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)

    # Inactivation (h): Boltzmann steady state, Gaussian time constant.
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    h_tau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))

    return [(m_inf - m) / m_tau, (h_inf - h) / h_tau]
<|reserved_special_token_0|>
def nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.9 gating kinetics from Huang & Waxman (2014).

    Y = [m, h, s]: activation, fast inactivation and slow inactivation.
    Returns [dm/dt, dh/dt, ds/dt] for the clamped potential
    v = voltage_clamp_func(t, voltage_clamp_params).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)

    def relax(gate, alpha, beta):
        # First-order kinetics: d(gate)/dt = (inf - gate) / tau.
        inf = alpha / (alpha + beta)
        tau = 1 / (alpha + beta)
        return (inf - gate) / tau

    dm = relax(Y[0],
               0.751 / (1 + np.exp(-(v + 32.26) / 13.71)),
               5.68 / (1 + np.exp((v + 123.71) / 13.94)))
    dh = relax(Y[1],
               0.082 / (1 + np.exp((v + 113.69) / 17.4)),
               0.24 / (1 + np.exp(-(v - 10.1) / 17.2)))
    ds = relax(Y[2],
               0.019 / (1 + np.exp((v + 154.51) / 11.46)),
               0.000376 / (1 + np.exp(-(v + 60.92) / 15.79)))
    return [dm, dh, ds]
<|reserved_special_token_0|>
def nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.6 gating kinetics from Zach Mainen 1994.

    Y = [m, h]: activation and inactivation gates. Returns
    [dm/dt, dh/dt] for the clamped potential
    v = voltage_clamp_func(t, voltage_clamp_params).

    Fix: the original linoid rate expressions divide by zero exactly at
    their half voltages (v == -43 mV for m, -50/-75 mV for h); the
    removable singularity is now patched with the standard NEURON-style
    vtrap guard so the rates stay finite and smooth.
    """
    m = Y[0]
    h = Y[1]
    v = voltage_clamp_func(t, voltage_clamp_params)

    def _linoid(rate, x, q):
        """rate * x / (1 - exp(-x/q)); limit rate*q as x -> 0."""
        u = x / q
        if abs(u) < 1e-6:
            # First-order Taylor expansion keeps the rate smooth through x == 0.
            return rate * q * (1.0 + 0.5 * u)
        return rate * x / (1.0 - np.exp(-u))

    # Activation (m) gate.
    vhalf = -43.0
    a_m = _linoid(0.182, v - vhalf, 6.0)
    b_m = _linoid(0.124, vhalf - v, 6.0)
    m_inf = a_m / (a_m + b_m)
    m_tau = 1.0 / (a_m + b_m)

    # Inactivation (h) gate: tau from the rates, steady state from a
    # separate Boltzmann fit (the Mainen formulation decouples the two).
    vhalf_ha = -50.0
    vhalf_hb = -75.0
    q_h = 5.0
    vhalf_inf = -72.0
    qinf = 6.2
    a_h = _linoid(0.0091, v - vhalf_ha, q_h)
    b_h = _linoid(0.024, vhalf_hb - v, q_h)
    h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))
    h_tau = 1.0 / (a_h + b_h)

    dm = (m_inf - m) / m_tau
    dh = (h_inf - h) / h_tau
    return [dm, dh]
<|reserved_special_token_0|>
def kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Delayed-rectifier K+ (IKdr) gating, Tigerholm variant of Sheets et al.

    Model was developed from data recorded at 21 oC. Y = [n];
    returns [dn/dt] for the clamped potential
    v = voltage_clamp_func(t, voltage_clamp_params).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    n = Y[0]
    q10 = 1.0  # no temperature scaling applied

    # Activation time constant: single exponential above -31 mV,
    # double-exponential form below.
    if v > -31.0:
        tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))
    else:
        tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) +
                                      np.exp(-(v - 131.5) / 34.8)))

    n_inf = 1 / (1 + np.exp(-(v + 45) / 15.4))
    n_tau = tau / q10
    return [(n_inf - n) / n_tau]
def km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """M-type K+ (KM) gating, Tigerholm composite model.

    Steady-state activation follows Maingret et al. (2008) / Passmore
    (2003); the two activation time constants reported by Passmore et
    al. are modelled as one slow (ns) and one fast (nf) gate.
    Y = [ns, nf]; returns [dns/dt, dnf/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    ns, nf = Y[0], Y[1]
    q10 = 1.0  # no temperature scaling applied

    # Slow gate time constant: constant below -60 mV, linear above.
    ns_tau = 219.0 * q10 if v < -60.0 else 13.0 * v + 1000.0 * q10

    # Fast gate time constant from forward/backward rates.
    a_nf = 0.00395 * np.exp((v + 30.0) / 40.0)
    b_nf = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10
    nf_tau = 1.0 / (a_nf + b_nf)

    # Both gates share the same steady-state activation curve.
    n_inf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))
    return [(n_inf - ns) / ns_tau, (n_inf - nf) / nf_tau]
<|reserved_special_token_0|>
def cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):
    """
    Jaffe et al. 1994 ICaL model.

    Y = [m] (single activation gate). Returns [dm/dt] for the clamped
    membrane potential v = voltage_clamp_func(t, voltage_clamp_params).
    The GHK helpers below are defined but only referenced by the
    commented-out current calculation at the end of the function.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m = Y[0]
    # Model constants: tau scale factor, K half-saturation and external Ca.
    tfa = 1.0
    ki = 0.001
    cao = 2.5
    """ To do: make cai variable as an input like voltage """
    cai = 0.0001
    celsius = 37.0

    # Forward rate of the m gate; has a removable singularity at
    # v == 81.5 mV, far outside the physiological range.
    def alpha(v):
        return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)

    # Backward rate of the m gate.
    def beta(v):
        return 0.29 * np.exp(-v / 10.86)

    # RT/F-style thermal-voltage term at the given temperature.
    def KTF(celsius):
        return 25.0 / 293.15 * (celsius + 273.15)

    # x/(exp(x)-1) with a linearised guard near zero. NOTE(review): the
    # list comprehension assumes z is iterable, so calc_ghk would fail
    # on a scalar v — currently unreachable since calc_ghk is only
    # referenced inside the commented-out block below; confirm before use.
    def efun(z):
        return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for
            i in z])

    # Goldman-Hodgkin-Katz driving-force term for Ca2+ (valence 2).
    def calc_ghk(v, cai, cao):
        f = KTF(celsius) / 2
        nu = v / f
        return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)

    # First-order relaxation of the m gate toward its steady state.
    a = alpha(v)
    b = beta(v)
    tau = 1.0 / (tfa * (a + b))
    minf = a / (a + b)
    dm = (minf - m) / tau
    """ Calculating the current
    # h gate
    h2 = ki/(ki+cai)
    gcalbar = 0.003
    ghk = calc_ghk(v,cai,cao)
    ical = gcalbar*m*m*h2*ghk
    """
    return [dm]
def can_mi():
    """
    Model of N-type Ca current from Migliore 95

    Placeholder: not yet implemented; calling it is a no-op (returns None).
    """
    pass
<|reserved_special_token_0|>
def hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Kouranova Ih (HCN) model: slow (ns) and fast (nf) gates.

    Non-specific cation current; the reversal potential should be set at
    -30 mV when computing the current.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    # Both gates share one steady-state curve.
    steady = 1 / (1 + np.exp((v + 87.2) / 9.7))
    # Piecewise time constants around -70 mV.
    if v > -70.0:
        tau_slow = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_fast = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        tau_slow = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_fast = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    return [(steady - slow) / tau_slow, (steady - fast) / tau_fast]
def hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm variant of the Kouranova Ih model.

    Gating is identical to hcn_kn; the difference is only downstream, where
    the current is split 50/50 between Na+ and K+ rather than using one
    non-specific reversal potential.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    # Both gates share one steady-state curve.
    steady = 1 / (1 + np.exp((v + 87.2) / 9.7))
    # Piecewise time constants around -70 mV.
    if v > -70.0:
        tau_slow = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_fast = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        tau_slow = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_fast = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    return [(steady - slow) / tau_slow, (steady - fast) / tau_fast]
<|reserved_special_token_0|>
def nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Test copy of the human Nav1.7 (Vasylyev & Waxman) m/h gating model.

    Kinetics use 3 rate constants, 2 voltage shifts and 2 slope coefficients
    per gate.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate = Y[0], Y[1]
    # Activation: alpha = closed->open rate, beta = open->closed rate.
    a_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation.
    a_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.7 (Vasylyev & Waxman): m activation and h inactivation gates.

    Kinetics use 3 rate constants, 2 voltage shifts and 2 slope coefficients
    per gate.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate = Y[0], Y[1]
    # Activation: alpha = closed->open rate, beta = open->closed rate.
    a_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation.
    a_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
def nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.7 (rat?) from Choi & Waxman 2011: m, h, and slow-inactivation s gates."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate, s_gate = Y[0], Y[1], Y[2]
    # Activation.
    a_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))
    b_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Fast inactivation.
    a_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))
    # NOTE(review): b_h tends to -0.00283 as v -> -inf (slightly negative rate);
    # confirm that is intended in the source publication.
    b_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    # Slow inactivation.
    a_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))
    b_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))
    s_inf = a_s / (a_s + b_s)
    s_tau = 1 / (a_s + b_s)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau,
            (s_inf - s_gate) / s_tau]
<|reserved_special_token_0|>
def nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.8 from Huang & Waxman (~2014): m activation, h inactivation."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate = Y[0], Y[1]
    # Activation.
    a_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))
    b_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation.
    a_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))
    b_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
def nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.8 (rat?) as used in the Tigerholm model.

    Gates: m (activation), h (fast inactivation), s (slow inactivation),
    u (ultra-slow inactivation).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate, s_gate, u_gate = Y[0], Y[1], Y[2], Y[3]
    # Activation m.
    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Fast inactivation h (Gaussian-shaped tau).
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    h_tau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
    # Slow inactivation s (steady state uses its own Boltzmann, not a_s/b_s).
    a_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))
    b_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))
    s_inf = 1 / (1 + np.exp((v + 45.0) / 8))
    s_tau = 1 / (a_s + b_s)
    # Ultra-slow inactivation u.
    a_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))
    b_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))
    u_inf = 1 / (1 + np.exp((v + 51.0) / 8))
    u_tau = 1.0 / (a_u + b_u)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau,
            (s_inf - s_gate) / s_tau, (u_inf - u_gate) / u_tau]
def nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.8 model used in Choi & Waxman 2011: m activation, h inactivation."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate = Y[0], Y[1]
    # Activation.
    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation (Gaussian-shaped tau).
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    h_tau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
<|reserved_special_token_0|>
def nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.9 model from Huang & Waxman 2014: m, h, and slow s gates."""
    m_gate, h_gate, s_gate = Y[0], Y[1], Y[2]
    v = voltage_clamp_func(t, voltage_clamp_params)
    # Activation.
    a_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))
    b_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Fast inactivation.
    a_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))
    b_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    # Slow inactivation.
    a_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))
    b_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))
    s_inf = a_s / (a_s + b_s)
    s_tau = 1 / (a_s + b_s)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau,
            (s_inf - s_gate) / s_tau]
def nav19md(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.9 model from Maingret 2008 -- NOT IMPLEMENTED.

    The original stub returned [dm, dh, ds] without ever defining those
    names, so any call raised a confusing NameError.  Raise an explicit
    NotImplementedError instead so callers get a clear diagnostic.
    """
    raise NotImplementedError(
        'nav19md: Maingret 2008 Nav1.9 gating equations are not implemented')
def nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.6 model from Zach Mainen 1994: m activation, h inactivation."""
    m_gate, h_gate = Y[0], Y[1]
    v = voltage_clamp_func(t, voltage_clamp_params)
    # Activation gate (singular exactly at v == vhalf; caller avoids it).
    vhalf = -43.0
    a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))
    b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1.0 / (a_m + b_m)
    # Inactivation: tau from alpha/beta, steady state from its own Boltzmann.
    vhalf_ha, vhalf_hb = -50.0, -75.0
    q_h = 5.0
    vhalf_inf, qinf = -72.0, 6.2
    rate_ha, rate_hb = 0.0091, 0.024
    a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))
    b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))
    h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))
    h_tau = 1.0 / (a_h + b_h)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
<|reserved_special_token_0|>
def kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm variant of the Sheets et al. delayed-rectifier (IKdr) gate.

    Underlying data were recorded at 21 C.  Y[0] is the activation gate n;
    returns [dn/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    n_gate = Y[0]
    q10 = 1.0  # temperature scaling disabled (kept for completeness)
    # Piecewise time constant: single exponential above -31 mV,
    # slower bi-exponential form below.
    if v > -31.0:
        raw_tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))
    else:
        raw_tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) +
                                          np.exp(-(v - 131.5) / 34.8)))
    n_inf = 1 / (1 + np.exp(-(v + 45) / 15.4))
    n_tau = raw_tau / q10
    return [(n_inf - n_gate) / n_tau]
def km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm IM (KM) current gates.

    Steady-state activation follows Maingret et al. (2008), derived from
    Passmore et al. (2003).  Passmore reports a fast and a slow activation
    time constant, so one slow (ns) and one fast (nf) gate are integrated
    here; conductance elsewhere combines them as g = gbar*(0.25*ns + 0.75*nf).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    q10 = 1.0  # temperature scaling disabled (kept for completeness)
    # Slow gate: constant tau below -60 mV, linear in v above.
    nstau = 219.0 * q10 if v < -60.0 else 13.0 * v + 1000.0 * q10
    # Fast gate: classic alpha/beta rate form.
    a_f = 0.00395 * np.exp((v + 30.0) / 40.0)
    b_f = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10
    nftau = 1.0 / (a_f + b_f)
    ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))  # threshold near -30 mV
    return [(ninf - slow) / nstau, (ninf - fast) / nftau]
<|reserved_special_token_0|>
def cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Jaffe et al. 1994 L-type Ca (ICaL) activation gate.

    Y[0] is the activation gate m; returns [dm/dt].  The GHK helpers are
    retained (unused in the derivative) for reference when computing the
    current itself.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate = Y[0]
    tfa = 1.0      # activation tau scale factor
    ki = 0.001     # (mM) half-inactivation Ca for the (reference-only) h2 gate
    cao = 2.5      # (mM) external Ca
    # TODO: make cai variable as an input like voltage.
    cai = 1e-4     # (mM) resting intracellular Ca, ~100 nM
    celsius = 37.0

    def alpha(vm):
        return 15.69 * (81.5 - vm) / (np.exp((-1.0 * vm + 81.5) / 10.0) - 1.0)

    def beta(vm):
        return 0.29 * np.exp(-vm / 10.86)

    def KTF(temp_c):
        return 25.0 / 293.15 * (temp_c + 273.15)

    def efun(z):
        # Linearized near z == 0 to avoid 0/0.
        return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1))
                         for i in z])

    def calc_ghk(vm, ca_in, ca_out):
        f = KTF(celsius) / 2
        nu = vm / f
        return -f * (1.0 - ca_in / ca_out * np.exp(nu)) * efun(nu)

    a = alpha(v)
    b = beta(v)
    m_tau = 1.0 / (tfa * (a + b))
    m_inf = a / (a + b)
    # Current computation (for reference):
    #   h2 = ki/(ki+cai); gcalbar = 0.003
    #   ical = gcalbar*m*m*h2*calc_ghk(v, cai, cao)
    return [(m_inf - m_gate) / m_tau]
def can_mi():
    """
    Model of N-type Ca current from Migliore 95

    Placeholder: not yet implemented; calling it is a no-op (returns None).
    """
    pass
<|reserved_special_token_0|>
def hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Kouranova Ih (HCN) model: slow (ns) and fast (nf) gates.

    Non-specific cation current; the reversal potential should be set at
    -30 mV when computing the current.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    # Both gates share one steady-state curve.
    steady = 1 / (1 + np.exp((v + 87.2) / 9.7))
    # Piecewise time constants around -70 mV.
    if v > -70.0:
        tau_slow = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_fast = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        tau_slow = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_fast = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    return [(steady - slow) / tau_slow, (steady - fast) / tau_fast]
def hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm variant of the Kouranova Ih model.

    Gating is identical to hcn_kn; the difference is only downstream, where
    the current is split 50/50 between Na+ and K+ rather than using one
    non-specific reversal potential.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    # Both gates share one steady-state curve.
    steady = 1 / (1 + np.exp((v + 87.2) / 9.7))
    # Piecewise time constants around -70 mV.
    if v > -70.0:
        tau_slow = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_fast = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        tau_slow = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_fast = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    return [(steady - slow) / tau_slow, (steady - fast) / tau_fast]
<|reserved_special_token_0|>
def nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Test copy of the human Nav1.7 (Vasylyev & Waxman) m/h gating model.

    Kinetics use 3 rate constants, 2 voltage shifts and 2 slope coefficients
    per gate.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate = Y[0], Y[1]
    # Activation: alpha = closed->open rate, beta = open->closed rate.
    a_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation.
    a_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
current_models - library of ionic current models implemented in Python
Created on Mon Apr 10 16:30:04 2017
@author: Oliver Britton
"""
import os
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
" Voltage clamp generator functions "
" //--Nav models--\\ "
" -- Nav 1.7 models -- "
def nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.7 (Vasylyev & Waxman): m activation and h inactivation gates.

    Kinetics use 3 rate constants, 2 voltage shifts and 2 slope coefficients
    per gate.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate = Y[0], Y[1]
    # Activation: alpha = closed->open rate, beta = open->closed rate.
    a_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation.
    a_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
def nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.7 (rat?) from Choi & Waxman 2011: m, h, and slow-inactivation s gates."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate, s_gate = Y[0], Y[1], Y[2]
    # Activation.
    a_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))
    b_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Fast inactivation.
    a_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))
    # NOTE(review): b_h tends to -0.00283 as v -> -inf (slightly negative rate);
    # confirm that is intended in the source publication.
    b_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    # Slow inactivation.
    a_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))
    b_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))
    s_inf = a_s / (a_s + b_s)
    s_tau = 1 / (a_s + b_s)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau,
            (s_inf - s_gate) / s_tau]
" -- Nav 1.8 models -- "
def nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.8 from Huang & Waxman (~2014): m activation, h inactivation."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate = Y[0], Y[1]
    # Activation.
    a_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))
    b_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation.
    a_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))
    b_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
def nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.8 (rat?) as used in the Tigerholm model.

    Gates: m (activation), h (fast inactivation), s (slow inactivation),
    u (ultra-slow inactivation).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate, s_gate, u_gate = Y[0], Y[1], Y[2], Y[3]
    # Activation m.
    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Fast inactivation h (Gaussian-shaped tau).
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    h_tau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
    # Slow inactivation s (steady state uses its own Boltzmann, not a_s/b_s).
    a_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))
    b_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))
    s_inf = 1 / (1 + np.exp((v + 45.0) / 8))
    s_tau = 1 / (a_s + b_s)
    # Ultra-slow inactivation u.
    a_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))
    b_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))
    u_inf = 1 / (1 + np.exp((v + 51.0) / 8))
    u_tau = 1.0 / (a_u + b_u)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau,
            (s_inf - s_gate) / s_tau, (u_inf - u_gate) / u_tau]
def nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.8 model used in Choi & Waxman 2011: m activation, h inactivation."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate, h_gate = Y[0], Y[1]
    # Activation.
    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation (Gaussian-shaped tau).
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    h_tau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
" -- Nav 1.9 models -- "
def nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.9 model from Huang & Waxman 2014: m, h, and slow s gates."""
    m_gate, h_gate, s_gate = Y[0], Y[1], Y[2]
    v = voltage_clamp_func(t, voltage_clamp_params)
    # Activation.
    a_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))
    b_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Fast inactivation.
    a_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))
    b_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    # Slow inactivation.
    a_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))
    b_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))
    s_inf = a_s / (a_s + b_s)
    s_tau = 1 / (a_s + b_s)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau,
            (s_inf - s_gate) / s_tau]
def nav19md(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.9 model from Maingret 2008 -- NOT IMPLEMENTED.

    The original stub returned [dm, dh, ds] without ever defining those
    names, so any call raised a confusing NameError.  Raise an explicit
    NotImplementedError instead so callers get a clear diagnostic.
    """
    raise NotImplementedError(
        'nav19md: Maingret 2008 Nav1.9 gating equations are not implemented')
def nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.6 model from Zach Mainen 1994: m activation, h inactivation."""
    m_gate, h_gate = Y[0], Y[1]
    v = voltage_clamp_func(t, voltage_clamp_params)
    # Activation gate (singular exactly at v == vhalf; caller avoids it).
    vhalf = -43.0
    a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))
    b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1.0 / (a_m + b_m)
    # Inactivation: tau from alpha/beta, steady state from its own Boltzmann.
    vhalf_ha, vhalf_hb = -50.0, -75.0
    q_h = 5.0
    vhalf_inf, qinf = -72.0, 6.2
    rate_ha, rate_hb = 0.0091, 0.024
    a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))
    b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))
    h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))
    h_tau = 1.0 / (a_h + b_h)
    return [(m_inf - m_gate) / m_tau, (h_inf - h_gate) / h_tau]
" Kv models "
def kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm variant of the Sheets et al. delayed-rectifier (IKdr) gate.

    Underlying data were recorded at 21 C.  Y[0] is the activation gate n;
    returns [dn/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    n_gate = Y[0]
    q10 = 1.0  # temperature scaling disabled (kept for completeness)
    # Piecewise time constant: single exponential above -31 mV,
    # slower bi-exponential form below.
    if v > -31.0:
        raw_tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))
    else:
        raw_tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) +
                                          np.exp(-(v - 131.5) / 34.8)))
    n_inf = 1 / (1 + np.exp(-(v + 45) / 15.4))
    n_tau = raw_tau / q10
    return [(n_inf - n_gate) / n_tau]
def km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm IM (KM) current gates.

    Steady-state activation follows Maingret et al. (2008), derived from
    Passmore et al. (2003).  Passmore reports a fast and a slow activation
    time constant, so one slow (ns) and one fast (nf) gate are integrated
    here; conductance elsewhere combines them as g = gbar*(0.25*ns + 0.75*nf).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    q10 = 1.0  # temperature scaling disabled (kept for completeness)
    # Slow gate: constant tau below -60 mV, linear in v above.
    nstau = 219.0 * q10 if v < -60.0 else 13.0 * v + 1000.0 * q10
    # Fast gate: classic alpha/beta rate form.
    a_f = 0.00395 * np.exp((v + 30.0) / 40.0)
    b_f = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10
    nftau = 1.0 / (a_f + b_f)
    ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))  # threshold near -30 mV
    return [(ninf - slow) / nstau, (ninf - fast) / nftau]
def ka_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm version of IA (A-type K current); g = gbar * n * h."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    n_gate, h_gate = Y[0], Y[1]
    q10 = 1.0  # temperature scaling disabled (kept for completeness)
    n_inf = (1.0 / (1.0 + np.exp(-(v + 5.4 + 15) / 16.4))) ** 4
    n_tau = 0.25 + 10.04 * np.exp(-(v + 24.67) ** 2 / (2 * 34.8 ** 2)) * q10
    h_inf = 1.0 / (1.0 + np.exp((v + 49.9 + 15.0) / 4.6))
    h_tau = 20.0 + 50.0 * np.exp(-(v + 40.0) ** 2 / (2.0 * 40.0 ** 2)) * q10
    # Sheets / Choi-Waxman / Tigerholm clamp htau from below at 5 ms.
    h_tau = max(h_tau, 5.0)
    return [(n_inf - n_gate) / n_tau, (h_inf - h_gate) / h_tau]
"""
Ca models
Implemented:
cal_ja - Jaffe et al. 1994 ICaL model.
can_mi - Model of N-type Ca current from Migliore 95
To do:
SK
BK
Ca diffusion
"""
def cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Jaffe et al. 1994 L-type Ca (ICaL) activation gate.

    Y[0] is the activation gate m; returns [dm/dt].  The GHK helpers are
    retained (unused in the derivative) for reference when computing the
    current itself.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m_gate = Y[0]
    tfa = 1.0      # activation tau scale factor
    ki = 0.001     # (mM) half-inactivation Ca for the (reference-only) h2 gate
    cao = 2.5      # (mM) external Ca, Davidson
    # TODO: make cai variable as an input like voltage.
    cai = 1e-4     # (mM) ~100 nM resting Ca (Lu, Zhang, Gold 2007, rat DRG)
    celsius = 37.0

    def alpha(vm):
        return 15.69 * (81.5 - vm) / (np.exp((-1.0 * vm + 81.5) / 10.0) - 1.0)

    def beta(vm):
        return 0.29 * np.exp(-vm / 10.86)

    def KTF(temp_c):
        return 25.0 / 293.15 * (temp_c + 273.15)

    def efun(z):
        # Linearized near z == 0 to avoid 0/0.
        return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1))
                         for i in z])

    def calc_ghk(vm, ca_in, ca_out):
        f = KTF(celsius) / 2
        nu = vm / f
        return -f * (1.0 - ca_in / ca_out * np.exp(nu)) * efun(nu)

    a = alpha(v)
    b = beta(v)
    m_tau = 1.0 / (tfa * (a + b))
    m_inf = a / (a + b)
    # Current computation (for reference):
    #   h2 = ki/(ki+cai); gcalbar = 0.003
    #   ical = gcalbar*m*m*h2*calc_ghk(v, cai, cao)
    return [(m_inf - m_gate) / m_tau]
def can_mi():
    """
    Model of N-type Ca current from Migliore 95

    Placeholder: not yet implemented; calling it is a no-op (returns None).
    """
    pass
" HCN models "
def hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Kouranova Ih (HCN) model: slow (ns) and fast (nf) gates.

    Non-specific cation current; the reversal potential should be set at
    -30 mV when computing the current.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    # Both gates share one steady-state curve.
    steady = 1 / (1 + np.exp((v + 87.2) / 9.7))
    # Piecewise time constants around -70 mV.
    if v > -70.0:
        tau_slow = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_fast = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        tau_slow = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_fast = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    return [(steady - slow) / tau_slow, (steady - fast) / tau_fast]
def hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm variant of the Kouranova Ih model.

    Gating is identical to hcn_kn; the difference is only downstream, where
    the current is split 50/50 between Na+ and K+ rather than using one
    non-specific reversal potential.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    # Both gates share one steady-state curve.
    steady = 1 / (1 + np.exp((v + 87.2) / 9.7))
    # Piecewise time constants around -70 mV.
    if v > -70.0:
        tau_slow = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_fast = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        tau_slow = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_fast = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    return [(steady - slow) / tau_slow, (steady - fast) / tau_fast]
"""
# ena, ek, + or -?
Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)
Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek)
"""
" Test models "
def nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
|
flexible
|
{
"blob_id": "012ab947f7a2c9d44f54464b3e477582ffcf3d77",
"index": 5589,
"step-1": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? 
Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 
+ np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential 
should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? 
Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 
+ np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential 
should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, 
dh]\n",
"step-3": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.8 from Huang Waxman 20(14?) 
\"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))\n beta_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))\n beta_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 
1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. 
IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n",
"step-4": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.8 from Huang Waxman 20(14?) 
\"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))\n beta_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))\n beta_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 
1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\ndef nav19md(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Maingret 2008\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n return [dm, dh, ds]\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, 
dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\ncurrent_models - library of ionic current models implemented in Python\n\nCreated on Mon Apr 10 16:30:04 2017\n\n@author: Oliver Britton\n\"\"\"\n\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\n\" Voltage clamp generator functions \"\n\n\n\" //--Nav models--\\\\ \"\n\n\" -- Nav 1.7 models -- \"\n\ndef nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.7 from Vasylyev Waxman \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)\n beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed\n \n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))\n beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n return [dm, dh]\n\ndef nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Rat? 
Nav 1.7 from Choi Waxman 2011 \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n alpha_m = 15.5/(1 + np.exp(-(v-5)/(12.08)))\n beta_m = 35.2/(1 + np.exp((v+72.7)/16.7))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.38685/(1 + np.exp((v+122.35)/15.29))\n beta_h = -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)) # Rate is negative if v = -inf?\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n\n alpha_s = 0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6))\n beta_s = 132.05 - 132.05/(1 + np.exp((v-384.9)/28.5))\n\n sinf = alpha_s/(alpha_s + beta_s)\n stau = 1/(alpha_s + beta_s)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n \n return [dm, dh, ds]\n \n\" -- Nav 1.8 models -- \"\ndef nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.8 from Huang Waxman 20(14?) \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 7.35 - 7.35/(1 + np.exp((v+1.38)/10.9))\n beta_m = 5.97/(1 + np.exp((v+56.43)/18.26))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.011 + 1.39/(1 + np.exp((v+78.04)/11.32))\n beta_h = 0.56 - 0.56/(1 + np.exp((v-21.82)/20.03))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n\n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n \n return [dm, dh]\n\ndef nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Rat? 
Nav 1.8 used in Tigerholm model \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n \n alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))\n beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n hinf = 1/(1+np.exp((v+32.2)/4))\n htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))\n\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))\n\n sinf = 1/(1+np.exp((v+45.0)/8))\n stau = 1/(alpha_s + beta_s)\n\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))\n\n uinf = 1/(1+np.exp((v+51.0)/8))\n utau = 1.0/(alpha_u + beta_u) \n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n du = (uinf-u)/utau\n \n return [dm, dh, ds, du]\n \ndef nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.8 model used in Choi Waxman 2011 \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))\n beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n hinf = 1/(1+np.exp((v+32.2)/4))\n htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n \n return [dm, dh]\n \n\" -- Nav 1.9 models -- \"\n\ndef nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.9 model from Huang Waxman 2014\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n alpha_m = 0.751/(1 + np.exp(-(v+32.26)/13.71))\n beta_m = 5.68/(1 + np.exp((v+123.71)/13.94))\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.082/(1 + np.exp((v+113.69)/17.4))\n beta_h = 0.24/(1 + np.exp(-(v-10.1)/17.2))\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n alpha_s = 0.019/(1 + 
np.exp((v+154.51)/11.46))\n beta_s = 0.000376/(1 + np.exp(-(v+60.92)/15.79))\n sinf = alpha_s/(alpha_s + beta_s)\n stau = 1/(alpha_s + beta_s)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n \n return [dm, dh, ds]\n \ndef nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.9 model from Maingret 2008\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n \n return [dm, dh, ds]\n \ndef nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.6 model from Zach Mainen 1994 \"\n m = Y[0]\n h = Y[1]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n vhalf = -43.0\n a_m = 0.182*(v-vhalf)/(1-np.exp((vhalf-v)/6.))\n b_m = 0.124*(-v+vhalf)/(1-np.exp((-vhalf+v)/6.))\n \n m_inf = a_m/(a_m + b_m)\n m_tau = 1./(a_m + b_m)\n \n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n\n vhalf_inf = -72.0\n qinf = 6.2\n\n rate_ha = 0.0091\n rate_hb = 0.024\n\n a_h = rate_ha*(v-vhalf_ha)/(1-np.exp((vhalf_ha-v)/q_h))\n b_h = rate_hb*(-v+vhalf_hb)/(1-np.exp((-vhalf_hb+v)/q_h))\n\n h_inf = 1.0/(1.0 + np.exp((v-vhalf_inf)/qinf))\n h_tau = 1./(a_h + b_h)\n \n dm = (m_inf-m)/m_tau\n dh = (h_inf-h)/h_tau\n \n return [dm, dh]\n\n\" Kv models \"\n\ndef kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Tigerholm version of the Sheets et al. IKdr model \"\n \" Model was developed from data recorded at 21 oC \"\n \n \n v = voltage_clamp_func(t,voltage_clamp_params)\n n = Y[0]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n if v > -31.0:\n tau = 0.16+0.8*np.exp(-0.0267*(v+11))\n else:\n tau = 1000*(0.000688 + 1/(np.exp((v+75.2)/6.5) + np.exp(-(v-131.5)/(34.8))))\n\t\t\n ninf = 1/(1 + np.exp(-(v+45)/15.4))\n ntau = tau/q10\n \n dn = (ninf-n)/ntau\n return [dn]\n \ndef km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n # g = gbar * (0.25*ns + 0.75*nf)\n v = voltage_clamp_func(t,voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n if v < -60.0:\n nstau = 219.0*q10\n else:\n nstau = 13.0*v + 1000.0*q10\n \n nftau_alpha = 0.00395*np.exp((v+30.0)/40.0)\n nftau_beta = 0.00395*np.exp(-(v+30.0)/20.0)*q10\n nftau = 1.0/(nftau_alpha + nftau_beta)\n \n ninf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0)) # Threshold is around -30 mV\n \n dns = (ninf-ns)/nstau\n dnf = (ninf-nf)/nftau\n \n return [dns,dnf]\n \ndef ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" Tigerholm version of IA.\n \"\"\"\n # g = gbar * n * h\n v = voltage_clamp_func(t,voltage_clamp_params)\n n = Y[0]\n h = Y[1]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4\n ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10\n\t\t\n hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))\n htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10\n \n # Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms\n if htau < 5.0:\n htau = 5.0\n\n dn = (ninf-n)/ntau\n dh = (hinf-h)/htau\n \n return [dn,dh]\n\n\"\"\" \nCa models \n\nImplemented:\ncal_ja - Jaffe et al. 1994 ICaL model. \ncan_mi - Model of N-type Ca current from Migliore 95\n\nTo do:\nSK\nBK\nCa diffusion\n\n\n\"\"\"\n\ndef cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t,voltage_clamp_params)\n m = Y[0]\n \n tfa = 1.\n ki = 0.001 # (mM)\n \n cao = 2.5 # Davidson (mM)\n \" To do: make cai variable as an input like voltage \"\n cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007\n \n celsius = 37.\n \n def alpha(v):\n return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)\n def beta(v):\n return 0.29*np.exp(-v/10.86)\n def KTF(celsius):\n return ((25./293.15)*(celsius + 273.15))\n def efun(z):\n return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])\n def calc_ghk(v, cai, cao): \n f = KTF(celsius)/2\n nu = v/f\n return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)\n\n a = alpha(v)\n b = beta(v)\n tau = 1./(tfa*(a + b))\n minf = a/(a+b)\n dm = (minf - m)/tau\n \n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n \n\" HCN models \"\ndef hcn_kn(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n\n v = voltage_clamp_func(t,voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n\n ninf_s = 1/(1 + np.exp((v+87.2)/9.7))\n ninf_f = ninf_s\n\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)\n\n dns = (ninf_s - n_s)/tau_ns\n dnf = (ninf_f - n_f)/tau_nf\n\n return [dns, dnf]\n \ndef hcn_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the 
current between Na+ and K+, 50/50. \n \"\"\"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n \n ninf_s = 1/(1 + np.exp((v+87.2)/9.7))\n ninf_f = ninf_s\n\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)\n\n dns = (ninf_s - n_s)/tau_ns\n dnf = (ninf_f - n_f)/tau_nf\n \n return [dns, dnf]\n\n\"\"\"\n # ena, ek, + or -?\n Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)\n Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek) \n\n\"\"\"\n\n\" Test models \"\ndef nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.7 from Vasylyev Waxman \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)\n beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed\n \n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))\n beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n return [dm, dh]\n \n\n \n",
"step-ids": [
12,
13,
14,
15,
18
]
}
|
[
12,
13,
14,
15,
18
] |
__author__ = 'simsun'
|
normal
|
{
"blob_id": "2b746d89d34435eb5f3a5b04da61c5cc88178852",
"index": 8784,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'simsun'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
__author__ = 'gaa8664'
import pymssql
class Connection:
def __init__(self):
self.connection = pymssql.connect(server = 'gditsn033\SQLPROD', database='ProdigiousDB', user='sa', password='sgrh@2016')
def __enter__(self):
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
self.cursor.close()
self.connection.close()
|
normal
|
{
"blob_id": "12dc248a95a84603065e23ce8fd33163bfcd2d3e",
"index": 9295,
"step-1": "<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-3": "__author__ = 'gaa8664'\n<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-4": "__author__ = 'gaa8664'\nimport pymssql\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-5": "__author__ = 'gaa8664'\nimport pymssql\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server = 'gditsn033\\SQLPROD', database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,
BrowserDefaultMixin):
<|reserved_special_token_0|>
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
schemata_order = ['urban_description', 'urban_road', 'urban_location']
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,
BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
schemata_order = ['urban_description', 'urban_road', 'urban_location']
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
<|reserved_special_token_0|>
def finalizeSchema(schema, folderish=False, moveDiscussion=True):
"""
Finalizes the type schema to alter some fields
"""
schema.moveField('description', after='architects')
return schema
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setOptionalAttributes(schema, optional_fields)
<|reserved_special_token_0|>
setSchemataForInquiry(PatrimonyCertificate_schema)
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,
BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
schemata_order = ['urban_description', 'urban_road', 'urban_location']
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
registerType(PatrimonyCertificate, PROJECTNAME)
def finalizeSchema(schema, folderish=False, moveDiscussion=True):
"""
Finalizes the type schema to alter some fields
"""
schema.moveField('description', after='architects')
return schema
finalizeSchema(PatrimonyCertificate_schema)
<|reserved_special_token_1|>
__author__ = """Gauthier BASTIEN <gbastien@commune.sambreville.be>, Stephan GEULETTE
<stephan.geulette@uvcw.be>, Jean-Michel Abe <jm.abe@la-bruyere.be>"""
__docformat__ = 'plaintext'
<|reserved_special_token_0|>
optional_fields = ['architects']
schema = Schema((ReferenceField(name='architects', widget=
ReferenceBrowserWidget(allow_search=True, only_for_review_states=
'enabled', allow_browse=True, force_close_on_insert=True,
startup_directory='urban/architects',
restrict_browsing_to_startup_directory=True, wild_card_search=True,
show_index_selector=True, label=_('urban_label_architects', default=
'Architect(s)'), popup_name='contact_reference_popup'), required=False,
schemata='urban_description', multiValued=True, relationship=
'miscdemandarchitects', allowed_types='Architect'),))
setOptionalAttributes(schema, optional_fields)
PatrimonyCertificate_schema = BaseFolderSchema.copy() + getattr(GenericLicence,
'schema', Schema(())).copy() + getattr(Inquiry, 'schema', Schema(())).copy(
) + schema.copy()
setSchemataForInquiry(PatrimonyCertificate_schema)
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,
BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
schemata_order = ['urban_description', 'urban_road', 'urban_location']
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
registerType(PatrimonyCertificate, PROJECTNAME)
def finalizeSchema(schema, folderish=False, moveDiscussion=True):
"""
Finalizes the type schema to alter some fields
"""
schema.moveField('description', after='architects')
return schema
finalizeSchema(PatrimonyCertificate_schema)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
#
# File: PatrimonyCertificate.py
#
# Copyright (c) 2015 by CommunesPlone
# Generator: ArchGenXML Version 2.7
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
__author__ = """Gauthier BASTIEN <gbastien@commune.sambreville.be>, Stephan GEULETTE
<stephan.geulette@uvcw.be>, Jean-Michel Abe <jm.abe@la-bruyere.be>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from zope.interface import implements
from Products.urban import interfaces
from Products.urban.content.licence.GenericLicence import GenericLicence
from Products.urban.content.Inquiry import Inquiry
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.urban import UrbanMessage as _
from Products.urban.config import *
##code-section module-header #fill in your manual code here
from Products.urban.utils import setOptionalAttributes
from Products.urban.utils import setSchemataForInquiry
from Products.ATReferenceBrowserWidget.ATReferenceBrowserWidget import ReferenceBrowserWidget
optional_fields = ['architects']
##/code-section module-header
schema = Schema((
ReferenceField(
name='architects',
widget=ReferenceBrowserWidget(
allow_search=True,
only_for_review_states='enabled',
allow_browse=True,
force_close_on_insert=True,
startup_directory='urban/architects',
restrict_browsing_to_startup_directory=True,
wild_card_search=True,
show_index_selector=True,
label=_('urban_label_architects', default='Architect(s)'),
popup_name='contact_reference_popup',
),
required=False,
schemata='urban_description',
multiValued=True,
relationship="miscdemandarchitects",
allowed_types='Architect',
),
),
)
##code-section after-local-schema #fill in your manual code here
setOptionalAttributes(schema, optional_fields)
##/code-section after-local-schema
PatrimonyCertificate_schema = BaseFolderSchema.copy() + \
getattr(GenericLicence, 'schema', Schema(())).copy() + \
getattr(Inquiry, 'schema', Schema(())).copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
#put the the fields coming from Inquiry in a specific schemata
setSchemataForInquiry(PatrimonyCertificate_schema)
##/code-section after-schema
class PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry, BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IPatrimonyCertificate)
meta_type = 'PatrimonyCertificate'
_at_rename_after_creation = True
schema = PatrimonyCertificate_schema
##code-section class-header #fill in your manual code here
schemata_order = ['urban_description', 'urban_road', 'urban_location']
##/code-section class-header
# Methods
# Manually created methods
security.declarePublic('getRepresentatives')
def getRepresentatives(self):
"""
"""
return self.getArchitects()
def getLastDeposit(self):
return self.getLastEvent(interfaces.IDepositEvent)
def getLastCollegeReport(self):
return self.getLastEvent(interfaces.ICollegeReportEvent)
def getLastTheLicence(self):
return self.getLastEvent(interfaces.ITheLicenceEvent)
registerType(PatrimonyCertificate, PROJECTNAME)
# end of class PatrimonyCertificate
##code-section module-footer #fill in your manual code here
def finalizeSchema(schema, folderish=False, moveDiscussion=True):
"""
Finalizes the type schema to alter some fields
"""
schema.moveField('description', after='architects')
return schema
finalizeSchema(PatrimonyCertificate_schema)
##/code-section module-footer
|
flexible
|
{
"blob_id": "6c0b2fa8166bb21a514dc188858e1de285ad9b0a",
"index": 166,
"step-1": "<mask token>\n\n\nclass PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,\n BrowserDefaultMixin):\n <mask token>\n security = ClassSecurityInfo()\n implements(interfaces.IPatrimonyCertificate)\n meta_type = 'PatrimonyCertificate'\n _at_rename_after_creation = True\n schema = PatrimonyCertificate_schema\n schemata_order = ['urban_description', 'urban_road', 'urban_location']\n security.declarePublic('getRepresentatives')\n\n def getRepresentatives(self):\n \"\"\"\n \"\"\"\n return self.getArchitects()\n\n def getLastDeposit(self):\n return self.getLastEvent(interfaces.IDepositEvent)\n\n def getLastCollegeReport(self):\n return self.getLastEvent(interfaces.ICollegeReportEvent)\n\n def getLastTheLicence(self):\n return self.getLastEvent(interfaces.ITheLicenceEvent)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,\n BrowserDefaultMixin):\n \"\"\"\n \"\"\"\n security = ClassSecurityInfo()\n implements(interfaces.IPatrimonyCertificate)\n meta_type = 'PatrimonyCertificate'\n _at_rename_after_creation = True\n schema = PatrimonyCertificate_schema\n schemata_order = ['urban_description', 'urban_road', 'urban_location']\n security.declarePublic('getRepresentatives')\n\n def getRepresentatives(self):\n \"\"\"\n \"\"\"\n return self.getArchitects()\n\n def getLastDeposit(self):\n return self.getLastEvent(interfaces.IDepositEvent)\n\n def getLastCollegeReport(self):\n return self.getLastEvent(interfaces.ICollegeReportEvent)\n\n def getLastTheLicence(self):\n return self.getLastEvent(interfaces.ITheLicenceEvent)\n\n\n<mask token>\n\n\ndef finalizeSchema(schema, folderish=False, moveDiscussion=True):\n \"\"\"\n Finalizes the type schema to alter some fields\n \"\"\"\n schema.moveField('description', after='architects')\n return schema\n\n\n<mask token>\n",
"step-3": "<mask token>\nsetOptionalAttributes(schema, optional_fields)\n<mask token>\nsetSchemataForInquiry(PatrimonyCertificate_schema)\n\n\nclass PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,\n BrowserDefaultMixin):\n \"\"\"\n \"\"\"\n security = ClassSecurityInfo()\n implements(interfaces.IPatrimonyCertificate)\n meta_type = 'PatrimonyCertificate'\n _at_rename_after_creation = True\n schema = PatrimonyCertificate_schema\n schemata_order = ['urban_description', 'urban_road', 'urban_location']\n security.declarePublic('getRepresentatives')\n\n def getRepresentatives(self):\n \"\"\"\n \"\"\"\n return self.getArchitects()\n\n def getLastDeposit(self):\n return self.getLastEvent(interfaces.IDepositEvent)\n\n def getLastCollegeReport(self):\n return self.getLastEvent(interfaces.ICollegeReportEvent)\n\n def getLastTheLicence(self):\n return self.getLastEvent(interfaces.ITheLicenceEvent)\n\n\nregisterType(PatrimonyCertificate, PROJECTNAME)\n\n\ndef finalizeSchema(schema, folderish=False, moveDiscussion=True):\n \"\"\"\n Finalizes the type schema to alter some fields\n \"\"\"\n schema.moveField('description', after='architects')\n return schema\n\n\nfinalizeSchema(PatrimonyCertificate_schema)\n",
"step-4": "__author__ = \"\"\"Gauthier BASTIEN <gbastien@commune.sambreville.be>, Stephan GEULETTE\n<stephan.geulette@uvcw.be>, Jean-Michel Abe <jm.abe@la-bruyere.be>\"\"\"\n__docformat__ = 'plaintext'\n<mask token>\noptional_fields = ['architects']\nschema = Schema((ReferenceField(name='architects', widget=\n ReferenceBrowserWidget(allow_search=True, only_for_review_states=\n 'enabled', allow_browse=True, force_close_on_insert=True,\n startup_directory='urban/architects',\n restrict_browsing_to_startup_directory=True, wild_card_search=True,\n show_index_selector=True, label=_('urban_label_architects', default=\n 'Architect(s)'), popup_name='contact_reference_popup'), required=False,\n schemata='urban_description', multiValued=True, relationship=\n 'miscdemandarchitects', allowed_types='Architect'),))\nsetOptionalAttributes(schema, optional_fields)\nPatrimonyCertificate_schema = BaseFolderSchema.copy() + getattr(GenericLicence,\n 'schema', Schema(())).copy() + getattr(Inquiry, 'schema', Schema(())).copy(\n ) + schema.copy()\nsetSchemataForInquiry(PatrimonyCertificate_schema)\n\n\nclass PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry,\n BrowserDefaultMixin):\n \"\"\"\n \"\"\"\n security = ClassSecurityInfo()\n implements(interfaces.IPatrimonyCertificate)\n meta_type = 'PatrimonyCertificate'\n _at_rename_after_creation = True\n schema = PatrimonyCertificate_schema\n schemata_order = ['urban_description', 'urban_road', 'urban_location']\n security.declarePublic('getRepresentatives')\n\n def getRepresentatives(self):\n \"\"\"\n \"\"\"\n return self.getArchitects()\n\n def getLastDeposit(self):\n return self.getLastEvent(interfaces.IDepositEvent)\n\n def getLastCollegeReport(self):\n return self.getLastEvent(interfaces.ICollegeReportEvent)\n\n def getLastTheLicence(self):\n return self.getLastEvent(interfaces.ITheLicenceEvent)\n\n\nregisterType(PatrimonyCertificate, PROJECTNAME)\n\n\ndef finalizeSchema(schema, folderish=False, moveDiscussion=True):\n \"\"\"\n 
Finalizes the type schema to alter some fields\n \"\"\"\n schema.moveField('description', after='architects')\n return schema\n\n\nfinalizeSchema(PatrimonyCertificate_schema)\n",
"step-5": "# -*- coding: utf-8 -*-\n#\n# File: PatrimonyCertificate.py\n#\n# Copyright (c) 2015 by CommunesPlone\n# Generator: ArchGenXML Version 2.7\n# http://plone.org/products/archgenxml\n#\n# GNU General Public License (GPL)\n#\n\n__author__ = \"\"\"Gauthier BASTIEN <gbastien@commune.sambreville.be>, Stephan GEULETTE\n<stephan.geulette@uvcw.be>, Jean-Michel Abe <jm.abe@la-bruyere.be>\"\"\"\n__docformat__ = 'plaintext'\n\nfrom AccessControl import ClassSecurityInfo\nfrom Products.Archetypes.atapi import *\nfrom zope.interface import implements\nfrom Products.urban import interfaces\nfrom Products.urban.content.licence.GenericLicence import GenericLicence\nfrom Products.urban.content.Inquiry import Inquiry\nfrom Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin\n\nfrom Products.urban import UrbanMessage as _\nfrom Products.urban.config import *\n\n##code-section module-header #fill in your manual code here\nfrom Products.urban.utils import setOptionalAttributes\nfrom Products.urban.utils import setSchemataForInquiry\nfrom Products.ATReferenceBrowserWidget.ATReferenceBrowserWidget import ReferenceBrowserWidget\noptional_fields = ['architects']\n##/code-section module-header\n\nschema = Schema((\n\n ReferenceField(\n name='architects',\n widget=ReferenceBrowserWidget(\n allow_search=True,\n only_for_review_states='enabled',\n allow_browse=True,\n force_close_on_insert=True,\n startup_directory='urban/architects',\n restrict_browsing_to_startup_directory=True,\n wild_card_search=True,\n show_index_selector=True,\n label=_('urban_label_architects', default='Architect(s)'),\n popup_name='contact_reference_popup',\n ),\n required=False,\n schemata='urban_description',\n multiValued=True,\n relationship=\"miscdemandarchitects\",\n allowed_types='Architect',\n ),\n\n),\n)\n\n##code-section after-local-schema #fill in your manual code here\nsetOptionalAttributes(schema, optional_fields)\n##/code-section after-local-schema\n\nPatrimonyCertificate_schema 
= BaseFolderSchema.copy() + \\\n getattr(GenericLicence, 'schema', Schema(())).copy() + \\\n getattr(Inquiry, 'schema', Schema(())).copy() + \\\n schema.copy()\n\n##code-section after-schema #fill in your manual code here\n#put the the fields coming from Inquiry in a specific schemata\nsetSchemataForInquiry(PatrimonyCertificate_schema)\n##/code-section after-schema\n\nclass PatrimonyCertificate(BaseFolder, GenericLicence, Inquiry, BrowserDefaultMixin):\n \"\"\"\n \"\"\"\n security = ClassSecurityInfo()\n implements(interfaces.IPatrimonyCertificate)\n\n meta_type = 'PatrimonyCertificate'\n _at_rename_after_creation = True\n\n schema = PatrimonyCertificate_schema\n\n ##code-section class-header #fill in your manual code here\n schemata_order = ['urban_description', 'urban_road', 'urban_location']\n ##/code-section class-header\n\n # Methods\n\n # Manually created methods\n\n security.declarePublic('getRepresentatives')\n def getRepresentatives(self):\n \"\"\"\n \"\"\"\n return self.getArchitects()\n\n def getLastDeposit(self):\n return self.getLastEvent(interfaces.IDepositEvent)\n\n def getLastCollegeReport(self):\n return self.getLastEvent(interfaces.ICollegeReportEvent)\n\n def getLastTheLicence(self):\n return self.getLastEvent(interfaces.ITheLicenceEvent)\n\n\n\nregisterType(PatrimonyCertificate, PROJECTNAME)\n# end of class PatrimonyCertificate\n\n##code-section module-footer #fill in your manual code here\ndef finalizeSchema(schema, folderish=False, moveDiscussion=True):\n \"\"\"\n Finalizes the type schema to alter some fields\n \"\"\"\n schema.moveField('description', after='architects')\n return schema\n\nfinalizeSchema(PatrimonyCertificate_schema)\n##/code-section module-footer\n\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
# message 为定义的变量
message = 'Hello Python World '
print(message)
|
normal
|
{
"blob_id": "ee5e970f32b1d601f9dc3ab37a5028ce7ff8a32e",
"index": 1368,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(message)\n",
"step-3": "message = 'Hello Python World '\nprint(message)\n",
"step-4": "# message 为定义的变量\r\nmessage = 'Hello Python World '\r\nprint(message)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class User:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record)
currPos = motor.wm()
return scan([daq], motor, currPos + start, currPos + end, nsteps)
def ascan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
RE(scan([daq], motor, start, end, nsteps))
<|reserved_special_token_0|>
def dscan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
currPos = motor.wm()
RE(scan([daq], motor, currPos + start, currPos + end, nsteps))
<|reserved_special_token_0|>
def setPP_flipflip(self, nshots=20, deltaShots=30):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(1)
seq.rep_count.put(nshots)
beamDelay = int(delta_shots) - pp_shot_delay
if beamDelay + pp_shot_delay < 4:
print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
return
ff_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]
seq.sequence.put_seq(ff_seq)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):
"""RE the plan."""
self.set_pp_flipflop()
RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps, record=
record, use_l3t=use_l3t))
def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,
use_l3t=None):
"""Configure daq and do the scan, trust other code to set up the sequencer."""
yield from configure(daq, events=None, duration=None, record=record,
use_l3t=use_l3t, controls=[evr])
yield from scan([daq, seq], evr, start, end, nsteps)
def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,
flyStop, deltaT_shots, record=False, pp_shot_delay=2):
daq.disconnect()
shiftMotor = foil_y
flyMotor = foil_x
self.setupSequencer(flyMotor, abs(flyStop - flyStart), deltaT_shots,
pp_shot_delay=pp_shot_delay)
daq.configure(-1, record=record, controls=[foil_x, foil_y])
if isinstance(shiftSteps, int):
RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop,
shiftSteps), flyMotor, [flyStart, flyStop], seq))
else:
RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop,
shiftSteps), flyMotor, [flyStart, flyStop], seq))
<|reserved_special_token_0|>
def prepare_seq_PPburst(self, nShots=None, nOffShots=None):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(0)
ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)
seq.sequence.put_seq(ff_seq)
def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):
single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots
)
ff_seq = []
for i in range(nTimes):
ff_seq += single_burst
return ff_seq
<|reserved_special_token_0|>
def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.1)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1.2)
self.sam_x.mv(xStart)
sleep(0.1)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('ypos', x.sam_y.wm())
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1.2)
self.sam_x.mv(xStart)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('ypos', x.sam_y.wm())
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User:
def __init__(self):
self._sync_markers = {(0.5): 0, (1): 1, (5): 2, (10): 3, (30): 4, (
60): 5, (120): 6, (360): 7}
self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0', name='evr_pp')
self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name=
'pp_delay')
with safe_load('sam_x'):
self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')
with safe_load('sam_y'):
self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')
with safe_load('sam_z'):
self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')
with safe_load('op_focus'):
self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')
with safe_load('op_x'):
self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')
with safe_load('op_y'):
self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')
def takeRun(self, nEvents, record=True):
daq.configure(events=120, record=record)
daq.begin(events=nEvents)
daq.wait()
daq.end_run()
<|reserved_special_token_0|>
def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record)
currPos = motor.wm()
return scan([daq], motor, currPos + start, currPos + end, nsteps)
def ascan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
RE(scan([daq], motor, start, end, nsteps))
def listscan(self, motor, posList, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
RE(list_scan([daq], motor, posList))
def dscan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
currPos = motor.wm()
RE(scan([daq], motor, currPos + start, currPos + end, nsteps))
def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2
):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(1)
beamDelay = int(120 * deltaT_shots) - pp_shot_delay
if beamDelay + pp_shot_delay < 4:
print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
return
fly_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]
flyspeed = flymotor.velocity.get()
flytime = distance / flyspeed
flyshots = int(flytime / deltaT_shots)
seq.rep_count.put(flyshots)
seq.sequence.put_seq(fly_seq)
def setPP_flipflip(self, nshots=20, deltaShots=30):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(1)
seq.rep_count.put(nshots)
beamDelay = int(delta_shots) - pp_shot_delay
if beamDelay + pp_shot_delay < 4:
print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
return
ff_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]
seq.sequence.put_seq(ff_seq)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):
"""RE the plan."""
self.set_pp_flipflop()
RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps, record=
record, use_l3t=use_l3t))
def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,
use_l3t=None):
"""Configure daq and do the scan, trust other code to set up the sequencer."""
yield from configure(daq, events=None, duration=None, record=record,
use_l3t=use_l3t, controls=[evr])
yield from scan([daq, seq], evr, start, end, nsteps)
def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,
flyStop, deltaT_shots, record=False, pp_shot_delay=2):
daq.disconnect()
shiftMotor = foil_y
flyMotor = foil_x
self.setupSequencer(flyMotor, abs(flyStop - flyStart), deltaT_shots,
pp_shot_delay=pp_shot_delay)
daq.configure(-1, record=record, controls=[foil_x, foil_y])
if isinstance(shiftSteps, int):
RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop,
shiftSteps), flyMotor, [flyStart, flyStop], seq))
else:
RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop,
shiftSteps), flyMotor, [flyStart, flyStop], seq))
def PPburst_sequence(self, nShots=None, nOffShots=2):
if nOffShots < 2:
raise ValueError('Minimum offshots is 2')
ff_seq = [[185, 0, 0, 0]]
ff_seq.append([179, 1, 0, 0])
ff_seq.append([179, 1, 0, 0])
if nShots is not None:
if isinstance(nShots, int):
ff_seq.append([185, nShots - 2, 0, 0])
else:
ff_seq.append([185, int(nShots * 120) - 2, 0, 0])
ff_seq.append([179, 2, 0, 0])
if nShots is not None:
if isinstance(nShots, int):
for i in range(nOffShots - 2):
ff_seq.append([179, 1, 0, 0])
else:
for i in range(int(nOffShots * 120) - 2):
ff_seq.append([179, 1, 0, 0])
return ff_seq
def prepare_seq_PPburst(self, nShots=None, nOffShots=None):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(0)
ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)
seq.sequence.put_seq(ff_seq)
def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):
single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots
)
ff_seq = []
for i in range(nTimes):
ff_seq += single_burst
return ff_seq
def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1
):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(0)
ff_seq = self.PPburst_sequence_pattern(nShots=nShots, nOffShots=
nOffShots, nTimes=nTimes)
seq.sequence.put_seq(ff_seq)
def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.1)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1.2)
self.sam_x.mv(xStart)
sleep(0.1)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('ypos', x.sam_y.wm())
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1.2)
self.sam_x.mv(xStart)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('ypos', x.sam_y.wm())
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_y.umv(yStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_y.mv(yEnd)
sleep(0.05)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_y.wait()
self.sam_x.mvr(xDelta)
sleep(1.2)
self.sam_y.mv(yStart)
sleep(0.05)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_y.wait()
self.sam_x.mvr(xDelta)
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_burst_window(self, xStart, xEnd, yDelta, nRoundTrips,
sweepTime, windowlist):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for j in windowList:
self.sam_y.umv(windowList)
self.sam_y.wait()
print('Windos position %f' % self.sam_w.wm())
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.05)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1)
self.sam_x.mv(xStart)
sleep(0.05)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,
windowList, startgrid):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
sleeptime is the pp close time between window
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
self.sam_y.umv(windowList[startgrid])
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for j in range(len(windowList) - startgrid):
self.sam_y.umv(windowList[startgrid + j])
self.sam_y.wait()
print('Windos position %f' % self.sam_y.wm())
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('yposition', self.sam_y.wm())
sleep(1.2)
self.sam_x.mv(xStart)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('yposition', self.sam_y.wm())
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User:
def __init__(self):
self._sync_markers = {(0.5): 0, (1): 1, (5): 2, (10): 3, (30): 4, (
60): 5, (120): 6, (360): 7}
self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0', name='evr_pp')
self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name=
'pp_delay')
with safe_load('sam_x'):
self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')
with safe_load('sam_y'):
self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')
with safe_load('sam_z'):
self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')
with safe_load('op_focus'):
self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')
with safe_load('op_x'):
self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')
with safe_load('op_y'):
self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')
def takeRun(self, nEvents, record=True):
daq.configure(events=120, record=record)
daq.begin(events=nEvents)
daq.wait()
daq.end_run()
def get_ascan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
return scan([daq], motor, start, end, nsteps)
def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record)
currPos = motor.wm()
return scan([daq], motor, currPos + start, currPos + end, nsteps)
def ascan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
RE(scan([daq], motor, start, end, nsteps))
def listscan(self, motor, posList, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
RE(list_scan([daq], motor, posList))
def dscan(self, motor, start, end, nsteps, nEvents, record=True):
daq.configure(nEvents, record=record, controls=[motor])
currPos = motor.wm()
RE(scan([daq], motor, currPos + start, currPos + end, nsteps))
def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2
):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(1)
beamDelay = int(120 * deltaT_shots) - pp_shot_delay
if beamDelay + pp_shot_delay < 4:
print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
return
fly_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]
flyspeed = flymotor.velocity.get()
flytime = distance / flyspeed
flyshots = int(flytime / deltaT_shots)
seq.rep_count.put(flyshots)
seq.sequence.put_seq(fly_seq)
def setPP_flipflip(self, nshots=20, deltaShots=30):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(1)
seq.rep_count.put(nshots)
beamDelay = int(delta_shots) - pp_shot_delay
if beamDelay + pp_shot_delay < 4:
print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
return
ff_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]
seq.sequence.put_seq(ff_seq)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):
"""RE the plan."""
self.set_pp_flipflop()
RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps, record=
record, use_l3t=use_l3t))
def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,
use_l3t=None):
"""Configure daq and do the scan, trust other code to set up the sequencer."""
yield from configure(daq, events=None, duration=None, record=record,
use_l3t=use_l3t, controls=[evr])
yield from scan([daq, seq], evr, start, end, nsteps)
def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,
flyStop, deltaT_shots, record=False, pp_shot_delay=2):
daq.disconnect()
shiftMotor = foil_y
flyMotor = foil_x
self.setupSequencer(flyMotor, abs(flyStop - flyStart), deltaT_shots,
pp_shot_delay=pp_shot_delay)
daq.configure(-1, record=record, controls=[foil_x, foil_y])
if isinstance(shiftSteps, int):
RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop,
shiftSteps), flyMotor, [flyStart, flyStop], seq))
else:
RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop,
shiftSteps), flyMotor, [flyStart, flyStop], seq))
def PPburst_sequence(self, nShots=None, nOffShots=2):
if nOffShots < 2:
raise ValueError('Minimum offshots is 2')
ff_seq = [[185, 0, 0, 0]]
ff_seq.append([179, 1, 0, 0])
ff_seq.append([179, 1, 0, 0])
if nShots is not None:
if isinstance(nShots, int):
ff_seq.append([185, nShots - 2, 0, 0])
else:
ff_seq.append([185, int(nShots * 120) - 2, 0, 0])
ff_seq.append([179, 2, 0, 0])
if nShots is not None:
if isinstance(nShots, int):
for i in range(nOffShots - 2):
ff_seq.append([179, 1, 0, 0])
else:
for i in range(int(nOffShots * 120) - 2):
ff_seq.append([179, 1, 0, 0])
return ff_seq
def prepare_seq_PPburst(self, nShots=None, nOffShots=None):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(0)
ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)
seq.sequence.put_seq(ff_seq)
def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):
single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots
)
ff_seq = []
for i in range(nTimes):
ff_seq += single_burst
return ff_seq
def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1
):
sync_mark = int(self._sync_markers[120])
seq.sync_marker.put(sync_mark)
seq.play_mode.put(0)
ff_seq = self.PPburst_sequence_pattern(nShots=nShots, nOffShots=
nOffShots, nTimes=nTimes)
seq.sequence.put_seq(ff_seq)
def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.1)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1.2)
self.sam_x.mv(xStart)
sleep(0.1)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('ypos', x.sam_y.wm())
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1.2)
self.sam_x.mv(xStart)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('ypos', x.sam_y.wm())
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_y.umv(yStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_y.mv(yEnd)
sleep(0.05)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_y.wait()
self.sam_x.mvr(xDelta)
sleep(1.2)
self.sam_y.mv(yStart)
sleep(0.05)
pp.open()
sleep(sweepTime)
pp.close()
self.sam_y.wait()
self.sam_x.mvr(xDelta)
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_burst_window(self, xStart, xEnd, yDelta, nRoundTrips,
sweepTime, windowlist):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for j in windowList:
self.sam_y.umv(windowList)
self.sam_y.wait()
print('Windos position %f' % self.sam_w.wm())
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.05)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1)
self.sam_x.mv(xStart)
sleep(0.05)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
sleep(1)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,
windowList, startgrid):
"""
simple rastering for running at 120Hz with shutter open/close before
and after motion stop.
sleeptime is the pp close time between window
Need some testing how to deal with intermittent motion errors.
"""
self.sam_x.umv(xStart)
self.sam_y.umv(windowList[startgrid])
daq.connect()
daq.begin()
sleep(2)
print('Reached horizontal start position')
for j in range(len(windowList) - startgrid):
self.sam_y.umv(windowList[startgrid + j])
self.sam_y.wait()
print('Windos position %f' % self.sam_y.wm())
for i in range(nRoundTrips):
try:
print('starting round trip %d' % (i + 1))
self.sam_x.mv(xEnd)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('yposition', self.sam_y.wm())
sleep(1.2)
self.sam_x.mv(xStart)
sleep(0.1)
seq.start()
self.sam_x.wait()
self.sam_y.mvr(yDelta)
print('yposition', self.sam_y.wm())
sleep(1.2)
except:
print('round trip %d didn not end happily' % i)
daq.end_run()
daq.disconnect()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User:
    """CXI user-hutch helper: sample / wavefront-sensor motor handles plus
    canned DAQ, event-sequencer and pulse-picker scan recipes.

    Relies on hutch-python session globals (daq, seq, pp, RE, foil_x,
    foil_y, scan, list_scan, configure, serp_seq_scan, np, sleep, time)
    being importable at module scope.
    """

    def __init__(self):
        # Beam-rate (Hz) -> event-sequencer sync-marker index.
        self._sync_markers = {(0.5): 0, (1): 1, (5): 2, (10): 3, (30): 4, (
            60): 5, (120): 6, (360): 7}
        # Pulse-picker EVR trigger and its delay PV.
        self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0', name='evr_pp')
        self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name=
            'pp_delay')
        # Sample stages (safe_load keeps the session usable if a PV is absent).
        with safe_load('sam_x'):
            self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')
        with safe_load('sam_y'):
            self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')
        with safe_load('sam_z'):
            self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')
        # Wavefront-sensor optics stages.
        with safe_load('op_focus'):
            self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')
        with safe_load('op_x'):
            self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')
        with safe_load('op_y'):
            self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')
    def takeRun(self, nEvents, record=True):
        """Record a single DAQ run of nEvents events."""
        daq.configure(events=120, record=record)
        daq.begin(events=nEvents)
        daq.wait()
        daq.end_run()
    def get_ascan(self, motor, start, end, nsteps, nEvents, record=True):
        """Return (do not run) an absolute-scan plan over `motor` with the DAQ."""
        daq.configure(nEvents, record=record, controls=[motor])
        return scan([daq], motor, start, end, nsteps)
    def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):
        """Return (do not run) a scan plan relative to the current position."""
        daq.configure(nEvents, record=record)
        currPos = motor.wm()
        return scan([daq], motor, currPos + start, currPos + end, nsteps)
    def ascan(self, motor, start, end, nsteps, nEvents, record=True):
        """Run an absolute scan of `motor` with the DAQ via the RunEngine."""
        daq.configure(nEvents, record=record, controls=[motor])
        RE(scan([daq], motor, start, end, nsteps))
    def listscan(self, motor, posList, nEvents, record=True):
        """Run a scan over an explicit list of positions."""
        daq.configure(nEvents, record=record, controls=[motor])
        RE(list_scan([daq], motor, posList))
    def dscan(self, motor, start, end, nsteps, nEvents, record=True):
        """Run a scan relative to the motor's current position."""
        daq.configure(nEvents, record=record, controls=[motor])
        currPos = motor.wm()
        RE(scan([daq], motor, currPos + start, currPos + end, nsteps))
    def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2
        ):
        """Program the event sequencer for a fly scan.

        Emits one beam event (code 185) every deltaT_shots seconds at the
        120 Hz base rate, followed by a pulse-picker event (code 187), and
        repeats for as many shots as fit in `distance` at the fly motor's
        current velocity.  Aborts (with a message) below an effective 40 Hz.
        """
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        seq.play_mode.put(1)  # run sequence N times
        beamDelay = int(120 * deltaT_shots) - pp_shot_delay
        if beamDelay + pp_shot_delay < 4:
            print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
            return
        fly_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]
        flyspeed = flymotor.velocity.get()
        flytime = distance / flyspeed
        flyshots = int(flytime / deltaT_shots)
        seq.rep_count.put(flyshots)
        seq.sequence.put_seq(fly_seq)
    def setPP_flipflip(self, nshots=20, deltaShots=30):
        """Program the sequencer for pulse-picker flip-flop shots.

        NOTE(review): references undefined names `delta_shots` and
        `pp_shot_delay` (the parameter is `deltaShots`, and there is no
        pp_shot_delay argument) -> NameError when called; compare with
        setupSequencer for the intended pattern.
        """
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        seq.play_mode.put(1)
        seq.rep_count.put(nshots)
        beamDelay = int(delta_shots) - pp_shot_delay
        if beamDelay + pp_shot_delay < 4:
            print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
            return
        ff_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]
        seq.sequence.put_seq(ff_seq)
    def set_pp_flipflop(self):
        """Put the pulse picker into flip-flop mode (blocks until done)."""
        pp.flipflop(wait=True)
    def runflipflip(self, start, end, nsteps, nshots=20, deltaShots=30):
        """Step the pulse-picker EVR delay while firing the sequencer.

        NOTE(review): iterates `for i in nsteps` (presumably intended
        `range(nsteps)`), uses the undefined name `delta`, and never uses
        `end`, `nshots` or `deltaShots` -> broken as written.
        """
        self.set_pp_flipflop()
        for i in nsteps:
            self.evr_pp.ns_delay.set(start + delta * i)
            seq.start()
            time.sleep(5)
    def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):
        """RE the plan."""
        # NOTE(review): `evr_seq_plan` is called unqualified; the method is
        # defined on this class, so this should be self.evr_seq_plan.
        self.set_pp_flipflop()
        RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps, record=
            record, use_l3t=use_l3t))
    def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,
        use_l3t=None):
        """Configure daq and do the scan, trust other code to set up the sequencer."""
        yield from configure(daq, events=None, duration=None, record=record,
            use_l3t=use_l3t, controls=[evr])
        yield from scan([daq, seq], evr, start, end, nsteps)
    def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,
        flyStop, deltaT_shots, record=False, pp_shot_delay=2):
        """Serpentine fly scan: fly foil_x while stepping foil_y, with the
        sequencer driving beam / pulse-picker events.

        shiftSteps: int -> number of steps (np.linspace); otherwise treated
        as a step size (np.arange).
        """
        daq.disconnect()  # make sure we start from a fresh connection
        shiftMotor = foil_y
        flyMotor = foil_x
        self.setupSequencer(flyMotor, abs(flyStop - flyStart), deltaT_shots,
            pp_shot_delay=pp_shot_delay)
        daq.configure(-1, record=record, controls=[foil_x, foil_y])
        if isinstance(shiftSteps, int):
            RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop,
                shiftSteps), flyMotor, [flyStart, flyStop], seq))
        else:
            RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop,
                shiftSteps), flyMotor, [flyStart, flyStop], seq))
    def PPburst_sequence(self, nShots=None, nOffShots=2):
        """Build one burst sequence list: daq-on shots (event 185) followed
        by daq-off shots (event 179).

        An int count is taken as raw shots; a non-int count is seconds at
        120 Hz (see the isinstance branches).
        NOTE(review): the off-shot branch keys on the type of nShots, not
        nOffShots -- confirm that is intended.
        """
        if nOffShots < 2:
            raise ValueError('Minimum offshots is 2')
        ff_seq = [[185, 0, 0, 0]]
        ff_seq.append([179, 1, 0, 0])
        ff_seq.append([179, 1, 0, 0])
        if nShots is not None:
            if isinstance(nShots, int):
                ff_seq.append([185, nShots - 2, 0, 0])
            else:
                ff_seq.append([185, int(nShots * 120) - 2, 0, 0])
        ff_seq.append([179, 2, 0, 0])
        if nShots is not None:
            if isinstance(nShots, int):
                for i in range(nOffShots - 2):
                    ff_seq.append([179, 1, 0, 0])
            else:
                for i in range(int(nOffShots * 120) - 2):
                    ff_seq.append([179, 1, 0, 0])
        return ff_seq
    def prepare_seq_PPburst(self, nShots=None, nOffShots=None):
        """Load a single PPburst sequence into the sequencer (play once).

        NOTE(review): the default nOffShots=None raises TypeError in
        PPburst_sequence's `nOffShots < 2` comparison on Python 3.
        """
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        seq.play_mode.put(0)  # run sequence once
        ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)
        seq.sequence.put_seq(ff_seq)
    def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):
        """Return the PPburst sequence repeated nTimes back-to-back."""
        single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots
            )
        ff_seq = []
        for i in range(nTimes):
            ff_seq += single_burst
        return ff_seq
    def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1
        ):
        """Load nTimes repeated PPburst sequences into the sequencer (play once)."""
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        seq.play_mode.put(0)  # run sequence once
        ff_seq = self.PPburst_sequence_pattern(nShots=nShots, nOffShots=
            nOffShots, nTimes=nTimes)
        seq.sequence.put_seq(ff_seq)
    def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        Need some testing how to deal with intermittent motion errors.
        """
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)  # settle before recording starts
        print('Reached horizontal start position')
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i + 1))
                self.sam_x.mv(xEnd)  # non-blocking; shots fire while in flight
                sleep(0.1)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                sleep(1.2)  # allow time to turn around
                self.sam_x.mv(xStart)
                sleep(0.1)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                # NOTE(review): `x` is undefined (should be self.sam_y.wm());
                # the resulting NameError is hidden by the bare except below.
                print('ypos', x.sam_y.wm())
                sleep(1.2)
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
    def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        Need some testing how to deal with intermittent motion errors.
        """
        # Same raster as dumbSnake but the event sequencer (seq.start) gates
        # the shots instead of direct pulse-picker open/close.
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)  # settle before recording starts
        print('Reached horizontal start position')
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i + 1))
                self.sam_x.mv(xEnd)
                sleep(0.1)
                seq.start()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                sleep(1.2)
                self.sam_x.mv(xStart)
                sleep(0.1)
                seq.start()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                # NOTE(review): `x` is undefined (should be self.sam_y.wm());
                # the NameError is hidden by the bare except below.
                print('ypos', x.sam_y.wm())
                sleep(1.2)
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
    def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        Need some testing how to deal with intermittent motion errors.
        """
        # Vertical variant: fly sam_y, step sam_x between sweeps.
        self.sam_y.umv(yStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i + 1))
                self.sam_y.mv(yEnd)
                sleep(0.05)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_y.wait()
                self.sam_x.mvr(xDelta)
                sleep(1.2)
                self.sam_y.mv(yStart)
                sleep(0.05)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_y.wait()
                self.sam_x.mvr(xDelta)
                sleep(1.2)
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
    def dumbSnake_burst_window(self, xStart, xEnd, yDelta, nRoundTrips,
        sweepTime, windowlist):
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        Need some testing how to deal with intermittent motion errors.
        """
        # NOTE(review): broken as written -- the loop below iterates the
        # undefined name `windowList` (parameter is `windowlist`), moves
        # sam_y to the whole list instead of one element, and reads the
        # nonexistent attribute self.sam_w.
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        for j in windowList:
            self.sam_y.umv(windowList)
            self.sam_y.wait()
            print('Windos position %f' % self.sam_w.wm())
            for i in range(nRoundTrips):
                try:
                    print('starting round trip %d' % (i + 1))
                    self.sam_x.mv(xEnd)
                    sleep(0.05)
                    seq.start()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    sleep(1)
                    self.sam_x.mv(xStart)
                    sleep(0.05)
                    seq.start()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    sleep(1)
                except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                    print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
    def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,
        windowList, startgrid):
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        sleeptime is the pp close time between window
        Need some testing how to deal with intermittent motion errors.
        """
        self.sam_x.umv(xStart)
        self.sam_y.umv(windowList[startgrid])
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        for j in range(len(windowList) - startgrid):
            self.sam_y.umv(windowList[startgrid + j])
            self.sam_y.wait()
            print('Windos position %f' % self.sam_y.wm())
            for i in range(nRoundTrips):
                try:
                    print('starting round trip %d' % (i + 1))
                    self.sam_x.mv(xEnd)
                    sleep(0.1)
                    seq.start()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    print('yposition', self.sam_y.wm())
                    sleep(1.2)
                    self.sam_x.mv(xStart)
                    sleep(0.1)
                    seq.start()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    print('yposition', self.sam_y.wm())
                    sleep(1.2)
                except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                    print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
<|reserved_special_token_1|>
from subprocess import check_output
import json
import sys
import time
import os
import numpy as np
from hutch_python.utils import safe_load
from ophyd import EpicsSignalRO
from ophyd import EpicsSignal
from bluesky import RunEngine
from bluesky.plans import scan
from bluesky.plans import list_scan
from bluesky.plan_stubs import configure
#from bluesky.plans import list_grid_scan
from ophyd import Component as Cpt
from ophyd import Device
from pcdsdevices.epics_motor import Newport, IMS, MMC100
from pcdsdevices.interface import BaseInterface
from pcdsdevices.device_types import Trigger
from pcdsdevices.areadetector import plugins
from cxi.db import daq, seq
from cxi.db import camviewer
from cxi.db import RE
from cxi.db import foil_x, foil_y
from cxi.db import cxi_pulsepicker as pp, seq
from cxi.db import bp, bpp, bps
from cxi.plans import serp_seq_scan
from time import sleep
class User():
    """CXI user-hutch helper: sample / wavefront-sensor motor handles plus
    canned DAQ, event-sequencer and pulse-picker scan recipes.

    Relies on hutch-python session globals (daq, seq, pp, RE, foil_x,
    foil_y, scan, list_scan, configure, serp_seq_scan, np, sleep, time)
    imported at module scope.
    """
    def __init__(self):
        # Beam-rate (Hz) -> event-sequencer sync-marker index.
        self._sync_markers = {0.5:0, 1:1, 5:2, 10:3, 30:4, 60:5, 120:6, 360:7}
        # Pulse-picker EVR trigger and its delay PV.
        self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0',name='evr_pp')
        self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name='pp_delay')
        # Sample stages (safe_load keeps the session usable if a PV is absent).
        with safe_load('sam_x'):
            self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')
        with safe_load('sam_y'):
            self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')
        with safe_load('sam_z'):
            self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')
        #with safe_load('sam_pitch'):
        #    self.sam_pitch = MMC100('CXI:USR:MMC:01', name='sam_pitch')
        #with safe_load('post_sam_x'):
        #    self.post_sam_x = IMS('CXI:USR:MMS:27', name='post_sam_x')
        #with safe_load('post_sam_y'):
        #    self.post_sam_y = MMC100('CXI:USR:MMC:02', name='post_sam_y')
        #with safe_load('post_sam_z'):
        #    self.post_sam_z = MMC100('CXI:USR:MMC:03', name='post_sam_z')
        # Wavefront-sensor optics stages.
        with safe_load('op_focus'):
            self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')
        with safe_load('op_x'):
            self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')
        with safe_load('op_y'):
            self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')
    def takeRun(self, nEvents, record=True):
        """Record a single DAQ run of nEvents events."""
        daq.configure(events=120, record=record)
        daq.begin(events=nEvents)
        daq.wait()
        daq.end_run()
    def get_ascan(self, motor, start, end, nsteps, nEvents, record=True):
        """Return (do not run) an absolute-scan plan over `motor` with the DAQ."""
        daq.configure(nEvents, record=record, controls=[motor])
        return scan([daq], motor, start, end, nsteps)
    def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):
        """Return (do not run) a scan plan relative to the current position."""
        daq.configure(nEvents, record=record)
        currPos = motor.wm()
        return scan([daq], motor, currPos+start, currPos+end, nsteps)
    def ascan(self, motor, start, end, nsteps, nEvents, record=True):
        """Run an absolute scan of `motor` with the DAQ via the RunEngine."""
        daq.configure(nEvents, record=record, controls=[motor])
        RE(scan([daq], motor, start, end, nsteps))
    def listscan(self, motor, posList, nEvents, record=True):
        """Run a scan over an explicit list of positions."""
        daq.configure(nEvents, record=record, controls=[motor])
        RE(list_scan([daq], motor, posList))
    def dscan(self, motor, start, end, nsteps, nEvents, record=True):
        """Run a scan relative to the motor's current position."""
        daq.configure(nEvents, record=record, controls=[motor])
        currPos = motor.wm()
        RE(scan([daq], motor, currPos+start, currPos+end, nsteps))
    def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2):
        """Program the sequencer for a fly scan: beam event (185) every
        deltaT_shots seconds at 120 Hz, then a pulse-picker event (187),
        repeated for as many shots as fit in `distance` at the fly motor's
        current velocity."""
        ## Setup sequencer for requested rate
        #sync_mark = int(self._sync_markers[self._rate])
        #leave the sync marker: assume no dropping.
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        #seq.play_mode.put(0) # Run sequence once
        seq.play_mode.put(1) # Run sequence N Times
        # Determine the different sequences needed
        beamDelay = int(120*deltaT_shots)-pp_shot_delay
        if (beamDelay+pp_shot_delay)<4:
            print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
            return
        fly_seq = [[185, beamDelay, 0, 0],
                   [187, pp_shot_delay, 0, 0]]
        #logging.debug("Sequence: {}".format(fly_seq))
        #calculate how often to shoot in requested distance
        flyspeed = flymotor.velocity.get()
        flytime = distance/flyspeed
        flyshots = int(flytime/deltaT_shots)
        seq.rep_count.put(flyshots) # Run sequence N Times
        seq.sequence.put_seq(fly_seq)
    def setPP_flipflip(self, nshots=20, deltaShots=30):
        """Program the sequencer for pulse-picker flip-flop shots.

        NOTE(review): references undefined names `delta_shots` and
        `pp_shot_delay` (the parameter is `deltaShots`, and there is no
        pp_shot_delay argument) -> NameError when called.
        """
        ## Setup sequencer for requested rate
        #sync_mark = int(self._sync_markers[self._rate])
        #leave the sync marker: assume no dropping.
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        #seq.play_mode.put(0) # Run sequence once
        seq.play_mode.put(1) # Run sequence N Times
        seq.rep_count.put(nshots) # Run sequence N Times
        # Determine the different sequences needed
        beamDelay = int(delta_shots)-pp_shot_delay
        if (beamDelay+pp_shot_delay)<4:
            print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
            return
        ff_seq = [[185, beamDelay, 0, 0],
                  [187, pp_shot_delay, 0, 0]]
        #logging.debug("Sequence: {}".format(fly_seq))
        seq.sequence.put_seq(ff_seq)
    def set_pp_flipflop(self):
        """Put the pulse picker into flip-flop mode (blocks until done)."""
        pp.flipflop(wait=True)
    def runflipflip(self, start, end, nsteps,nshots=20, deltaShots=30):
        """Step the pulse-picker EVR delay while firing the sequencer.

        NOTE(review): iterates `for i in nsteps` (presumably intended
        `range(nsteps)`), uses the undefined name `delta`, and never uses
        `end`, `nshots` or `deltaShots` -> broken as written.
        """
        self.set_pp_flipflop()
        #self.setPP_flipflip(nshots=20, deltaShots=6)
        for i in nsteps:
            self.evr_pp.ns_delay.set(start+delta*i)
            seq.start()
            time.sleep(5)
    def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):
        """RE the plan."""
        # NOTE(review): `evr_seq_plan` is called unqualified; the method is
        # defined on this class, so this should be self.evr_seq_plan.
        self.set_pp_flipflop()
        RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps,
                        record=record, use_l3t=use_l3t))
    def evr_seq_plan(self, daq, seq, evr, start, end, nsteps,
                     record=None, use_l3t=None):
        """Configure daq and do the scan, trust other code to set up the sequencer."""
        yield from configure(daq, events=None, duration=None, record=record,
                             use_l3t=use_l3t, controls=[evr])
        yield from scan([daq, seq], evr, start, end, nsteps)
    def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart, flyStop, deltaT_shots, record=False, pp_shot_delay=2):
        """Serpentine fly scan: fly foil_x while stepping foil_y, sequencer
        driving beam / pulse-picker events.  An int shiftSteps is a step
        count (np.linspace); otherwise it is a step size (np.arange)."""
        daq.disconnect() #make sure we start from fresh point.
        shiftMotor=foil_y
        flyMotor=foil_x
        self.setupSequencer(flyMotor, abs(flyStop-flyStart), deltaT_shots, pp_shot_delay=pp_shot_delay)
        daq.configure(-1, record=record, controls=[foil_x, foil_y])
        #daq.begin(-1)
        if isinstance(shiftSteps, int):
            RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop, shiftSteps), flyMotor, [flyStart, flyStop], seq))
        else:
            RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop, shiftSteps), flyMotor, [flyStart, flyStop], seq))
    def PPburst_sequence(self, nShots=None, nOffShots=2):
        """Build one burst sequence list: daq-on shots (185) followed by
        daq-off shots (179).  An int count is raw shots; a non-int count is
        seconds at 120 Hz.
        NOTE(review): the off-shot branch keys on the type of nShots, not
        nOffShots -- confirm that is intended.
        """
        if nOffShots < 2:
            raise ValueError('Minimum offshots is 2')
        ff_seq = [[185, 0, 0, 0]]
        ff_seq.append([179, 1 , 0, 0])
        ff_seq.append([179, 1 , 0, 0])
        if nShots is not None:
            if isinstance(nShots , int):
                ff_seq.append([185, nShots-2, 0, 0])
            else:
                ff_seq.append([185, int(nShots*120)-2, 0, 0])
        ff_seq.append([179, 2, 0, 0])
        if nShots is not None:
            if isinstance(nShots , int):
                for i in range(nOffShots-2):
                    ff_seq.append([179, 1, 0, 0])
            else:
                for i in range(int(nOffShots*120)-2):
                    ff_seq.append([179, 1, 0, 0])
        return ff_seq
    def prepare_seq_PPburst(self, nShots=None, nOffShots=None):
        """Load a single PPburst sequence into the sequencer (play once).

        NOTE(review): the default nOffShots=None raises TypeError in
        PPburst_sequence's `nOffShots < 2` comparison on Python 3.
        """
        ## Setup sequencer for requested rate
        #sync_mark = int(self._sync_markers[self._rate])
        #leave the sync marker: assume no dropping.
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        seq.play_mode.put(0) # Run sequence once
        #seq.play_mode.put(1) # Run sequence N Times
        #seq.rep_count.put(nshots) # Run sequence N Times
        ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)
        seq.sequence.put_seq(ff_seq)
    def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):
        """Return the PPburst sequence repeated nTimes back-to-back."""
        single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)
        ff_seq = []
        for i in range(nTimes):
            ff_seq += single_burst
        return ff_seq
    def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1):
        """Load nTimes repeated PPburst sequences into the sequencer (play once)."""
        ## Setup sequencer for requested rate
        #sync_mark = int(self._sync_markers[self._rate])
        #leave the sync marker: assume no dropping.
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        seq.play_mode.put(0) # Run sequence once
        #seq.play_mode.put(1) # Run sequence N Times
        #seq.rep_count.put(nshots) # Run sequence N Times
        ff_seq = self.PPburst_sequence_pattern(nShots=nShots, nOffShots=nOffShots, nTimes=nTimes)
        seq.sequence.put_seq(ff_seq)
    def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        Need some testing how to deal with intermittent motion errors.
        """
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i+1))
                self.sam_x.mv(xEnd)
                sleep(0.1)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                sleep(1.2)#orignal was 1
                self.sam_x.mv(xStart)
                sleep(0.1)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                # NOTE(review): `x` is undefined (should be self.sam_y.wm());
                # the resulting NameError is hidden by the bare except below.
                print('ypos',x.sam_y.wm())
                sleep(1.2)#original was 1
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
    def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        Need some testing how to deal with intermittent motion errors.
        """
        # Same raster as dumbSnake but sequencer-gated (seq.start) instead of
        # direct pulse-picker open/close.
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i+1))
                self.sam_x.mv(xEnd)
                sleep(0.1)
                seq.start()
                #sleep(sweepTime)
                #pp.close()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                sleep(1.2)#orignal was 1
                self.sam_x.mv(xStart)
                sleep(0.1)
                #pp.open()
                #sleep(sweepTime)
                #pp.close()
                seq.start()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                # NOTE(review): `x` is undefined (should be self.sam_y.wm());
                # the NameError is hidden by the bare except below.
                print('ypos',x.sam_y.wm())
                sleep(1.2)#original was 1
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
    def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        Need some testing how to deal with intermittent motion errors.
        """
        # Vertical variant: fly sam_y, step sam_x between sweeps.
        self.sam_y.umv(yStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i+1))
                self.sam_y.mv(yEnd)
                sleep(0.05)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_y.wait()
                self.sam_x.mvr(xDelta)
                sleep(1.2)#orignal was 1
                self.sam_y.mv(yStart)
                sleep(0.05)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_y.wait()
                self.sam_x.mvr(xDelta)
                sleep(1.2)#original was 1
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
    def dumbSnake_burst_window(self,xStart,xEnd,yDelta, nRoundTrips, sweepTime,windowlist):#for burst mode
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        Need some testing how to deal with intermittent motion errors.
        """
        # NOTE(review): broken as written -- the loop below iterates the
        # undefined name `windowList` (parameter is `windowlist`), moves
        # sam_y to the whole list instead of one element, and reads the
        # nonexistent attribute self.sam_w.
        #windowList = np.zeros([numYwindow,numXwindow],dtype=object)
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for j in (windowList):
            self.sam_y.umv(windowList)
            self.sam_y.wait()
            print('Windos position %f'%(self.sam_w.wm()))
            for i in range(nRoundTrips):
                try:
                    print('starting round trip %d' % (i+1))
                    self.sam_x.mv(xEnd)
                    sleep(0.05)
                    seq.start()#start sequence Need to be set
                    #sleep(sweepTime)
                    #pp.close()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    sleep(1)#wait for turning around
                    self.sam_x.mv(xStart)
                    sleep(0.05)
                    #pp.open()
                    seq.start()#start sequence
                    #sleep(sweepTime)
                    #pp.close()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    sleep(1)
                except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                    print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
    def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,windowList,startgrid):#for burst mode
        """
        simple rastering for running at 120Hz with shutter open/close before
        and after motion stop.
        sleeptime is the pp close time between window
        Need some testing how to deal with intermittent motion errors.
        """
        self.sam_x.umv(xStart)
        self.sam_y.umv(windowList[startgrid])
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for j in range(len(windowList)-startgrid):
            self.sam_y.umv(windowList[startgrid+j])
            self.sam_y.wait()
            print('Windos position %f'%(self.sam_y.wm()))
            for i in range(nRoundTrips):
                try:
                    print('starting round trip %d' % (i+1))
                    self.sam_x.mv(xEnd)
                    sleep(0.1)
                    seq.start()#start sequence Need to be set
                    #sleep(sweepTime)
                    #pp.close()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    print('yposition',self.sam_y.wm())
                    sleep(1.2)#wait for turning around
                    self.sam_x.mv(xStart)
                    sleep(0.1)
                    #pp.open()
                    seq.start()#start sequence
                    #sleep(sweepTime)
                    #pp.close()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    print('yposition',self.sam_y.wm())
                    sleep(1.2)
                except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                    print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
        #daq.end()
    #def run_serp_seq_scan_expl(self, yStart, yStop, ySteps, flyStart, flyStop, deltaT_shots, record=False, pp_shot_delay=1):
    #    daq.disconnect() #make sure we start from fresh point.
    #    self.setupSequencer(foil_y, abs(flyStop-flyStart), deltaT_shots, pp_shot_delay=pp_shot_delay)
    #    daq.configure(-1, record=record, controls=[foil_x, foil_y])
        #daq.begin(-1)
    #    if isinstance(ySteps, int):
    #        RE(serp_seq_scan(foil_x, np.linspace(yStart, yStop, ySteps), foil_y, [flyStart, flyStop], seq))
    #    else:
    #        RE(serp_seq_scan(foil_x, np.arange(yStart, yStop, ySteps), foil_y, [flyStart, flyStop], seq))
    #    #daq.end()
|
flexible
|
{
"blob_id": "4473971552aa48236b19dec7e7c1ea1e622d5795",
"index": 7347,
"step-1": "<mask token>\n\n\nclass User:\n <mask token>\n <mask token>\n <mask token>\n\n def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record)\n currPos = motor.wm()\n return scan([daq], motor, currPos + start, currPos + end, nsteps)\n\n def ascan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(scan([daq], motor, start, end, nsteps))\n <mask token>\n\n def dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n currPos = motor.wm()\n RE(scan([daq], motor, currPos + start, currPos + end, nsteps))\n <mask token>\n\n def setPP_flipflip(self, nshots=20, deltaShots=30):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(1)\n seq.rep_count.put(nshots)\n beamDelay = int(delta_shots) - pp_shot_delay\n if beamDelay + pp_shot_delay < 4:\n print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')\n return\n ff_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]\n seq.sequence.put_seq(ff_seq)\n <mask token>\n <mask token>\n\n def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):\n \"\"\"RE the plan.\"\"\"\n self.set_pp_flipflop()\n RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps, record=\n record, use_l3t=use_l3t))\n\n def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,\n use_l3t=None):\n \"\"\"Configure daq and do the scan, trust other code to set up the sequencer.\"\"\"\n yield from configure(daq, events=None, duration=None, record=record,\n use_l3t=use_l3t, controls=[evr])\n yield from scan([daq, seq], evr, start, end, nsteps)\n\n def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,\n flyStop, deltaT_shots, record=False, pp_shot_delay=2):\n daq.disconnect()\n shiftMotor = foil_y\n flyMotor = foil_x\n self.setupSequencer(flyMotor, abs(flyStop - flyStart), 
deltaT_shots,\n pp_shot_delay=pp_shot_delay)\n daq.configure(-1, record=record, controls=[foil_x, foil_y])\n if isinstance(shiftSteps, int):\n RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop,\n shiftSteps), flyMotor, [flyStart, flyStop], seq))\n else:\n RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop,\n shiftSteps), flyMotor, [flyStart, flyStop], seq))\n <mask token>\n\n def prepare_seq_PPburst(self, nShots=None, nOffShots=None):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0)\n ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)\n seq.sequence.put_seq(ff_seq)\n\n def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):\n single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots\n )\n ff_seq = []\n for i in range(nTimes):\n ff_seq += single_burst\n return ff_seq\n <mask token>\n\n def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos', x.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent 
motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos', x.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass User:\n\n def __init__(self):\n self._sync_markers = {(0.5): 0, (1): 1, (5): 2, (10): 3, (30): 4, (\n 60): 5, (120): 6, (360): 7}\n self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0', name='evr_pp')\n self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name=\n 'pp_delay')\n with safe_load('sam_x'):\n self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')\n with safe_load('sam_y'):\n self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')\n with safe_load('sam_z'):\n self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')\n with safe_load('op_focus'):\n self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')\n with safe_load('op_x'):\n self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')\n with safe_load('op_y'):\n self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')\n\n def takeRun(self, nEvents, record=True):\n daq.configure(events=120, record=record)\n daq.begin(events=nEvents)\n daq.wait()\n daq.end_run()\n <mask token>\n\n def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record)\n currPos = motor.wm()\n return scan([daq], motor, currPos + start, currPos + end, nsteps)\n\n def ascan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(scan([daq], motor, start, end, nsteps))\n\n def listscan(self, motor, posList, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(list_scan([daq], motor, posList))\n\n def dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n currPos = motor.wm()\n RE(scan([daq], motor, currPos + start, currPos + end, nsteps))\n\n def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2\n ):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(1)\n beamDelay = int(120 * deltaT_shots) - pp_shot_delay\n if beamDelay + pp_shot_delay < 4:\n print('PP cannot go 
faster than 40 Hz in flip-flip mode, quit!')\n return\n fly_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]\n flyspeed = flymotor.velocity.get()\n flytime = distance / flyspeed\n flyshots = int(flytime / deltaT_shots)\n seq.rep_count.put(flyshots)\n seq.sequence.put_seq(fly_seq)\n\n def setPP_flipflip(self, nshots=20, deltaShots=30):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(1)\n seq.rep_count.put(nshots)\n beamDelay = int(delta_shots) - pp_shot_delay\n if beamDelay + pp_shot_delay < 4:\n print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')\n return\n ff_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]\n seq.sequence.put_seq(ff_seq)\n <mask token>\n <mask token>\n\n def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):\n \"\"\"RE the plan.\"\"\"\n self.set_pp_flipflop()\n RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps, record=\n record, use_l3t=use_l3t))\n\n def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,\n use_l3t=None):\n \"\"\"Configure daq and do the scan, trust other code to set up the sequencer.\"\"\"\n yield from configure(daq, events=None, duration=None, record=record,\n use_l3t=use_l3t, controls=[evr])\n yield from scan([daq, seq], evr, start, end, nsteps)\n\n def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,\n flyStop, deltaT_shots, record=False, pp_shot_delay=2):\n daq.disconnect()\n shiftMotor = foil_y\n flyMotor = foil_x\n self.setupSequencer(flyMotor, abs(flyStop - flyStart), deltaT_shots,\n pp_shot_delay=pp_shot_delay)\n daq.configure(-1, record=record, controls=[foil_x, foil_y])\n if isinstance(shiftSteps, int):\n RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop,\n shiftSteps), flyMotor, [flyStart, flyStop], seq))\n else:\n RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop,\n shiftSteps), flyMotor, [flyStart, flyStop], seq))\n\n def PPburst_sequence(self, 
nShots=None, nOffShots=2):\n if nOffShots < 2:\n raise ValueError('Minimum offshots is 2')\n ff_seq = [[185, 0, 0, 0]]\n ff_seq.append([179, 1, 0, 0])\n ff_seq.append([179, 1, 0, 0])\n if nShots is not None:\n if isinstance(nShots, int):\n ff_seq.append([185, nShots - 2, 0, 0])\n else:\n ff_seq.append([185, int(nShots * 120) - 2, 0, 0])\n ff_seq.append([179, 2, 0, 0])\n if nShots is not None:\n if isinstance(nShots, int):\n for i in range(nOffShots - 2):\n ff_seq.append([179, 1, 0, 0])\n else:\n for i in range(int(nOffShots * 120) - 2):\n ff_seq.append([179, 1, 0, 0])\n return ff_seq\n\n def prepare_seq_PPburst(self, nShots=None, nOffShots=None):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0)\n ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)\n seq.sequence.put_seq(ff_seq)\n\n def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):\n single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots\n )\n ff_seq = []\n for i in range(nTimes):\n ff_seq += single_burst\n return ff_seq\n\n def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1\n ):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0)\n ff_seq = self.PPburst_sequence_pattern(nShots=nShots, nOffShots=\n nOffShots, nTimes=nTimes)\n seq.sequence.put_seq(ff_seq)\n\n def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)\n 
self.sam_x.mv(xStart)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos', x.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos', x.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_y.umv(yStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_y.mv(yEnd)\n sleep(0.05)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_y.wait()\n self.sam_x.mvr(xDelta)\n sleep(1.2)\n self.sam_y.mv(yStart)\n sleep(0.05)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_y.wait()\n self.sam_x.mvr(xDelta)\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_burst_window(self, xStart, xEnd, yDelta, nRoundTrips,\n sweepTime, windowlist):\n \"\"\" \n simple 
rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for j in windowList:\n self.sam_y.umv(windowList)\n self.sam_y.wait()\n print('Windos position %f' % self.sam_w.wm())\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.05)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)\n self.sam_x.mv(xStart)\n sleep(0.05)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,\n windowList, startgrid):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n sleeptime is the pp close time between window \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n self.sam_y.umv(windowList[startgrid])\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for j in range(len(windowList) - startgrid):\n self.sam_y.umv(windowList[startgrid + j])\n self.sam_y.wait()\n print('Windos position %f' % self.sam_y.wm())\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition', self.sam_y.wm())\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition', self.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n",
"step-3": "<mask token>\n\n\nclass User:\n\n def __init__(self):\n self._sync_markers = {(0.5): 0, (1): 1, (5): 2, (10): 3, (30): 4, (\n 60): 5, (120): 6, (360): 7}\n self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0', name='evr_pp')\n self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name=\n 'pp_delay')\n with safe_load('sam_x'):\n self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')\n with safe_load('sam_y'):\n self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')\n with safe_load('sam_z'):\n self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')\n with safe_load('op_focus'):\n self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')\n with safe_load('op_x'):\n self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')\n with safe_load('op_y'):\n self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')\n\n def takeRun(self, nEvents, record=True):\n daq.configure(events=120, record=record)\n daq.begin(events=nEvents)\n daq.wait()\n daq.end_run()\n\n def get_ascan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n return scan([daq], motor, start, end, nsteps)\n\n def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record)\n currPos = motor.wm()\n return scan([daq], motor, currPos + start, currPos + end, nsteps)\n\n def ascan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(scan([daq], motor, start, end, nsteps))\n\n def listscan(self, motor, posList, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(list_scan([daq], motor, posList))\n\n def dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n currPos = motor.wm()\n RE(scan([daq], motor, currPos + start, currPos + end, nsteps))\n\n def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2\n ):\n sync_mark = int(self._sync_markers[120])\n 
seq.sync_marker.put(sync_mark)\n seq.play_mode.put(1)\n beamDelay = int(120 * deltaT_shots) - pp_shot_delay\n if beamDelay + pp_shot_delay < 4:\n print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')\n return\n fly_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]\n flyspeed = flymotor.velocity.get()\n flytime = distance / flyspeed\n flyshots = int(flytime / deltaT_shots)\n seq.rep_count.put(flyshots)\n seq.sequence.put_seq(fly_seq)\n\n def setPP_flipflip(self, nshots=20, deltaShots=30):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(1)\n seq.rep_count.put(nshots)\n beamDelay = int(delta_shots) - pp_shot_delay\n if beamDelay + pp_shot_delay < 4:\n print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')\n return\n ff_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]\n seq.sequence.put_seq(ff_seq)\n <mask token>\n <mask token>\n\n def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):\n \"\"\"RE the plan.\"\"\"\n self.set_pp_flipflop()\n RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps, record=\n record, use_l3t=use_l3t))\n\n def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,\n use_l3t=None):\n \"\"\"Configure daq and do the scan, trust other code to set up the sequencer.\"\"\"\n yield from configure(daq, events=None, duration=None, record=record,\n use_l3t=use_l3t, controls=[evr])\n yield from scan([daq, seq], evr, start, end, nsteps)\n\n def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,\n flyStop, deltaT_shots, record=False, pp_shot_delay=2):\n daq.disconnect()\n shiftMotor = foil_y\n flyMotor = foil_x\n self.setupSequencer(flyMotor, abs(flyStop - flyStart), deltaT_shots,\n pp_shot_delay=pp_shot_delay)\n daq.configure(-1, record=record, controls=[foil_x, foil_y])\n if isinstance(shiftSteps, int):\n RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop,\n shiftSteps), flyMotor, [flyStart, flyStop], 
seq))\n else:\n RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop,\n shiftSteps), flyMotor, [flyStart, flyStop], seq))\n\n def PPburst_sequence(self, nShots=None, nOffShots=2):\n if nOffShots < 2:\n raise ValueError('Minimum offshots is 2')\n ff_seq = [[185, 0, 0, 0]]\n ff_seq.append([179, 1, 0, 0])\n ff_seq.append([179, 1, 0, 0])\n if nShots is not None:\n if isinstance(nShots, int):\n ff_seq.append([185, nShots - 2, 0, 0])\n else:\n ff_seq.append([185, int(nShots * 120) - 2, 0, 0])\n ff_seq.append([179, 2, 0, 0])\n if nShots is not None:\n if isinstance(nShots, int):\n for i in range(nOffShots - 2):\n ff_seq.append([179, 1, 0, 0])\n else:\n for i in range(int(nOffShots * 120) - 2):\n ff_seq.append([179, 1, 0, 0])\n return ff_seq\n\n def prepare_seq_PPburst(self, nShots=None, nOffShots=None):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0)\n ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)\n seq.sequence.put_seq(ff_seq)\n\n def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):\n single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots\n )\n ff_seq = []\n for i in range(nTimes):\n ff_seq += single_burst\n return ff_seq\n\n def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1\n ):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0)\n ff_seq = self.PPburst_sequence_pattern(nShots=nShots, nOffShots=\n nOffShots, nTimes=nTimes)\n seq.sequence.put_seq(ff_seq)\n\n def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' 
% (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos', x.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos', x.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_y.umv(yStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_y.mv(yEnd)\n sleep(0.05)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_y.wait()\n self.sam_x.mvr(xDelta)\n sleep(1.2)\n self.sam_y.mv(yStart)\n sleep(0.05)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_y.wait()\n self.sam_x.mvr(xDelta)\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n 
daq.end_run()\n daq.disconnect()\n\n def dumbSnake_burst_window(self, xStart, xEnd, yDelta, nRoundTrips,\n sweepTime, windowlist):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for j in windowList:\n self.sam_y.umv(windowList)\n self.sam_y.wait()\n print('Windos position %f' % self.sam_w.wm())\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.05)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)\n self.sam_x.mv(xStart)\n sleep(0.05)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,\n windowList, startgrid):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n sleeptime is the pp close time between window \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n self.sam_y.umv(windowList[startgrid])\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for j in range(len(windowList) - startgrid):\n self.sam_y.umv(windowList[startgrid + j])\n self.sam_y.wait()\n print('Windos position %f' % self.sam_y.wm())\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition', self.sam_y.wm())\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition', self.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end 
happily' % i)\n daq.end_run()\n daq.disconnect()\n",
"step-4": "<mask token>\n\n\nclass User:\n\n def __init__(self):\n self._sync_markers = {(0.5): 0, (1): 1, (5): 2, (10): 3, (30): 4, (\n 60): 5, (120): 6, (360): 7}\n self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0', name='evr_pp')\n self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name=\n 'pp_delay')\n with safe_load('sam_x'):\n self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')\n with safe_load('sam_y'):\n self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')\n with safe_load('sam_z'):\n self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')\n with safe_load('op_focus'):\n self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')\n with safe_load('op_x'):\n self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')\n with safe_load('op_y'):\n self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')\n\n def takeRun(self, nEvents, record=True):\n daq.configure(events=120, record=record)\n daq.begin(events=nEvents)\n daq.wait()\n daq.end_run()\n\n def get_ascan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n return scan([daq], motor, start, end, nsteps)\n\n def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record)\n currPos = motor.wm()\n return scan([daq], motor, currPos + start, currPos + end, nsteps)\n\n def ascan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(scan([daq], motor, start, end, nsteps))\n\n def listscan(self, motor, posList, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(list_scan([daq], motor, posList))\n\n def dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n currPos = motor.wm()\n RE(scan([daq], motor, currPos + start, currPos + end, nsteps))\n\n def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2\n ):\n sync_mark = int(self._sync_markers[120])\n 
seq.sync_marker.put(sync_mark)\n seq.play_mode.put(1)\n beamDelay = int(120 * deltaT_shots) - pp_shot_delay\n if beamDelay + pp_shot_delay < 4:\n print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')\n return\n fly_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]\n flyspeed = flymotor.velocity.get()\n flytime = distance / flyspeed\n flyshots = int(flytime / deltaT_shots)\n seq.rep_count.put(flyshots)\n seq.sequence.put_seq(fly_seq)\n\n def setPP_flipflip(self, nshots=20, deltaShots=30):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(1)\n seq.rep_count.put(nshots)\n beamDelay = int(delta_shots) - pp_shot_delay\n if beamDelay + pp_shot_delay < 4:\n print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')\n return\n ff_seq = [[185, beamDelay, 0, 0], [187, pp_shot_delay, 0, 0]]\n seq.sequence.put_seq(ff_seq)\n\n def set_pp_flipflop(self):\n pp.flipflop(wait=True)\n\n def runflipflip(self, start, end, nsteps, nshots=20, deltaShots=30):\n self.set_pp_flipflop()\n for i in nsteps:\n self.evr_pp.ns_delay.set(start + delta * i)\n seq.start()\n time.sleep(5)\n\n def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):\n \"\"\"RE the plan.\"\"\"\n self.set_pp_flipflop()\n RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps, record=\n record, use_l3t=use_l3t))\n\n def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,\n use_l3t=None):\n \"\"\"Configure daq and do the scan, trust other code to set up the sequencer.\"\"\"\n yield from configure(daq, events=None, duration=None, record=record,\n use_l3t=use_l3t, controls=[evr])\n yield from scan([daq, seq], evr, start, end, nsteps)\n\n def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,\n flyStop, deltaT_shots, record=False, pp_shot_delay=2):\n daq.disconnect()\n shiftMotor = foil_y\n flyMotor = foil_x\n self.setupSequencer(flyMotor, abs(flyStop - flyStart), deltaT_shots,\n 
pp_shot_delay=pp_shot_delay)\n daq.configure(-1, record=record, controls=[foil_x, foil_y])\n if isinstance(shiftSteps, int):\n RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop,\n shiftSteps), flyMotor, [flyStart, flyStop], seq))\n else:\n RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop,\n shiftSteps), flyMotor, [flyStart, flyStop], seq))\n\n def PPburst_sequence(self, nShots=None, nOffShots=2):\n if nOffShots < 2:\n raise ValueError('Minimum offshots is 2')\n ff_seq = [[185, 0, 0, 0]]\n ff_seq.append([179, 1, 0, 0])\n ff_seq.append([179, 1, 0, 0])\n if nShots is not None:\n if isinstance(nShots, int):\n ff_seq.append([185, nShots - 2, 0, 0])\n else:\n ff_seq.append([185, int(nShots * 120) - 2, 0, 0])\n ff_seq.append([179, 2, 0, 0])\n if nShots is not None:\n if isinstance(nShots, int):\n for i in range(nOffShots - 2):\n ff_seq.append([179, 1, 0, 0])\n else:\n for i in range(int(nOffShots * 120) - 2):\n ff_seq.append([179, 1, 0, 0])\n return ff_seq\n\n def prepare_seq_PPburst(self, nShots=None, nOffShots=None):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0)\n ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)\n seq.sequence.put_seq(ff_seq)\n\n def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):\n single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots\n )\n ff_seq = []\n for i in range(nTimes):\n ff_seq += single_burst\n return ff_seq\n\n def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1\n ):\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0)\n ff_seq = self.PPburst_sequence_pattern(nShots=nShots, nOffShots=\n nOffShots, nTimes=nTimes)\n seq.sequence.put_seq(ff_seq)\n\n def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing 
how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos', x.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos', x.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_y.umv(yStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_y.mv(yEnd)\n sleep(0.05)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_y.wait()\n 
self.sam_x.mvr(xDelta)\n sleep(1.2)\n self.sam_y.mv(yStart)\n sleep(0.05)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_y.wait()\n self.sam_x.mvr(xDelta)\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_burst_window(self, xStart, xEnd, yDelta, nRoundTrips,\n sweepTime, windowlist):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for j in windowList:\n self.sam_y.umv(windowList)\n self.sam_y.wait()\n print('Windos position %f' % self.sam_w.wm())\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.05)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)\n self.sam_x.mv(xStart)\n sleep(0.05)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,\n windowList, startgrid):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n sleeptime is the pp close time between window \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n self.sam_y.umv(windowList[startgrid])\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n for j in range(len(windowList) - startgrid):\n self.sam_y.umv(windowList[startgrid + j])\n self.sam_y.wait()\n print('Windos position %f' % self.sam_y.wm())\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i + 1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n 
print('yposition', self.sam_y.wm())\n sleep(1.2)\n self.sam_x.mv(xStart)\n sleep(0.1)\n seq.start()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition', self.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n",
"step-5": "from subprocess import check_output\n\nimport json\nimport sys\nimport time\nimport os\n\nimport numpy as np\nfrom hutch_python.utils import safe_load\nfrom ophyd import EpicsSignalRO\nfrom ophyd import EpicsSignal\nfrom bluesky import RunEngine\nfrom bluesky.plans import scan\nfrom bluesky.plans import list_scan\nfrom bluesky.plan_stubs import configure\n#from bluesky.plans import list_grid_scan\nfrom ophyd import Component as Cpt\nfrom ophyd import Device\nfrom pcdsdevices.epics_motor import Newport, IMS, MMC100\nfrom pcdsdevices.interface import BaseInterface\nfrom pcdsdevices.device_types import Trigger\nfrom pcdsdevices.areadetector import plugins\nfrom cxi.db import daq, seq\nfrom cxi.db import camviewer\nfrom cxi.db import RE\nfrom cxi.db import foil_x, foil_y\nfrom cxi.db import cxi_pulsepicker as pp, seq\nfrom cxi.db import bp, bpp, bps\nfrom cxi.plans import serp_seq_scan\nfrom time import sleep\n\nclass User():\n def __init__(self):\n self._sync_markers = {0.5:0, 1:1, 5:2, 10:3, 30:4, 60:5, 120:6, 360:7}\n self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0',name='evr_pp')\n self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name='pp_delay')\n\n with safe_load('sam_x'):\n self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')\n with safe_load('sam_y'):\n self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')\n with safe_load('sam_z'):\n self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')\n #with safe_load('sam_pitch'):\n # self.sam_pitch = MMC100('CXI:USR:MMC:01', name='sam_pitch')\n #with safe_load('post_sam_x'):\n # self.post_sam_x = IMS('CXI:USR:MMS:27', name='post_sam_x')\n #with safe_load('post_sam_y'):\n # self.post_sam_y = MMC100('CXI:USR:MMC:02', name='post_sam_y')\n #with safe_load('post_sam_z'):\n # self.post_sam_z = MMC100('CXI:USR:MMC:03', name='post_sam_z')\n with safe_load('op_focus'):\n self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')\n with safe_load('op_x'):\n self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')\n with 
safe_load('op_y'):\n self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')\n\n\n def takeRun(self, nEvents, record=True):\n daq.configure(events=120, record=record)\n daq.begin(events=nEvents)\n daq.wait()\n daq.end_run()\n\n def get_ascan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n return scan([daq], motor, start, end, nsteps)\n\n def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record)\n currPos = motor.wm()\n return scan([daq], motor, currPos+start, currPos+end, nsteps)\n\n def ascan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(scan([daq], motor, start, end, nsteps))\n\n def listscan(self, motor, posList, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n RE(list_scan([daq], motor, posList))\n\n def dscan(self, motor, start, end, nsteps, nEvents, record=True):\n daq.configure(nEvents, record=record, controls=[motor])\n currPos = motor.wm()\n RE(scan([daq], motor, currPos+start, currPos+end, nsteps))\n\n def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2):\n ## Setup sequencer for requested rate\n #sync_mark = int(self._sync_markers[self._rate])\n #leave the sync marker: assume no dropping.\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n #seq.play_mode.put(0) # Run sequence once\n seq.play_mode.put(1) # Run sequence N Times\n \n # Determine the different sequences needed\n beamDelay = int(120*deltaT_shots)-pp_shot_delay\n if (beamDelay+pp_shot_delay)<4:\n print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')\n return\n fly_seq = [[185, beamDelay, 0, 0],\n [187, pp_shot_delay, 0, 0]]\n #logging.debug(\"Sequence: {}\".format(fly_seq)) \n\n #calculate how often to shoot in requested distance\n flyspeed = flymotor.velocity.get()\n flytime = distance/flyspeed\n flyshots = 
int(flytime/deltaT_shots)\n seq.rep_count.put(flyshots) # Run sequence N Times\n\n seq.sequence.put_seq(fly_seq) \n\n def setPP_flipflip(self, nshots=20, deltaShots=30):\n ## Setup sequencer for requested rate\n #sync_mark = int(self._sync_markers[self._rate])\n #leave the sync marker: assume no dropping.\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n #seq.play_mode.put(0) # Run sequence once\n seq.play_mode.put(1) # Run sequence N Times\n seq.rep_count.put(nshots) # Run sequence N Times\n \n # Determine the different sequences needed\n beamDelay = int(delta_shots)-pp_shot_delay\n if (beamDelay+pp_shot_delay)<4:\n print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')\n return\n ff_seq = [[185, beamDelay, 0, 0],\n [187, pp_shot_delay, 0, 0]]\n #logging.debug(\"Sequence: {}\".format(fly_seq)) \n seq.sequence.put_seq(ff_seq) \n\n def set_pp_flipflop(self):\n pp.flipflop(wait=True)\n\n def runflipflip(self, start, end, nsteps,nshots=20, deltaShots=30):\n self.set_pp_flipflop()\n #self.setPP_flipflip(nshots=20, deltaShots=6)\n for i in nsteps:\n self.evr_pp.ns_delay.set(start+delta*i)\n seq.start()\n time.sleep(5)\n\n def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):\n \"\"\"RE the plan.\"\"\"\n self.set_pp_flipflop()\n RE(evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps,\n record=record, use_l3t=use_l3t))\n\n def evr_seq_plan(self, daq, seq, evr, start, end, nsteps,\n record=None, use_l3t=None):\n \"\"\"Configure daq and do the scan, trust other code to set up the sequencer.\"\"\"\n yield from configure(daq, events=None, duration=None, record=record,\n use_l3t=use_l3t, controls=[evr])\n yield from scan([daq, seq], evr, start, end, nsteps)\n\n def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart, flyStop, deltaT_shots, record=False, pp_shot_delay=2):\n daq.disconnect() #make sure we start from fresh point.\n shiftMotor=foil_y\n flyMotor=foil_x\n self.setupSequencer(flyMotor, 
abs(flyStop-flyStart), deltaT_shots, pp_shot_delay=pp_shot_delay)\n daq.configure(-1, record=record, controls=[foil_x, foil_y])\n #daq.begin(-1)\n \n if isinstance(shiftSteps, int):\n RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop, shiftSteps), flyMotor, [flyStart, flyStop], seq))\n else:\n RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop, shiftSteps), flyMotor, [flyStart, flyStop], seq))\n\n def PPburst_sequence(self, nShots=None, nOffShots=2):\n if nOffShots < 2:\n raise ValueError('Minimum offshots is 2')\n ff_seq = [[185, 0, 0, 0]]\n ff_seq.append([179, 1 , 0, 0])\n ff_seq.append([179, 1 , 0, 0])\n if nShots is not None:\n if isinstance(nShots , int):\n ff_seq.append([185, nShots-2, 0, 0])\n else:\n ff_seq.append([185, int(nShots*120)-2, 0, 0])\n ff_seq.append([179, 2, 0, 0])\n if nShots is not None:\n if isinstance(nShots , int):\n for i in range(nOffShots-2):\n ff_seq.append([179, 1, 0, 0])\n else:\n for i in range(int(nOffShots*120)-2):\n ff_seq.append([179, 1, 0, 0])\n return ff_seq\n\n def prepare_seq_PPburst(self, nShots=None, nOffShots=None):\n ## Setup sequencer for requested rate\n #sync_mark = int(self._sync_markers[self._rate])\n #leave the sync marker: assume no dropping.\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0) # Run sequence once\n #seq.play_mode.put(1) # Run sequence N Times\n #seq.rep_count.put(nshots) # Run sequence N Times\n \n ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)\n seq.sequence.put_seq(ff_seq)\n\n def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):\n single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)\n ff_seq = []\n for i in range(nTimes):\n ff_seq += single_burst\n return ff_seq\n\n def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1):\n ## Setup sequencer for requested rate\n #sync_mark = int(self._sync_markers[self._rate])\n #leave the sync marker: assume no 
dropping.\n sync_mark = int(self._sync_markers[120])\n seq.sync_marker.put(sync_mark)\n seq.play_mode.put(0) # Run sequence once\n #seq.play_mode.put(1) # Run sequence N Times\n #seq.rep_count.put(nshots) # Run sequence N Times\n\n ff_seq = self.PPburst_sequence_pattern(nShots=nShots, nOffShots=nOffShots, nTimes=nTimes)\n seq.sequence.put_seq(ff_seq)\n \n def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n # looping through n round trips\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i+1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)#orignal was 1\n self.sam_x.mv(xStart)\n sleep(0.1)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos',x.sam_y.wm())\n sleep(1.2)#original was 1\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n # looping through n round trips\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i+1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()\n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1.2)#orignal was 1\n self.sam_x.mv(xStart)\n sleep(0.1)\n #pp.open()\n #sleep(sweepTime)\n #pp.close()\n seq.start()\n 
self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('ypos',x.sam_y.wm())\n sleep(1.2)#original was 1\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_y.umv(yStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n # looping through n round trips\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i+1))\n self.sam_y.mv(yEnd)\n sleep(0.05)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_y.wait()\n self.sam_x.mvr(xDelta)\n sleep(1.2)#orignal was 1\n self.sam_y.mv(yStart)\n sleep(0.05)\n pp.open()\n sleep(sweepTime)\n pp.close()\n self.sam_y.wait()\n self.sam_x.mvr(xDelta)\n sleep(1.2)#original was 1\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n\n\n\n def dumbSnake_burst_window(self,xStart,xEnd,yDelta, nRoundTrips, sweepTime,windowlist):#for burst mode\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n #windowList = np.zeros([numYwindow,numXwindow],dtype=object)\n \n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n # looping through n round trips\n for j in (windowList):\n self.sam_y.umv(windowList)\n self.sam_y.wait()\n print('Windos position %f'%(self.sam_w.wm()))\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i+1))\n self.sam_x.mv(xEnd)\n sleep(0.05)\n seq.start()#start sequence Need to be set \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)#wait for turning around \n 
self.sam_x.mv(xStart)\n sleep(0.05)\n #pp.open()\n seq.start()#start sequence \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()\n\n def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,windowList,startgrid):#for burst mode\n \"\"\" \n simple rastering for running at 120Hz with shutter open/close before\n and after motion stop.\n sleeptime is the pp close time between window \n Need some testing how to deal with intermittent motion errors.\n \"\"\"\n self.sam_x.umv(xStart)\n self.sam_y.umv(windowList[startgrid])\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n # looping through n round trips\n \n for j in range(len(windowList)-startgrid):\n self.sam_y.umv(windowList[startgrid+j])\n self.sam_y.wait()\n print('Windos position %f'%(self.sam_y.wm()))\n\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i+1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()#start sequence Need to be set \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition',self.sam_y.wm())\n sleep(1.2)#wait for turning around \n self.sam_x.mv(xStart)\n sleep(0.1)\n #pp.open()\n seq.start()#start sequence \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition',self.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n \n daq.end_run()\n daq.disconnect()\n\n\n #daq.end()\n\n #def run_serp_seq_scan_expl(self, yStart, yStop, ySteps, flyStart, flyStop, deltaT_shots, record=False, pp_shot_delay=1):\n # daq.disconnect() #make sure we start from fresh point.\n # self.setupSequencer(foil_y, abs(flyStop-flyStart), deltaT_shots, pp_shot_delay=pp_shot_delay)\n # daq.configure(-1, record=record, controls=[foil_x, foil_y])\n #daq.begin(-1)\n \n # if isinstance(ySteps, int):\n # 
RE(serp_seq_scan(foil_x, np.linspace(yStart, yStop, ySteps), foil_y, [flyStart, flyStop], seq))\n # else:\n # RE(serp_seq_scan(foil_x, np.arange(yStart, yStop, ySteps), foil_y, [flyStart, flyStop], seq))\n # #daq.end()\n",
"step-ids": [
12,
21,
22,
24,
26
]
}
|
[
12,
21,
22,
24,
26
] |
from discord.ext import commands, tasks
from discord.utils import get
import discord
import re
import json
import time
import random
import asyncio
import os
import datetime
from live_ticker_scrape import wrangle_data
from tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver , link
# One dedicated Discord client per tracked instrument; each bot's guild
# nickname and "watching" presence act as a live price ticker.
es_bot = discord.Client()
nas_bot = discord.Client()
dow_bot = discord.Client()
us10y_bot = discord.Client()
vix_bot = discord.Client()
# NOTE(review): this client is never started and the name is shadowed by a
# local inside called_second() — presumably leftover; confirm before removing.
ticker_vix = discord.Client()
dollar_bot = discord.Client()
silver_bot = discord.Client()
btc_bot = discord.Client()
eth_bot= discord.Client()
link_bot = discord.Client()
# Shared event loop: create_bots() schedules every client's start() on it.
loop = asyncio.get_event_loop()
# Startup loggers: one on_ready per client. Each @<bot>.event decorator
# captures the coroutine before the module-level name is rebound for the
# next client, so redefining on_ready repeatedly is safe here.
@es_bot.event
async def on_ready():
    print('es started')

@nas_bot.event
async def on_ready():
    print('nas started')

@dow_bot.event
async def on_ready():
    print('dow started')

@silver_bot.event
async def on_ready():
    print('silver started')

@us10y_bot.event
async def on_ready():
    print('us10y started')

@dollar_bot.event
async def on_ready():
    # Bug fix: this handler was named "on_Ready". discord.py dispatches
    # events by the coroutine's name, so the misspelled handler never fired.
    print('dollar started')

@vix_bot.event
async def on_ready():
    print('vix started')

@btc_bot.event
async def on_ready():
    print('btc started')

@eth_bot.event
async def on_ready():
    print('eth started')

@link_bot.event
async def on_ready():
    print('link started')
'''
The interval given to @tasks.loop() can be expressed as seconds, minutes,
or hours; see https://discordpy.readthedocs.io/en/latest/ext/tasks/
'''
@tasks.loop(seconds=5)
async def called_second():
    """Refresh every ticker bot from the latest scrape, every 5 seconds.

    For each instrument that wrangle_data() returned, every guild the
    corresponding bot has joined gets:
      * the bot's nickname set to an ordering prefix plus the last price,
      * the bot's GREEN/RED role swapped based on the sign of change%,
      * the bot's "watching" presence set to "<label> <change%>".

    Per-guild failures (missing roles, missing permissions) are logged and
    skipped so one bad guild cannot stall the whole update cycle.
    """
    data = wrangle_data()
    print(data)

    async def _update_bot(bot, ticker, nick_prefix, label, no_data_msg):
        """Apply one ticker's price/change to one bot across all its guilds.

        ticker: scrape dict with at least 'last' (number) and 'change%'
        (string, leading '-' on a down move) — assumed from existing usage;
        falsy when the scrape had no data for this instrument.
        """
        if not ticker:
            # No scrape result for this instrument this cycle.
            print(no_data_msg)
            return
        price = '{:20,.2f}'.format(ticker['last'])
        change = ticker['change%']
        for guild in bot.guilds:
            try:
                red = get(guild.roles, name='RED')
                green = get(guild.roles, name='GREEN')
                me = guild.me
                if '-' in change:
                    # Down move: show the RED role colour on this bot.
                    await me.remove_roles(green)
                    await me.add_roles(red)
                else:
                    await me.remove_roles(red)
                    await me.add_roles(green)
                await me.edit(nick=f'{nick_prefix} {price}')
                await bot.change_presence(activity=discord.Activity(
                    type=discord.ActivityType.watching,
                    name=f'{label} {change}'))
            except Exception:
                # Was a bare except; keep the original best-effort behaviour
                # but stop swallowing KeyboardInterrupt/SystemExit.
                print(f'broke in {guild}')

    await _update_bot(es_bot, data['es'], '1)', 'ES', 'no es data')
    await _update_bot(nas_bot, data['nas'], '2)', 'NQ', 'no nas data')
    await _update_bot(dow_bot, data['dow'], '3)', 'DJI', 'no dow data')
    # Bug fix: the original tested the imported `vix` token (always truthy)
    # instead of the scraped data, so a missing vix scrape crashed the loop
    # on ticker_vix['last'].
    await _update_bot(vix_bot, data['vix'], '4)', 'VIX', 'no vix data ')
    await _update_bot(dollar_bot, data['dxy'], '5)', 'DXY', 'no dollar data')
    # NOTE(review): us10y reuses the "4)" prefix already taken by vix — looks
    # like a copy/paste slip; confirm the intended ordering before changing.
    await _update_bot(us10y_bot, data['us10y'], '4)', 'US10Y', 'no us10y data')
    silver_data = data['silver']
    await _update_bot(silver_bot, silver_data, '6)',
                      silver_data['name'].upper() if silver_data else '',
                      'no silver data')
    await _update_bot(btc_bot, data['btc'], '7)', 'BTC', 'no data for btc')
    await _update_bot(eth_bot, data['eth'], '8)', 'ETH', 'nodata for eth')
    # Bug fix: the link branch's no-data message also said "nodata for eth".
    await _update_bot(link_bot, data['link'], '9)', 'LINK', 'no link data')
    print(f'updated ')
@called_second.before_loop
async def before():
    """Hold the 5-second update loop until every client has logged in."""
    all_bots = (es_bot, nas_bot, dow_bot, vix_bot, us10y_bot,
                dollar_bot, silver_bot, btc_bot, eth_bot, link_bot)
    for bot in all_bots:
        await bot.wait_until_ready()
    print("Finished waiting")
# Schedule the 5-second update loop; before() gates it until all bots are ready.
called_second.start()
async def create_bots():
    """Start every ticker client on the shared event loop and wait on all.

    All clients are scheduled first so they connect concurrently; each
    ``Client.start`` task only returns when that bot shuts down.
    """
    bot_tokens = [
        (es_bot, es), (nas_bot, nas), (dow_bot, dow), (vix_bot, vix),
        (us10y_bot, us10y), (dollar_bot, dollar), (silver_bot, silver),
        (btc_bot, btc), (eth_bot, eth), (link_bot, link),
    ]
    # Schedule everything up front, then await each task in order.
    tasks = [loop.create_task(bot.start(token)) for bot, token in bot_tokens]
    for task in tasks:
        await task
# Drive the event loop until create_bots() returns, i.e. until every
# bot.start(...) task has completed (normally only on shutdown).
loop.run_until_complete(create_bots())
|
normal
|
{
"blob_id": "e57109f1c5c2e1468ef1cf9f10fba743633ca150",
"index": 8094,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@es_bot.event\nasync def on_ready():\n print('es started')\n\n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n\n\n@link_bot.event\nasync def on_ready():\n print('link started')\n\n\n<mask token>\n\n\n@tasks.loop(seconds=5)\nasync def called_second():\n data = wrangle_data()\n print(data)\n ticker_es = data['es']\n ticker_nas = data['nas']\n ticker_dow = data['dow']\n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds]\n name_es = '{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'1) {name_es}')\n await es_bot.change_presence(activity=discord.Activity(type\n =discord.ActivityType.watching, name=f'ES {watching_es}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n if ticker_nas:\n guild_ids = 
[guild.id for guild in nas_bot.guilds]\n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas = ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'2) {name_nas}')\n await nas_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'NQ {watching_nas}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no nas data')\n if ticker_dow:\n guild_ids = [guild.id for guild in dow_bot.guilds]\n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dow:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'3) {name_dow}')\n await dow_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DJI {watching_dow}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds]\n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel 
in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_vix}')\n await vix_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'VIX {watching_vix}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds]\n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dollar:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'5) {name_dollar}')\n await dollar_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DXY {watching_dollar}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds]\n name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_us10y:\n discord_bot = 
guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_us10y}')\n await us10y_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'US10Y {watching_us10y}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds]\n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'6) {name_silver}')\n await silver_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f\"{ticker_silver['name'].upper()} {watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds]\n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'7) {name_btc}')\n await btc_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'BTC {watching_btc}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds]\n name_eth = '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'8) {name_eth}')\n await eth_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'ETH {watching_eth}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds]\n name_link = '{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_link:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'9) {name_link}')\n await link_bot.change_presence(activity=discord.Activity(\n 
type=discord.ActivityType.watching, name=\n f'LINK {watching_link}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready()\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n print('Finished waiting')\n\n\ncalled_second.start()\n\n\nasync def create_bots():\n es_task = loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n us10y_task = loop.create_task(us10y_bot.start(us10y))\n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = loop.create_task(silver_bot.start(silver))\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n await es_task\n await nas_task\n await dow_task\n await vix_task\n await us10y_task\n await dollar_task\n await silver_task\n await btc_task\n await eth_task\n await link_task\n\n\nloop.run_until_complete(create_bots())\n",
"step-3": "<mask token>\nes_bot = discord.Client()\nnas_bot = discord.Client()\ndow_bot = discord.Client()\nus10y_bot = discord.Client()\nvix_bot = discord.Client()\nticker_vix = discord.Client()\ndollar_bot = discord.Client()\nsilver_bot = discord.Client()\nbtc_bot = discord.Client()\neth_bot = discord.Client()\nlink_bot = discord.Client()\nloop = asyncio.get_event_loop()\n\n\n@es_bot.event\nasync def on_ready():\n print('es started')\n\n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n\n\n@link_bot.event\nasync def on_ready():\n print('link started')\n\n\n<mask token>\n\n\n@tasks.loop(seconds=5)\nasync def called_second():\n data = wrangle_data()\n print(data)\n ticker_es = data['es']\n ticker_nas = data['nas']\n ticker_dow = data['dow']\n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds]\n name_es = '{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'1) {name_es}')\n await es_bot.change_presence(activity=discord.Activity(type\n =discord.ActivityType.watching, name=f'ES {watching_es}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n if ticker_nas:\n guild_ids = [guild.id for guild in nas_bot.guilds]\n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas = ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'2) {name_nas}')\n await nas_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'NQ {watching_nas}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no nas data')\n if ticker_dow:\n guild_ids = [guild.id for guild in dow_bot.guilds]\n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dow:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'3) {name_dow}')\n await dow_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DJI 
{watching_dow}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds]\n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_vix}')\n await vix_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'VIX {watching_vix}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds]\n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dollar:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'5) {name_dollar}')\n await dollar_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DXY {watching_dollar}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds]\n name_us10y = 
'{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_us10y:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_us10y}')\n await us10y_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'US10Y {watching_us10y}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds]\n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'6) {name_silver}')\n await silver_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f\"{ticker_silver['name'].upper()} {watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds]\n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for 
guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'7) {name_btc}')\n await btc_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'BTC {watching_btc}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds]\n name_eth = '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'8) {name_eth}')\n await eth_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'ETH {watching_eth}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds]\n name_link = '{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_link:\n discord_bot = 
guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'9) {name_link}')\n await link_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'LINK {watching_link}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready()\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n print('Finished waiting')\n\n\ncalled_second.start()\n\n\nasync def create_bots():\n es_task = loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n us10y_task = loop.create_task(us10y_bot.start(us10y))\n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = loop.create_task(silver_bot.start(silver))\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n await es_task\n await nas_task\n await dow_task\n await vix_task\n await us10y_task\n await dollar_task\n await silver_task\n await btc_task\n await eth_task\n await link_task\n\n\nloop.run_until_complete(create_bots())\n",
"step-4": "from discord.ext import commands, tasks\nfrom discord.utils import get\nimport discord\nimport re\nimport json\nimport time\nimport random\nimport asyncio\nimport os\nimport datetime\nfrom live_ticker_scrape import wrangle_data\nfrom tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver, link\nes_bot = discord.Client()\nnas_bot = discord.Client()\ndow_bot = discord.Client()\nus10y_bot = discord.Client()\nvix_bot = discord.Client()\nticker_vix = discord.Client()\ndollar_bot = discord.Client()\nsilver_bot = discord.Client()\nbtc_bot = discord.Client()\neth_bot = discord.Client()\nlink_bot = discord.Client()\nloop = asyncio.get_event_loop()\n\n\n@es_bot.event\nasync def on_ready():\n print('es started')\n\n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n\n\n@link_bot.event\nasync def on_ready():\n print('link started')\n\n\n<mask token>\n\n\n@tasks.loop(seconds=5)\nasync def called_second():\n data = wrangle_data()\n print(data)\n ticker_es = data['es']\n ticker_nas = data['nas']\n ticker_dow = data['dow']\n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds]\n name_es = '{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel 
in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'1) {name_es}')\n await es_bot.change_presence(activity=discord.Activity(type\n =discord.ActivityType.watching, name=f'ES {watching_es}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n if ticker_nas:\n guild_ids = [guild.id for guild in nas_bot.guilds]\n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas = ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'2) {name_nas}')\n await nas_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'NQ {watching_nas}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no nas data')\n if ticker_dow:\n guild_ids = [guild.id for guild in dow_bot.guilds]\n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dow:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await 
discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'3) {name_dow}')\n await dow_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DJI {watching_dow}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds]\n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_vix}')\n await vix_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'VIX {watching_vix}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds]\n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dollar:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'5) {name_dollar}')\n 
await dollar_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DXY {watching_dollar}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds]\n name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_us10y:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_us10y}')\n await us10y_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'US10Y {watching_us10y}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds]\n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'6) {name_silver}')\n await silver_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f\"{ticker_silver['name'].upper()} 
{watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds]\n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'7) {name_btc}')\n await btc_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'BTC {watching_btc}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds]\n name_eth = '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'8) {name_eth}')\n await eth_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'ETH {watching_eth}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds]\n name_link = 
'{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_link:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'9) {name_link}')\n await link_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'LINK {watching_link}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready()\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n print('Finished waiting')\n\n\ncalled_second.start()\n\n\nasync def create_bots():\n es_task = loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n us10y_task = loop.create_task(us10y_bot.start(us10y))\n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = loop.create_task(silver_bot.start(silver))\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n await es_task\n await nas_task\n await dow_task\n await vix_task\n await us10y_task\n await dollar_task\n await silver_task\n await 
btc_task\n await eth_task\n await link_task\n\n\nloop.run_until_complete(create_bots())\n",
"step-5": "from discord.ext import commands, tasks\nfrom discord.utils import get\nimport discord\nimport re\nimport json \nimport time \nimport random\nimport asyncio\nimport os\nimport datetime\n\nfrom live_ticker_scrape import wrangle_data\nfrom tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver , link\n\nes_bot = discord.Client()\nnas_bot = discord.Client()\ndow_bot = discord.Client()\n\n\nus10y_bot = discord.Client()\nvix_bot = discord.Client()\nticker_vix = discord.Client()\n\n\ndollar_bot = discord.Client()\nsilver_bot = discord.Client()\n\nbtc_bot = discord.Client()\neth_bot= discord.Client()\nlink_bot = discord.Client()\n\nloop = asyncio.get_event_loop()\n\n@es_bot.event\nasync def on_ready():\n print('es started') \n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n@link_bot.event\nasync def on_ready():\n print('link started')\n \n'''\n@tasks.loop() can be changed to seconds, minutes, hours\nhttps://discordpy.readthedocs.io/en/latest/ext/tasks/\n'''\n\n@tasks.loop(seconds=5)\nasync def called_second():\n ## get all guild ids that the bot is joined in \n\n\n data = wrangle_data()\n print(data)\n\n ticker_es = data['es']\n ticker_nas = data['nas'] \n ticker_dow = data['dow'] \n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n ## es\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds] \n name_es = 
'{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"1) {name_es}\") \n await es_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"ES {watching_es}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n ##nas\n if ticker_nas:\n guild_ids = [guild.id for guild in nas_bot.guilds] \n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas= ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"2) {name_nas}\")\n await nas_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"NQ {watching_nas}\"))\n except:\n print(f'broke in {guild_channel}')\n else: \n print('no nas data')\n ## dow\n if ticker_dow: \n guild_ids = [guild.id for guild in dow_bot.guilds] \n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = 
get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_dow:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"3) {name_dow}\")\n await dow_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"DJI {watching_dow}\"))\n\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n\n ## vix \n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds] \n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"4) {name_vix}\")\n await vix_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"VIX {watching_vix}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n\n # dollar \n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds] \n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_dollar:\n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"5) {name_dollar}\")\n await dollar_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"DXY {watching_dollar}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n # us10y \n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds] \n name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_us10y:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"4) {name_us10y}\")\n await us10y_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"US10Y {watching_us10y}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n\n # silver \n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds] \n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n \n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"6) {name_silver}\")\n await silver_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"{ticker_silver['name'].upper()} {watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n #shit coin stuff\n # btc\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds] \n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids]\n\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"7) {name_btc}\")\n await btc_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"BTC {watching_btc}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n # eth \n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds] \n name_eth= '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"8) {name_eth}\")\n await 
eth_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"ETH {watching_eth}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n # link\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds] \n name_link = '{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_link:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"9) {name_link}\")\n await link_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"LINK {watching_link}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready() \n\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n\n print(\"Finished waiting\")\n\ncalled_second.start()\n\nasync def create_bots():\n es_task= loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n\n us10y_task = loop.create_task(us10y_bot.start(us10y)) \n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = 
loop.create_task(silver_bot.start(silver))\n\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n \n await es_task \n await nas_task\n await dow_task\n await vix_task\n\n await us10y_task\n await dollar_task \n await silver_task\n\n await btc_task \n await eth_task\n await link_task \n\nloop.run_until_complete(create_bots())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
    """GRU-based regressor mapping a scalar sequence to one value per sequence.

    Pipeline: a stack of linear layers widens each scalar time step to
    ``hidden_size`` features, an (optionally bidirectional) GRU encodes the
    sequence, and a small linear decoder maps the concatenated final hidden
    states of all layers/directions to a single scalar prediction.
    """

    def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False, **kw):
        """
        param:
            hidden_size: width of the GRU and of the embedded input features
            encoder_layer: number of stacked GRU layers
            step: widening factor between consecutive embedding layers
            is_bidir: whether the GRU is bidirectional
        """
        super(Model, self).__init__()

        # Convert the 1-dim input to `hidden_size` dims by widening the feature
        # dimension `step`-fold per layer: 1 -> step -> step**2 -> ... -> hidden_size.
        fc_embedding = []
        for i in range(int(math.log(hidden_size, step))):
            fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.pow(step, i + 1))))
        fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(hidden_size, step)))), hidden_size))
        self.fc_embedding = nn.Sequential(*fc_embedding)

        # The original code passed bias=False, batch_first=True positionally;
        # keyword arguments make that easily-missed configuration explicit
        # (behavior unchanged).
        self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer,
                              bias=False, batch_first=True, bidirectional=is_bidir)

        # The decoder consumes the flattened final hidden states of every
        # layer and direction: encoder_layer * num_directions * hidden_size values.
        self.decoder = nn.Sequential(
            nn.Linear(encoder_layer * (int(is_bidir) + 1) * hidden_size, hidden_size),
            nn.Linear(hidden_size, hidden_size // step),
            nn.Linear(hidden_size // step, 1),
        )

    def forward(self, input_seq, target_seq=None):
        """Predict one scalar per input sequence.

        param:
            input_seq: torch.FloatTensor, [batch, seq_len]
            target_seq: ignored; kept so train/infer share a call signature
        return:
            torch.FloatTensor, [batch]
        """
        # [batch, seq_len] -> [batch, seq_len, 1] -> [batch, seq_len, hidden_size]
        input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
        # Keep only the final hidden state of each layer/direction.
        _, encoding_result = self.encoder(input_seq)
        # [layers * dirs, batch, hidden] -> [batch, layers * dirs * hidden]
        encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
        encoding_result = torch.reshape(encoding_result, [encoding_result.shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
        seq_pred = self.decoder(encoding_result)
        return seq_pred.squeeze(1)

    def _loss_fn(self, seq_pred, target_seq):
        """Mean-squared error between predictions and targets."""
        return F.mse_loss(seq_pred, target_seq)

    def train_batch(self, input_seq, target_seq, category, optimizer, logger):
        """
        doc:
            train the model on one batch and run a single optimizer step
        param:
            input_seq: torch.FloatTensor, [batch, seq_len]
            target_seq: torch.FloatTensor, [batch]
            category: unused here; kept for a uniform trainer interface
            optimizer: optimizer object
            logger: logger object (unused here)
        return:
            (loss_value, seq_pred) tuple
        """
        seq_pred = self.forward(input_seq, target_seq)
        loss = self._loss_fn(seq_pred, target_seq)

        # optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        return loss.item(), seq_pred

    def infer_batch(self, input_seq, logger):
        """
        Model inference: run the forward pass on a batch (or a single
        instance shaped as a batch of one) and return the predictions.
        """
        return self.forward(input_seq, None)
|
normal
|
{
"blob_id": "188f82b0fb04d6814d77617fa9148113d0e6ef01",
"index": 2170,
"step-1": "<mask token>\n\n\nclass Model(nn.Module):\n <mask token>\n\n def forward(self, input_seq, target_seq=None):\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\n _, encoding_result = self.encoder(input_seq)\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\n encoding_result = torch.reshape(encoding_result, [encoding_result.\n shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\n seq_pred = self.decoder(encoding_result)\n return seq_pred.squeeze(1)\n\n def _loss_fn(self, seq_pred, target_seq):\n return F.mse_loss(seq_pred, target_seq)\n <mask token>\n\n def infer_batch(self, input_seq, logger):\n \"\"\"\n model inference.\n The given data can be in the form of batch or single isinstance\n \"\"\"\n return self.forward(input_seq, None)\n",
"step-2": "<mask token>\n\n\nclass Model(nn.Module):\n\n def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,\n **kw):\n super(Model, self).__init__()\n fc_embedding = []\n for i in range(int(math.log(hidden_size, step))):\n fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.\n pow(step, i + 1))))\n fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(\n hidden_size, step)))), hidden_size))\n self.fc_embedding = nn.Sequential(*fc_embedding)\n self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, \n False, True, bidirectional=is_bidir)\n self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(\n is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(\n hidden_size, hidden_size // step), nn.Linear(hidden_size //\n step, 1))\n\n def forward(self, input_seq, target_seq=None):\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\n _, encoding_result = self.encoder(input_seq)\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\n encoding_result = torch.reshape(encoding_result, [encoding_result.\n shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\n seq_pred = self.decoder(encoding_result)\n return seq_pred.squeeze(1)\n\n def _loss_fn(self, seq_pred, target_seq):\n return F.mse_loss(seq_pred, target_seq)\n <mask token>\n\n def infer_batch(self, input_seq, logger):\n \"\"\"\n model inference.\n The given data can be in the form of batch or single isinstance\n \"\"\"\n return self.forward(input_seq, None)\n",
"step-3": "<mask token>\n\n\nclass Model(nn.Module):\n\n def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,\n **kw):\n super(Model, self).__init__()\n fc_embedding = []\n for i in range(int(math.log(hidden_size, step))):\n fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.\n pow(step, i + 1))))\n fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(\n hidden_size, step)))), hidden_size))\n self.fc_embedding = nn.Sequential(*fc_embedding)\n self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, \n False, True, bidirectional=is_bidir)\n self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(\n is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(\n hidden_size, hidden_size // step), nn.Linear(hidden_size //\n step, 1))\n\n def forward(self, input_seq, target_seq=None):\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\n _, encoding_result = self.encoder(input_seq)\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\n encoding_result = torch.reshape(encoding_result, [encoding_result.\n shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\n seq_pred = self.decoder(encoding_result)\n return seq_pred.squeeze(1)\n\n def _loss_fn(self, seq_pred, target_seq):\n return F.mse_loss(seq_pred, target_seq)\n\n def train_batch(self, input_seq, target_seq, category, optimizer, logger):\n \"\"\"\n doc:\n train the model with given data and optimizer, return log info\n param:\n input_seq: torch.LongTensor, [batch, max_seq_len]\n target_seq: torch.LongTensor, [batch, max_seq_len]\n optimizer: optimizer object\n logger: logger object\n \"\"\"\n seq_pred = self.forward(input_seq, target_seq)\n loss = self._loss_fn(seq_pred, target_seq)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.item(), seq_pred\n\n def infer_batch(self, input_seq, logger):\n \"\"\"\n model inference.\n The given data can be in the form of batch or single isinstance\n \"\"\"\n return 
self.forward(input_seq, None)\n",
"step-4": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Model(nn.Module):\n\n def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,\n **kw):\n super(Model, self).__init__()\n fc_embedding = []\n for i in range(int(math.log(hidden_size, step))):\n fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.\n pow(step, i + 1))))\n fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(\n hidden_size, step)))), hidden_size))\n self.fc_embedding = nn.Sequential(*fc_embedding)\n self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, \n False, True, bidirectional=is_bidir)\n self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(\n is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(\n hidden_size, hidden_size // step), nn.Linear(hidden_size //\n step, 1))\n\n def forward(self, input_seq, target_seq=None):\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\n _, encoding_result = self.encoder(input_seq)\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\n encoding_result = torch.reshape(encoding_result, [encoding_result.\n shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\n seq_pred = self.decoder(encoding_result)\n return seq_pred.squeeze(1)\n\n def _loss_fn(self, seq_pred, target_seq):\n return F.mse_loss(seq_pred, target_seq)\n\n def train_batch(self, input_seq, target_seq, category, optimizer, logger):\n \"\"\"\n doc:\n train the model with given data and optimizer, return log info\n param:\n input_seq: torch.LongTensor, [batch, max_seq_len]\n target_seq: torch.LongTensor, [batch, max_seq_len]\n optimizer: optimizer object\n logger: logger object\n \"\"\"\n seq_pred = self.forward(input_seq, target_seq)\n loss = self._loss_fn(seq_pred, target_seq)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.item(), seq_pred\n\n def infer_batch(self, input_seq, logger):\n \"\"\"\n model inference.\n The given data can be in 
the form of batch or single isinstance\n \"\"\"\n return self.forward(input_seq, None)\n",
"step-5": "import math\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass Model(nn.Module):\r\n\r\n def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False, **kw):\r\n super(Model, self).__init__()\r\n fc_embedding = []\r\n\r\n # First, we should convert the 1 dim data to a higher dim\r\n for i in range(int(math.log(hidden_size, step))):\r\n fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.pow(step, i + 1))))\r\n fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(hidden_size, step)))), hidden_size))\r\n self.fc_embedding = nn.Sequential(*fc_embedding)\r\n self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, False, True,\r\n bidirectional=is_bidir)\r\n\r\n self.decoder = nn.Sequential(\r\n nn.Linear(encoder_layer * (int(is_bidir) + 1) * hidden_size, hidden_size),\r\n nn.Linear(hidden_size, hidden_size // step),\r\n nn.Linear(hidden_size // step, 1),\r\n )\r\n\r\n def forward(self, input_seq, target_seq=None):\r\n input_seq = self.fc_embedding(input_seq.unsqueeze(-1))\r\n _, encoding_result = self.encoder(input_seq)\r\n encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()\r\n encoding_result = torch.reshape(encoding_result, [encoding_result.shape[0], encoding_result.shape[1] * encoding_result.shape[2]])\r\n seq_pred = self.decoder(encoding_result)\r\n return seq_pred.squeeze(1)\r\n\r\n def _loss_fn(self, seq_pred, target_seq):\r\n return F.mse_loss(seq_pred, target_seq)\r\n\r\n def train_batch(self, input_seq, target_seq, category, optimizer, logger):\r\n \"\"\"\r\n doc:\r\n train the model with given data and optimizer, return log info\r\n param:\r\n input_seq: torch.LongTensor, [batch, max_seq_len]\r\n target_seq: torch.LongTensor, [batch, max_seq_len]\r\n optimizer: optimizer object\r\n logger: logger object\r\n \"\"\"\r\n seq_pred = self.forward(input_seq, target_seq)\r\n loss = self._loss_fn(seq_pred, target_seq)\r\n\r\n # optimize\r\n 
optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n return loss.item(), seq_pred\r\n\r\n def infer_batch(self, input_seq, logger):\r\n \"\"\"\r\n model inference.\r\n The given data can be in the form of batch or single isinstance\r\n \"\"\"\r\n return self.forward(input_seq, None)\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class CouchTests2(unittest.TestCase):
<|reserved_special_token_0|>
def test_bar(self):
self.assertEqual(1, 1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CouchTests2(unittest.TestCase):
def test_foo(self):
self.assertEqual(1, 1)
def test_bar(self):
self.assertEqual(1, 1)
<|reserved_special_token_1|>
__author__ = 'Administrator'
<|reserved_special_token_0|>
class CouchTests2(unittest.TestCase):
def test_foo(self):
self.assertEqual(1, 1)
def test_bar(self):
self.assertEqual(1, 1)
<|reserved_special_token_1|>
__author__ = 'Administrator'
import unittest
class CouchTests2(unittest.TestCase):
def test_foo(self):
self.assertEqual(1, 1)
def test_bar(self):
self.assertEqual(1, 1)
|
flexible
|
{
"blob_id": "cd4f22b8e2188e8019e7324e80d64a7b95f8f956",
"index": 1961,
"step-1": "<mask token>\n\n\nclass CouchTests2(unittest.TestCase):\n <mask token>\n\n def test_bar(self):\n self.assertEqual(1, 1)\n",
"step-2": "<mask token>\n\n\nclass CouchTests2(unittest.TestCase):\n\n def test_foo(self):\n self.assertEqual(1, 1)\n\n def test_bar(self):\n self.assertEqual(1, 1)\n",
"step-3": "__author__ = 'Administrator'\n<mask token>\n\n\nclass CouchTests2(unittest.TestCase):\n\n def test_foo(self):\n self.assertEqual(1, 1)\n\n def test_bar(self):\n self.assertEqual(1, 1)\n",
"step-4": "__author__ = 'Administrator'\nimport unittest\n\n\nclass CouchTests2(unittest.TestCase):\n\n def test_foo(self):\n self.assertEqual(1, 1)\n\n def test_bar(self):\n self.assertEqual(1, 1)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if car_state == True:
print('Car is stopped!')
if u_input == 'start':
car_state = True
print('Car has started!')
elif u_input == 'stop':
car_state == False
print('Car has stopped!')
else:
print("I don''t understand that...")
<|reserved_special_token_1|>
car_state = False
u_input = input(f'>')
if car_state == True:
print('Car is stopped!')
if u_input == 'start':
car_state = True
print('Car has started!')
elif u_input == 'stop':
car_state == False
print('Car has stopped!')
else:
print("I don''t understand that...")
<|reserved_special_token_1|>
car_state = False
u_input = input(f'>')
if car_state == True:
print('Car is stopped!')
if u_input == 'start':
car_state = True
print('Car has started!')
elif u_input == 'stop':
car_state == False
print('Car has stopped!')
else:
print('''I don''t understand that...''')
|
flexible
|
{
"blob_id": "2766339632200c26a8c6cd3abff28b1495870b9a",
"index": 9207,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif car_state == True:\n print('Car is stopped!')\nif u_input == 'start':\n car_state = True\n print('Car has started!')\nelif u_input == 'stop':\n car_state == False\n print('Car has stopped!')\nelse:\n print(\"I don''t understand that...\")\n",
"step-3": "car_state = False\nu_input = input(f'>')\nif car_state == True:\n print('Car is stopped!')\nif u_input == 'start':\n car_state = True\n print('Car has started!')\nelif u_input == 'stop':\n car_state == False\n print('Car has stopped!')\nelse:\n print(\"I don''t understand that...\")\n",
"step-4": "car_state = False\r\nu_input = input(f'>')\r\n\r\nif car_state == True:\r\n print('Car is stopped!')\r\n\r\nif u_input == 'start':\r\n car_state = True\r\n print('Car has started!')\r\nelif u_input == 'stop':\r\n car_state == False\r\n print('Car has stopped!')\r\nelse:\r\n print('''I don''t understand that...''')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Note: names of methods in this module, if seem weird, are the same as in Hunspell's ``suggest.cxx``
to keep track of them.
"""
from typing import Iterator, Union, List, Set
from spylls.hunspell.data import aff
MAX_CHAR_DISTANCE = 4
def replchars(word: str, reptable: List[aff.RepPattern]) -> Iterator[Union[str, List[str]]]:
    """
    Applies the :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table of
    typical misspellings to the word. A "_" in a pattern's replacement stands
    for a space; whenever a space appears in the result we yield two distinct
    hypotheses: the joined string (it might be one dictionary entry like
    "foo bar") and the list of parts (separate words, checked independently).
    """
    if not reptable or len(word) < 2:
        return

    for rep in reptable:
        # The replacement is match-independent, so compute it once per pattern.
        replacement = rep.replacement.replace('_', ' ')
        for found in rep.regexp.finditer(word):
            candidate = word[:found.start()] + replacement + word[found.end():]
            yield candidate
            if ' ' in candidate:
                yield candidate.split(' ', 2)
def mapchars(word: str, maptable: List[Set[str]]) -> Iterator[str]:
    """
    Recursively substitutes characters using the
    :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table (groups of
    characters the dictionary declares similar, e.g. a letter with and
    without diacritics). For the group ``aáã`` and the misspelling
    "anarchia" every combination of replacing some "a" with an accented
    variant is produced:

    >>> [*pmt.mapchars("anarchia", ['aáã'])][:3]
    ['ánarchia', 'ánárchia', 'ánárchiá']
    """
    if len(word) < 2 or not maptable:
        return

    def substitute(current, start=0):
        # Only positions >= start are considered, so each recursion level
        # moves strictly rightwards and the process terminates.
        if start >= len(current):
            return
        for group in maptable:
            for char in group:
                pos = current.find(char, start)
                if pos == -1:
                    continue
                for alternative in group:
                    if alternative == char:
                        continue
                    candidate = current[:pos] + alternative + current[pos + len(char):]
                    yield candidate
                    yield from substitute(candidate, pos + 1)

    yield from substitute(word)
def swapchar(word: str) -> Iterator[str]:
    """
    Produces permutations with adjacent chars swapped ("ahve" -> "have").
    For short (4- or 5-letter) words also produces double swaps:
    "ahev" -> "have", "owudl" -> "would".
    """
    if len(word) < 2:
        return

    for i in range(0, len(word) - 1):
        # BUGFIX: a swap must place word[i+1] before word[i]; the previous
        # code yielded word[i+1] twice, duplicating a char instead of swapping.
        yield word[:i] + word[i + 1] + word[i] + word[i + 2:]

    # try double swaps for short words
    # ahev -> have, owudl -> would
    if len(word) in [4, 5]:
        yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1] + word[-2]
        if len(word) == 5:
            yield word[0] + word[2] + word[1] + word[-1] + word[-2]
def longswapchar(word: str) -> Iterator[str]:
    """
    Produces permutations with two non-adjacent chars exchanged, at most
    MAX_CHAR_DISTANCE positions apart (adjacent swaps are handled by
    :meth:`swapchar`).
    """
    for left in range(0, len(word) - 2):
        right_limit = min(left + MAX_CHAR_DISTANCE, len(word))
        for right in range(left + 2, right_limit):
            yield word[:left] + word[right] + word[left + 1:right] + word[left] + word[right + 1:]
def badcharkey(word: str, layout: str) -> Iterator[str]:
    """
    Produces permutations with chars replaced by adjacent chars on keyboard layout ("vat -> cat"),
    and upcased variants (in case of an accidental non-capital).

    Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`; '|' in the layout
    string separates keyboard rows, so neighbors across '|' are never used.
    """
    for index in range(len(word)):
        char = word[index]
        head = word[:index]
        tail = word[index + 1:]

        upper = char.upper()
        if upper != char:
            yield head + upper + tail

        if not layout:
            continue

        # Every occurrence of the char in the layout contributes its left and
        # right neighbors (unless the neighbor is the '|' row separator).
        found = layout.find(char)
        while found != -1:
            left = found - 1
            right = found + 1
            if left >= 0 and layout[left] != '|':
                yield head + layout[left] + tail
            if right < len(layout) and layout[right] != '|':
                yield head + layout[right] + tail
            found = layout.find(char, right)
def extrachar(word: str) -> Iterator[str]:
    """
    Produces permutations with a single character deleted, one per position.
    """
    if len(word) < 2:
        return
    yield from (word[:index] + word[index + 1:] for index in range(len(word)))
def forgotchar(word: str, trystring: str) -> Iterator[str]:
    """
    Produces permutations with one char inserted in all possible positions,
    including appended at the end.

    List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,
    doesn't try anything! Chars there are expected to be sorted in order of chars usage in language
    (most used characters first).
    """
    if not trystring:
        return

    for c in trystring:
        # range must reach len(word) so the char can also be appended
        # ("hav" -> "have"); the previous range(0, len(word)) stopped one
        # position short and could never fix a missing trailing letter.
        for i in range(0, len(word) + 1):
            yield word[:i] + c + word[i:]
def movechar(word: str) -> Iterator[str]:
    """
    Produces permutations with one character moved by 2, 3 or 4 places forward
    or backward (a move by 1 is just a swap, already covered by :meth:`swapchar`).
    """
    if len(word) < 2:
        return

    # Move a character forward past at least two others.
    for src in range(len(word)):
        moved = word[src]
        limit = min(len(word), src + MAX_CHAR_DISTANCE + 1)
        for dst in range(src + 3, limit):
            yield word[:src] + word[src + 1:dst] + moved + word[dst:]

    # Move a character backward past at least two others.
    for src in reversed(range(len(word))):
        lower = max(0, src - MAX_CHAR_DISTANCE + 1)
        for dst in reversed(range(lower, src - 1)):
            yield word[:dst] + word[src] + word[dst:src] + word[src + 1:]
def badchar(word: str, trystring: str) -> Iterator[str]:
    """
    Produces permutations with one char replaced by a char from the
    :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` set (skipping
    replacements that would leave the word unchanged).
    """
    if not trystring:
        return
    for replacement in trystring:
        for index in reversed(range(len(word))):
            if word[index] != replacement:
                yield word[:index] + replacement + word[index + 1:]
def doubletwochars(word: str) -> Iterator[str]:
    """
    Produces permutations with an accidental doubling of a two-letter sequence
    collapsed (vacacation -> vacation).
    """
    if len(word) < 5:
        return
    # TODO: for "vacacation" this yields "vacation" twice; hunspell's
    # algorithm is somewhat smarter here.
    for index in range(2, len(word)):
        # An "XYXY" run ending at `index` collapses to "XY".
        if word[index] == word[index - 2] and word[index - 1] == word[index - 3]:
            yield word[:index - 1] + word[index + 1:]
def twowords(word: str) -> Iterator[List[str]]:
    """
    Produces every split of the word into two non-empty parts.
    """
    for split_at in range(1, len(word)):
        yield [word[:split_at], word[split_at:]]
|
normal
|
{
"blob_id": "cfba55505f3290a14b98d594bc871a74812c7c57",
"index": 5594,
"step-1": "<mask token>\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[\n str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n if len(word) < 2 or not reptable:\n return\n for pattern in reptable:\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_'\n , ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. 
E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos + len(option):\n ]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n for variant in mapchars_internal(word):\n yield variant\n\n\n<mask token>\n\n\ndef longswapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(\n word))):\n yield word[:first] + word[second] + word[first + 1:second] + word[\n first] + word[second + 1:]\n\n\ndef badcharkey(word: str, layout: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i + 1:]\n if c != c.upper():\n yield before + c.upper() + after\n if not layout:\n continue\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos - 1] != '|':\n yield before + layout[pos - 1] + after\n if pos + 1 < len(layout) and layout[pos + 1] != '|':\n yield before + layout[pos + 1] + after\n pos = layout.find(c, pos + 1)\n\n\n<mask token>\n\n\ndef badchar(word: str, trystring: str) 
->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i + 1:]\n\n\n<mask token>\n\n\ndef twowords(word: str) ->Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-2": "<mask token>\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[\n str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n if len(word) < 2 or not reptable:\n return\n for pattern in reptable:\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_'\n , ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. 
E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos + len(option):\n ]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n for variant in mapchars_internal(word):\n yield variant\n\n\n<mask token>\n\n\ndef longswapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(\n word))):\n yield word[:first] + word[second] + word[first + 1:second] + word[\n first] + word[second + 1:]\n\n\ndef badcharkey(word: str, layout: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i + 1:]\n if c != c.upper():\n yield before + c.upper() + after\n if not layout:\n continue\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos - 1] != '|':\n yield before + layout[pos - 1] + after\n if pos + 1 < len(layout) and layout[pos + 1] != '|':\n yield before + layout[pos + 1] + after\n pos = layout.find(c, pos + 1)\n\n\n<mask token>\n\n\ndef forgotchar(word: str, trystring: str) 
->Iterator[str]:\n \"\"\"\n Produces permutations with one char inserted in all possible possitions.\n\n List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,\n doesn't try anything! Chars there are expected to be sorted in order of chars usage in language\n (most used characters first).\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in range(0, len(word)):\n yield word[:i] + c + word[i:]\n\n\ndef movechar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,\n because it is already handled by :meth:`swapchar`)\n \"\"\"\n if len(word) < 2:\n return\n for frompos, char in enumerate(word):\n for topos in range(frompos + 3, min(len(word), frompos +\n MAX_CHAR_DISTANCE + 1)):\n yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:\n ]\n for frompos in reversed(range(0, len(word))):\n for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),\n frompos - 1)):\n yield word[:topos] + word[frompos] + word[topos:frompos] + word[\n frompos + 1:]\n\n\ndef badchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i + 1:]\n\n\ndef doubletwochars(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)\n \"\"\"\n if len(word) < 5:\n return\n for i in range(2, len(word)):\n if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:\n yield word[:i - 1] + word[i + 1:]\n\n\ndef twowords(word: str) ->Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-3": "<mask token>\nMAX_CHAR_DISTANCE = 4\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[\n str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n if len(word) < 2 or not reptable:\n return\n for pattern in reptable:\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_'\n , ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. 
E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos + len(option):\n ]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n for variant in mapchars_internal(word):\n yield variant\n\n\ndef swapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with adjacent chars swapped. For short (4 or 5 letters) words produces\n also doubleswaps: ahev -> have.\n \"\"\"\n if len(word) < 2:\n return\n for i in range(0, len(word) - 1):\n yield word[:i] + word[i + 1] + word[i + 1] + word[i + 2:]\n if len(word) in [4, 5]:\n yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1\n ] + word[-2]\n if len(word) == 5:\n yield word[0] + word[2] + word[1] + word[-1] + word[-2]\n\n\ndef longswapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(\n word))):\n yield word[:first] + word[second] + word[first + 1:second] + word[\n first] + word[second + 1:]\n\n\ndef badcharkey(word: str, layout: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY 
<spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i + 1:]\n if c != c.upper():\n yield before + c.upper() + after\n if not layout:\n continue\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos - 1] != '|':\n yield before + layout[pos - 1] + after\n if pos + 1 < len(layout) and layout[pos + 1] != '|':\n yield before + layout[pos + 1] + after\n pos = layout.find(c, pos + 1)\n\n\ndef extrachar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char removed in all possible positions\n \"\"\"\n if len(word) < 2:\n return\n for i in range(0, len(word)):\n yield word[:i] + word[i + 1:]\n\n\ndef forgotchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char inserted in all possible possitions.\n\n List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,\n doesn't try anything! Chars there are expected to be sorted in order of chars usage in language\n (most used characters first).\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in range(0, len(word)):\n yield word[:i] + c + word[i:]\n\n\ndef movechar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,\n because it is already handled by :meth:`swapchar`)\n \"\"\"\n if len(word) < 2:\n return\n for frompos, char in enumerate(word):\n for topos in range(frompos + 3, min(len(word), frompos +\n MAX_CHAR_DISTANCE + 1)):\n yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:\n ]\n for frompos in reversed(range(0, len(word))):\n for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),\n frompos - 1)):\n yield word[:topos] + word[frompos] + word[topos:frompos] + word[\n frompos + 1:]\n\n\ndef badchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY 
<spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i + 1:]\n\n\ndef doubletwochars(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)\n \"\"\"\n if len(word) < 5:\n return\n for i in range(2, len(word)):\n if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:\n yield word[:i - 1] + word[i + 1:]\n\n\ndef twowords(word: str) ->Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-4": "<mask token>\nfrom typing import Iterator, Union, List, Set\nfrom spylls.hunspell.data import aff\nMAX_CHAR_DISTANCE = 4\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[\n str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n if len(word) < 2 or not reptable:\n return\n for pattern in reptable:\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_'\n , ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. 
E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos + len(option):\n ]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n for variant in mapchars_internal(word):\n yield variant\n\n\ndef swapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with adjacent chars swapped. For short (4 or 5 letters) words produces\n also doubleswaps: ahev -> have.\n \"\"\"\n if len(word) < 2:\n return\n for i in range(0, len(word) - 1):\n yield word[:i] + word[i + 1] + word[i + 1] + word[i + 2:]\n if len(word) in [4, 5]:\n yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1\n ] + word[-2]\n if len(word) == 5:\n yield word[0] + word[2] + word[1] + word[-1] + word[-2]\n\n\ndef longswapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(\n word))):\n yield word[:first] + word[second] + word[first + 1:second] + word[\n first] + word[second + 1:]\n\n\ndef badcharkey(word: str, layout: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY 
<spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i + 1:]\n if c != c.upper():\n yield before + c.upper() + after\n if not layout:\n continue\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos - 1] != '|':\n yield before + layout[pos - 1] + after\n if pos + 1 < len(layout) and layout[pos + 1] != '|':\n yield before + layout[pos + 1] + after\n pos = layout.find(c, pos + 1)\n\n\ndef extrachar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char removed in all possible positions\n \"\"\"\n if len(word) < 2:\n return\n for i in range(0, len(word)):\n yield word[:i] + word[i + 1:]\n\n\ndef forgotchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char inserted in all possible possitions.\n\n List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,\n doesn't try anything! Chars there are expected to be sorted in order of chars usage in language\n (most used characters first).\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in range(0, len(word)):\n yield word[:i] + c + word[i:]\n\n\ndef movechar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,\n because it is already handled by :meth:`swapchar`)\n \"\"\"\n if len(word) < 2:\n return\n for frompos, char in enumerate(word):\n for topos in range(frompos + 3, min(len(word), frompos +\n MAX_CHAR_DISTANCE + 1)):\n yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:\n ]\n for frompos in reversed(range(0, len(word))):\n for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),\n frompos - 1)):\n yield word[:topos] + word[frompos] + word[topos:frompos] + word[\n frompos + 1:]\n\n\ndef badchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY 
<spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i + 1:]\n\n\ndef doubletwochars(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)\n \"\"\"\n if len(word) < 5:\n return\n for i in range(2, len(word)):\n if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:\n yield word[:i - 1] + word[i + 1:]\n\n\ndef twowords(word: str) ->Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-5": "\"\"\"\nNote: names of methods in this module, if seem weird, are the same as in Hunspell's ``suggest.cxx``\nto keep track of them.\n\"\"\"\n\nfrom typing import Iterator, Union, List, Set\n\nfrom spylls.hunspell.data import aff\n\n\nMAX_CHAR_DISTANCE = 4\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) -> Iterator[Union[str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n\n if len(word) < 2 or not reptable:\n return\n\n for pattern in reptable:\n # TODO: compiled at aff loading\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_', ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) -> Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. 
E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos+len(option):]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n\n for variant in mapchars_internal(word):\n yield variant\n\n\ndef swapchar(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with adjacent chars swapped. For short (4 or 5 letters) words produces\n also doubleswaps: ahev -> have.\n \"\"\"\n\n if len(word) < 2:\n return\n\n for i in range(0, len(word) - 1):\n yield word[:i] + word[i+1] + word[i+1] + word[i+2:]\n\n # try double swaps for short words\n # ahev -> have, owudl -> would\n if len(word) in [4, 5]:\n yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1] + word[-2]\n if len(word) == 5:\n yield word[0] + word[2] + word[1] + word[-1] + word[-2]\n\n\ndef longswapchar(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(word))):\n yield word[:first] + word[second] + word[first+1:second] + word[first] + word[second+1:]\n\n\ndef badcharkey(word: str, layout: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental 
uppercase).\n\n Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i+1:]\n if c != c.upper():\n yield before + c.upper() + after\n\n if not layout:\n continue\n\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos-1] != '|':\n yield before + layout[pos-1] + after\n if pos + 1 < len(layout) and layout[pos+1] != '|':\n yield before + layout[pos+1] + after\n pos = layout.find(c, pos+1)\n\n\ndef extrachar(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with one char removed in all possible positions\n \"\"\"\n if len(word) < 2:\n return\n\n for i in range(0, len(word)):\n yield word[:i] + word[i+1:]\n\n\ndef forgotchar(word: str, trystring: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with one char inserted in all possible possitions.\n\n List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,\n doesn't try anything! Chars there are expected to be sorted in order of chars usage in language\n (most used characters first).\n \"\"\"\n\n if not trystring:\n return\n\n for c in trystring:\n for i in range(0, len(word)):\n yield word[:i] + c + word[i:]\n\n\ndef movechar(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,\n because it is already handled by :meth:`swapchar`)\n \"\"\"\n\n if len(word) < 2:\n return\n\n for frompos, char in enumerate(word):\n for topos in range(frompos + 3, min(len(word), frompos + MAX_CHAR_DISTANCE + 1)):\n yield word[:frompos] + word[frompos+1:topos] + char + word[topos:]\n\n for frompos in reversed(range(0, len(word))):\n for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1), frompos - 1)):\n yield word[:topos] + word[frompos] + word[topos:frompos] + word[frompos+1:]\n\n\ndef badchar(word: str, trystring: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by 
chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n\n if not trystring:\n return\n\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i+1:]\n\n\ndef doubletwochars(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)\n \"\"\"\n\n if len(word) < 5:\n return\n\n # TODO: 1) for vacacation yields \"vacation\" twice, hunspell's algo kinda wiser\n # 2) maybe just use regexp?..\n for i in range(2, len(word)):\n if word[i-2] == word[i] and word[i-3] == word[i-1]:\n yield word[:i-1] + word[i+1:]\n\n\ndef twowords(word: str) -> Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-ids": [
6,
9,
12,
13,
14
]
}
|
[
6,
9,
12,
13,
14
] |
<|reserved_special_token_0|>
def concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):
    """
    Concatenate every CSV matched by the glob ``path`` into one CSV at ``save_path``.

    Frames are concatenated with a fresh index and the index is NOT written
    out: the previous version wrote the index column, so each run through
    this pipeline accumulated an extra 'Unnamed: 0' column in the output.

    Raises ValueError (with a clear message) when no files match ``path``;
    the old code raised the same error type opaquely from ``pd.concat([])``.
    """
    csvs = glob.glob(path)
    if not csvs:
        raise ValueError(f'no CSV files match {path!r}')
    frames = [pd.read_csv(csv) for csv in csvs]
    final_df = pd.concat(frames, ignore_index=True)
    final_df.to_csv(save_path, index=False)
def clean_csv(path: str, save_pth: str):
    """
    Read the CSV at ``path``, drop duplicated and invalid names, and write the
    cleaned frame to ``save_pth``.
    """
    cleaned = remove_invalid_rows_df(remove_dups_df(pd.read_csv(path)))
    cleaned.to_csv(save_pth)
def remove_dups_df(df: pd.DataFrame):
    """
    Remove rows with duplicated names; sorts the frame by name in place as a
    side effect, and returns the (also mutated) frame.

    NOTE(review): ``keep=False`` drops *every* row of a duplicated name, not
    just the extra occurrences — confirm this is intended; ``keep='first'``
    would retain one row per name instead.
    """
    df.sort_values('name', inplace=True)
    df.drop_duplicates(subset='name', keep=False, inplace=True)
    return df
def remove_invalid_rows_df(df: pd.DataFrame):
    """
    Keep only the rows whose ``name`` consists solely of characters from the
    module-level ``ALLOWED_CHARS`` string.
    """
    is_valid = df['name'].apply(lambda name: all(ch in ALLOWED_CHARS for ch in name))
    return df[is_valid]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):
csvs = glob.glob(path)
li = []
for csv in csvs:
df = pd.read_csv(csv)
li.append(df)
final_df = pd.concat(li)
final_df.to_csv(save_path)
def clean_csv(path: str, save_pth: str):
df = pd.read_csv(path)
df = remove_dups_df(df)
df = remove_invalid_rows_df(df)
df.to_csv(save_pth)
def remove_dups_df(df: pd.DataFrame):
df.sort_values('name', inplace=True)
df.drop_duplicates(subset='name', keep=False, inplace=True)
return df
def remove_invalid_rows_df(df: pd.DataFrame):
return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]
<|reserved_special_token_0|>
for line in f:
count += 1
split = line.split()
df = df.append({'count': split[0], 'name': split[1].capitalize()},
ignore_index=True)
if count % save_every == 0:
df.to_csv('fbnames.csv')
df.to_csv('fbnames.csv')
<|reserved_special_token_0|>
for file in files:
f = open(f'namesbystate\\{file}', 'r')
count = 0
for line in f:
count += 1
split = line.split(',')
df = df.append({'count': int(split[4]), 'name': split[3]},
ignore_index=True)
if save_every % count == 0:
df = df.groupby(['name']).sum()
df.to_csv('namesbystates.csv')
df.groupby(['name']).sum()
df.to_csv('namesbystates.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ALLOWED_CHARS = string.ascii_letters + '-,. "()\''
def concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):
csvs = glob.glob(path)
li = []
for csv in csvs:
df = pd.read_csv(csv)
li.append(df)
final_df = pd.concat(li)
final_df.to_csv(save_path)
def clean_csv(path: str, save_pth: str):
df = pd.read_csv(path)
df = remove_dups_df(df)
df = remove_invalid_rows_df(df)
df.to_csv(save_pth)
def remove_dups_df(df: pd.DataFrame):
df.sort_values('name', inplace=True)
df.drop_duplicates(subset='name', keep=False, inplace=True)
return df
def remove_invalid_rows_df(df: pd.DataFrame):
return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]
df = pd.DataFrame(columns=['count', 'name'])
f = open('fbnames.txt', 'r')
count = 0
save_every = 2000
for line in f:
count += 1
split = line.split()
df = df.append({'count': split[0], 'name': split[1].capitalize()},
ignore_index=True)
if count % save_every == 0:
df.to_csv('fbnames.csv')
df.to_csv('fbnames.csv')
files = os.listdir('namesbystate/')
df = pd.DataFrame(columns=['count', 'name'])
count = 0
save_every = 2000
for file in files:
f = open(f'namesbystate\\{file}', 'r')
count = 0
for line in f:
count += 1
split = line.split(',')
df = df.append({'count': int(split[4]), 'name': split[3]},
ignore_index=True)
if save_every % count == 0:
df = df.groupby(['name']).sum()
df.to_csv('namesbystates.csv')
df.groupby(['name']).sum()
df.to_csv('namesbystates.csv')
<|reserved_special_token_1|>
import pandas as pd
import glob
import string
import os
ALLOWED_CHARS = string.ascii_letters + '-,. "()\''
def concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):
csvs = glob.glob(path)
li = []
for csv in csvs:
df = pd.read_csv(csv)
li.append(df)
final_df = pd.concat(li)
final_df.to_csv(save_path)
def clean_csv(path: str, save_pth: str):
df = pd.read_csv(path)
df = remove_dups_df(df)
df = remove_invalid_rows_df(df)
df.to_csv(save_pth)
def remove_dups_df(df: pd.DataFrame):
df.sort_values('name', inplace=True)
df.drop_duplicates(subset='name', keep=False, inplace=True)
return df
def remove_invalid_rows_df(df: pd.DataFrame):
return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]
df = pd.DataFrame(columns=['count', 'name'])
f = open('fbnames.txt', 'r')
count = 0
save_every = 2000
for line in f:
count += 1
split = line.split()
df = df.append({'count': split[0], 'name': split[1].capitalize()},
ignore_index=True)
if count % save_every == 0:
df.to_csv('fbnames.csv')
df.to_csv('fbnames.csv')
files = os.listdir('namesbystate/')
df = pd.DataFrame(columns=['count', 'name'])
count = 0
save_every = 2000
for file in files:
f = open(f'namesbystate\\{file}', 'r')
count = 0
for line in f:
count += 1
split = line.split(',')
df = df.append({'count': int(split[4]), 'name': split[3]},
ignore_index=True)
if save_every % count == 0:
df = df.groupby(['name']).sum()
df.to_csv('namesbystates.csv')
df.groupby(['name']).sum()
df.to_csv('namesbystates.csv')
<|reserved_special_token_1|>
import pandas as pd
import glob
import string
import os
# Characters permitted in a cleaned name: ASCII letters plus basic punctuation.
ALLOWED_CHARS = string.ascii_letters + "-,. \"()'"
def concat_all_data(path : str = 'Data/*.csv', save_path : str = 'Data/final.csv'):
    """Concatenate every CSV matched by *path* into a single file at *save_path*.

    Args:
        path: glob pattern selecting the input CSV files.
        save_path: destination path for the combined CSV.
    """
    frames = [pd.read_csv(csv_path) for csv_path in glob.glob(path)]
    pd.concat(frames).to_csv(save_path)
def clean_csv(path : str, save_pth : str):
    """Load the CSV at *path*, drop duplicate and invalid name rows, save to *save_pth*."""
    cleaned = remove_invalid_rows_df(remove_dups_df(pd.read_csv(path)))
    cleaned.to_csv(save_pth)
def remove_dups_df(df : pd.DataFrame):
    """Sort *df* by 'name' and remove every row whose name occurs more than once.

    Mutates *df* in place (matching drop_duplicates(..., inplace=True) behaviour)
    and also returns it for chaining.
    """
    df.sort_values('name', inplace=True)
    # duplicated(keep=False) flags all rows sharing a name, including every copy.
    duplicated_names = df['name'].duplicated(keep=False)
    df.drop(df.index[duplicated_names], inplace=True)
    return df
def remove_invalid_rows_df(df : pd.DataFrame):
    """Return only the rows whose 'name' is built entirely from ALLOWED_CHARS."""
    valid = df['name'].apply(lambda name: all(ch in ALLOWED_CHARS for ch in name))
    return df[valid]
# Parse the Facebook first-name frequency dump ("<count> <name>" per line)
# into a (count, name) DataFrame, checkpointing to CSV periodically.
df = pd.DataFrame(columns=['count', 'name'])
f = open("fbnames.txt", "r")  # NOTE(review): handle is never closed; a with-block would be safer
count = 0
save_every = 2000  # checkpoint interval, in parsed lines
for line in f:
    count += 1
    split = line.split()
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.x);
    # pd.concat or collecting rows in a list is the modern replacement.
    df = df.append({'count':split[0], 'name':split[1].capitalize()}, ignore_index=True)
    if count % save_every == 0:
        df.to_csv("fbnames.csv")
df.to_csv("fbnames.csv")
# Aggregate per-name counts from the SSA "names by state" files
# (rows look like "<state>,<sex>,<year>,<name>,<count>").
files = os.listdir("namesbystate/")
df = pd.DataFrame(columns=['count', 'name'])
count = 0
save_every = 2000  # checkpoint interval, in parsed lines
for file in files:
    # BUG FIX: the path used a literal backslash (f"namesbystate\{file}");
    # use the same separator as the os.listdir call above so it works cross-platform.
    f = open(f"namesbystate/{file}", "r")
    count = 0
    for line in f:
        count += 1
        split = line.split(",")
        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.x);
        # pd.concat or collecting rows in a list is the modern replacement.
        df = df.append({"count": int(split[4]), "name": split[3]}, ignore_index=True)
        # BUG FIX: was `save_every % count == 0`, which fires on divisors of
        # 2000 rather than every 2000 parsed lines.
        if count % save_every == 0:
            # BUG FIX: the checkpoint used to reassign df to the grouped frame,
            # collapsing the accumulator mid-run (and moving 'name' into the
            # index); write the aggregate out without touching the accumulator.
            df.groupby(['name']).sum().to_csv("namesbystates.csv")
    f.close()  # close each state file once parsed
# BUG FIX: the final groupby result was discarded and the raw frame written;
# write the aggregated counts instead.
df.groupby(['name']).sum().to_csv("namesbystates.csv")
|
flexible
|
{
"blob_id": "0a5e30483c1fde10410c442a1ccd1f79bfb329c8",
"index": 8457,
"step-1": "<mask token>\n\n\ndef concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):\n csvs = glob.glob(path)\n li = []\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n final_df = pd.concat(li)\n final_df.to_csv(save_path)\n\n\ndef clean_csv(path: str, save_pth: str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n df.to_csv(save_pth)\n\n\ndef remove_dups_df(df: pd.DataFrame):\n df.sort_values('name', inplace=True)\n df.drop_duplicates(subset='name', keep=False, inplace=True)\n return df\n\n\ndef remove_invalid_rows_df(df: pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):\n csvs = glob.glob(path)\n li = []\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n final_df = pd.concat(li)\n final_df.to_csv(save_path)\n\n\ndef clean_csv(path: str, save_pth: str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n df.to_csv(save_pth)\n\n\ndef remove_dups_df(df: pd.DataFrame):\n df.sort_values('name', inplace=True)\n df.drop_duplicates(subset='name', keep=False, inplace=True)\n return df\n\n\ndef remove_invalid_rows_df(df: pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\n\n<mask token>\nfor line in f:\n count += 1\n split = line.split()\n df = df.append({'count': split[0], 'name': split[1].capitalize()},\n ignore_index=True)\n if count % save_every == 0:\n df.to_csv('fbnames.csv')\ndf.to_csv('fbnames.csv')\n<mask token>\nfor file in files:\n f = open(f'namesbystate\\\\{file}', 'r')\n count = 0\n for line in f:\n count += 1\n split = line.split(',')\n df = df.append({'count': int(split[4]), 'name': split[3]},\n ignore_index=True)\n if save_every % count == 0:\n df = df.groupby(['name']).sum()\n df.to_csv('namesbystates.csv')\ndf.groupby(['name']).sum()\ndf.to_csv('namesbystates.csv')\n",
"step-3": "<mask token>\nALLOWED_CHARS = string.ascii_letters + '-,. \"()\\''\n\n\ndef concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):\n csvs = glob.glob(path)\n li = []\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n final_df = pd.concat(li)\n final_df.to_csv(save_path)\n\n\ndef clean_csv(path: str, save_pth: str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n df.to_csv(save_pth)\n\n\ndef remove_dups_df(df: pd.DataFrame):\n df.sort_values('name', inplace=True)\n df.drop_duplicates(subset='name', keep=False, inplace=True)\n return df\n\n\ndef remove_invalid_rows_df(df: pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\n\ndf = pd.DataFrame(columns=['count', 'name'])\nf = open('fbnames.txt', 'r')\ncount = 0\nsave_every = 2000\nfor line in f:\n count += 1\n split = line.split()\n df = df.append({'count': split[0], 'name': split[1].capitalize()},\n ignore_index=True)\n if count % save_every == 0:\n df.to_csv('fbnames.csv')\ndf.to_csv('fbnames.csv')\nfiles = os.listdir('namesbystate/')\ndf = pd.DataFrame(columns=['count', 'name'])\ncount = 0\nsave_every = 2000\nfor file in files:\n f = open(f'namesbystate\\\\{file}', 'r')\n count = 0\n for line in f:\n count += 1\n split = line.split(',')\n df = df.append({'count': int(split[4]), 'name': split[3]},\n ignore_index=True)\n if save_every % count == 0:\n df = df.groupby(['name']).sum()\n df.to_csv('namesbystates.csv')\ndf.groupby(['name']).sum()\ndf.to_csv('namesbystates.csv')\n",
"step-4": "import pandas as pd\nimport glob\nimport string\nimport os\nALLOWED_CHARS = string.ascii_letters + '-,. \"()\\''\n\n\ndef concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):\n csvs = glob.glob(path)\n li = []\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n final_df = pd.concat(li)\n final_df.to_csv(save_path)\n\n\ndef clean_csv(path: str, save_pth: str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n df.to_csv(save_pth)\n\n\ndef remove_dups_df(df: pd.DataFrame):\n df.sort_values('name', inplace=True)\n df.drop_duplicates(subset='name', keep=False, inplace=True)\n return df\n\n\ndef remove_invalid_rows_df(df: pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\n\ndf = pd.DataFrame(columns=['count', 'name'])\nf = open('fbnames.txt', 'r')\ncount = 0\nsave_every = 2000\nfor line in f:\n count += 1\n split = line.split()\n df = df.append({'count': split[0], 'name': split[1].capitalize()},\n ignore_index=True)\n if count % save_every == 0:\n df.to_csv('fbnames.csv')\ndf.to_csv('fbnames.csv')\nfiles = os.listdir('namesbystate/')\ndf = pd.DataFrame(columns=['count', 'name'])\ncount = 0\nsave_every = 2000\nfor file in files:\n f = open(f'namesbystate\\\\{file}', 'r')\n count = 0\n for line in f:\n count += 1\n split = line.split(',')\n df = df.append({'count': int(split[4]), 'name': split[3]},\n ignore_index=True)\n if save_every % count == 0:\n df = df.groupby(['name']).sum()\n df.to_csv('namesbystates.csv')\ndf.groupby(['name']).sum()\ndf.to_csv('namesbystates.csv')\n",
"step-5": "import pandas as pd \nimport glob\nimport string \nimport os\n\nALLOWED_CHARS = string.ascii_letters + \"-,. \\\"()'\"\n\ndef concat_all_data(path : str = 'Data/*.csv', save_path : str = 'Data/final.csv'):\n csvs = glob.glob(path)\n\n li = []\n\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n\n final_df = pd.concat(li)\n\n final_df.to_csv(save_path)\n\ndef clean_csv(path : str, save_pth : str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n\n df.to_csv(save_pth)\n\ndef remove_dups_df(df : pd.DataFrame):\n df.sort_values(\"name\", inplace = True)\n df.drop_duplicates(subset=\"name\", keep=False, inplace=True)\n\n return df\n\ndef remove_invalid_rows_df(df : pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\ndf = pd.DataFrame(columns=['count', 'name'])\n\nf = open(\"fbnames.txt\", \"r\")\ncount = 0\nsave_every = 2000\n\nfor line in f:\n count += 1\n split = line.split()\n df = df.append({'count':split[0], 'name':split[1].capitalize()}, ignore_index=True)\n \n if count % save_every == 0:\n df.to_csv(\"fbnames.csv\")\n\ndf.to_csv(\"fbnames.csv\")\n\n\nfiles = os.listdir(\"namesbystate/\")\n\ndf = pd.DataFrame(columns=['count', 'name'])\n\n\n\ncount = 0\nsave_every = 2000\n\nfor file in files:\n f = open(f\"namesbystate\\{file}\", \"r\")\n count = 0\n for line in f:\n count += 1\n split = line.split(\",\")\n df = df.append({\"count\":int(split[4]),\"name\":split[3]}, ignore_index=True)\n if save_every % count == 0:\n df = df.groupby(['name']).sum()\n df.to_csv(\"namesbystates.csv\")\n\ndf.groupby(['name']).sum()\ndf.to_csv(\"namesbystates.csv\")",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Server:
def __init__(self):
self.private_key = random.randint(0, 2 ** 100)
self.salt = random.randint(0, 2 ** 100)
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.u = random.randint(0, 2 ** 128)
def agree_params(self, n, g, password):
self.n = n
self.g = g
self.generate_password_params(password)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def compute_hashes(self):
self.s = pow(self.client_public_key * pow(self.v, self.u, self.n),
self.private_key, self.n)
s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(
self.s))
hasher = hashlib.sha256()
hasher.update(s_bytes)
self.k = hasher.digest()
<|reserved_special_token_0|>
class Client:
def __init__(self, n, g, password):
self.n = n
self.g = g
self.password = password
self.private_key = random.randint(0, 2 ** 100)
def agree_params(self, server):
server.agree_params(self.n, self.g, self.password)
def accept_salt_public_key_u(self, salt, server_public_key, u):
self.salt = salt
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.server_public_key = server_public_key
self.u = u
def send_public_key(self, server):
self.public_key = pow(self.g, self.private_key, self.n)
server.accept_public_key(self.public_key)
def compute_hashes(self):
hasher = hashlib.sha256()
hasher.update(self.salt_bytes + self.password.encode('ascii'))
x = int(hasher.digest().hex(), 16)
self.s = pow(self.server_public_key, self.private_key + self.u * x,
self.n)
s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(
self.s))
hasher = hashlib.sha256()
hasher.update(s_bytes)
self.k = hasher.digest()
def authenticate(self, server):
hasher = hashlib.sha256()
hasher.update(self.k + self.salt_bytes)
client_hmac = hasher.digest().hex()
if server.authenticate(client_hmac):
print('Successfully authenticated')
else:
raise Exception('Failed to authenticate')
class BadServer(Server):
def __init__(self, n, g):
self.private_key = random.randint(0, 2 ** 100)
self.salt = random.randint(0, 2 ** 100)
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.u = random.randint(0, 2 ** 128)
self.n = n
self.g = g
def compute_hashes(self):
pass
def authenticate(self, client_hmac):
self.client_hmac = client_hmac
return True
def load_dict(self, path_to_dict):
with open(path_to_dict) as dict_file:
self.valid_words = set(dict_file.read().split())
def crack_password(self, path_to_dict):
self.load_dict(path_to_dict)
for w in self.valid_words:
hasher_x = hashlib.sha256()
hasher_x.update(self.salt_bytes + w.encode('ascii'))
x = int(hasher_x.digest().hex(), 16)
v = pow(self.g, x, self.n)
s = pow(self.client_public_key * pow(v, self.u, self.n), self.
private_key, self.n)
s_bytes = s.to_bytes(byteorder='big', length=get_num_byte_len(s))
hasher_k = hashlib.sha256()
hasher_k.update(s_bytes)
k = hasher_k.digest()
hasher_hmac = hashlib.sha256()
hasher_hmac.update(k + self.salt_bytes)
check_hmac = hasher_hmac.digest().hex()
if check_hmac == self.client_hmac:
print('Successfully cracked password. Password = {}'.format(w))
return
raise Exception('Failed to crack password')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Server:
def __init__(self):
self.private_key = random.randint(0, 2 ** 100)
self.salt = random.randint(0, 2 ** 100)
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.u = random.randint(0, 2 ** 128)
def agree_params(self, n, g, password):
self.n = n
self.g = g
self.generate_password_params(password)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def accept_public_key(self, client_public_key):
self.client_public_key = client_public_key
def compute_hashes(self):
self.s = pow(self.client_public_key * pow(self.v, self.u, self.n),
self.private_key, self.n)
s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(
self.s))
hasher = hashlib.sha256()
hasher.update(s_bytes)
self.k = hasher.digest()
def authenticate(self, client_hmac):
hasher = hashlib.sha256()
hasher.update(self.k + self.salt_bytes)
check_hmac = hasher.digest().hex()
if check_hmac == client_hmac:
return True
else:
print(check_hmac, client_hmac)
return False
class Client:
def __init__(self, n, g, password):
self.n = n
self.g = g
self.password = password
self.private_key = random.randint(0, 2 ** 100)
def agree_params(self, server):
server.agree_params(self.n, self.g, self.password)
def accept_salt_public_key_u(self, salt, server_public_key, u):
self.salt = salt
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.server_public_key = server_public_key
self.u = u
def send_public_key(self, server):
self.public_key = pow(self.g, self.private_key, self.n)
server.accept_public_key(self.public_key)
def compute_hashes(self):
hasher = hashlib.sha256()
hasher.update(self.salt_bytes + self.password.encode('ascii'))
x = int(hasher.digest().hex(), 16)
self.s = pow(self.server_public_key, self.private_key + self.u * x,
self.n)
s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(
self.s))
hasher = hashlib.sha256()
hasher.update(s_bytes)
self.k = hasher.digest()
def authenticate(self, server):
hasher = hashlib.sha256()
hasher.update(self.k + self.salt_bytes)
client_hmac = hasher.digest().hex()
if server.authenticate(client_hmac):
print('Successfully authenticated')
else:
raise Exception('Failed to authenticate')
class BadServer(Server):
def __init__(self, n, g):
self.private_key = random.randint(0, 2 ** 100)
self.salt = random.randint(0, 2 ** 100)
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.u = random.randint(0, 2 ** 128)
self.n = n
self.g = g
def compute_hashes(self):
pass
def authenticate(self, client_hmac):
self.client_hmac = client_hmac
return True
def load_dict(self, path_to_dict):
with open(path_to_dict) as dict_file:
self.valid_words = set(dict_file.read().split())
def crack_password(self, path_to_dict):
self.load_dict(path_to_dict)
for w in self.valid_words:
hasher_x = hashlib.sha256()
hasher_x.update(self.salt_bytes + w.encode('ascii'))
x = int(hasher_x.digest().hex(), 16)
v = pow(self.g, x, self.n)
s = pow(self.client_public_key * pow(v, self.u, self.n), self.
private_key, self.n)
s_bytes = s.to_bytes(byteorder='big', length=get_num_byte_len(s))
hasher_k = hashlib.sha256()
hasher_k.update(s_bytes)
k = hasher_k.digest()
hasher_hmac = hashlib.sha256()
hasher_hmac.update(k + self.salt_bytes)
check_hmac = hasher_hmac.digest().hex()
if check_hmac == self.client_hmac:
print('Successfully cracked password. Password = {}'.format(w))
return
raise Exception('Failed to crack password')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Server:
def __init__(self):
self.private_key = random.randint(0, 2 ** 100)
self.salt = random.randint(0, 2 ** 100)
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.u = random.randint(0, 2 ** 128)
def agree_params(self, n, g, password):
self.n = n
self.g = g
self.generate_password_params(password)
<|reserved_special_token_0|>
def send_salt_public_key_u(self, client):
self.public_key = pow(self.g, self.private_key, self.n)
client.accept_salt_public_key_u(self.salt, self.public_key, self.u)
def accept_public_key(self, client_public_key):
self.client_public_key = client_public_key
def compute_hashes(self):
self.s = pow(self.client_public_key * pow(self.v, self.u, self.n),
self.private_key, self.n)
s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(
self.s))
hasher = hashlib.sha256()
hasher.update(s_bytes)
self.k = hasher.digest()
def authenticate(self, client_hmac):
hasher = hashlib.sha256()
hasher.update(self.k + self.salt_bytes)
check_hmac = hasher.digest().hex()
if check_hmac == client_hmac:
return True
else:
print(check_hmac, client_hmac)
return False
class Client:
def __init__(self, n, g, password):
self.n = n
self.g = g
self.password = password
self.private_key = random.randint(0, 2 ** 100)
def agree_params(self, server):
server.agree_params(self.n, self.g, self.password)
def accept_salt_public_key_u(self, salt, server_public_key, u):
self.salt = salt
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.server_public_key = server_public_key
self.u = u
def send_public_key(self, server):
self.public_key = pow(self.g, self.private_key, self.n)
server.accept_public_key(self.public_key)
def compute_hashes(self):
hasher = hashlib.sha256()
hasher.update(self.salt_bytes + self.password.encode('ascii'))
x = int(hasher.digest().hex(), 16)
self.s = pow(self.server_public_key, self.private_key + self.u * x,
self.n)
s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(
self.s))
hasher = hashlib.sha256()
hasher.update(s_bytes)
self.k = hasher.digest()
def authenticate(self, server):
hasher = hashlib.sha256()
hasher.update(self.k + self.salt_bytes)
client_hmac = hasher.digest().hex()
if server.authenticate(client_hmac):
print('Successfully authenticated')
else:
raise Exception('Failed to authenticate')
class BadServer(Server):
def __init__(self, n, g):
self.private_key = random.randint(0, 2 ** 100)
self.salt = random.randint(0, 2 ** 100)
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.u = random.randint(0, 2 ** 128)
self.n = n
self.g = g
def compute_hashes(self):
pass
def authenticate(self, client_hmac):
self.client_hmac = client_hmac
return True
def load_dict(self, path_to_dict):
with open(path_to_dict) as dict_file:
self.valid_words = set(dict_file.read().split())
def crack_password(self, path_to_dict):
self.load_dict(path_to_dict)
for w in self.valid_words:
hasher_x = hashlib.sha256()
hasher_x.update(self.salt_bytes + w.encode('ascii'))
x = int(hasher_x.digest().hex(), 16)
v = pow(self.g, x, self.n)
s = pow(self.client_public_key * pow(v, self.u, self.n), self.
private_key, self.n)
s_bytes = s.to_bytes(byteorder='big', length=get_num_byte_len(s))
hasher_k = hashlib.sha256()
hasher_k.update(s_bytes)
k = hasher_k.digest()
hasher_hmac = hashlib.sha256()
hasher_hmac.update(k + self.salt_bytes)
check_hmac = hasher_hmac.digest().hex()
if check_hmac == self.client_hmac:
print('Successfully cracked password. Password = {}'.format(w))
return
raise Exception('Failed to crack password')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Server:
def __init__(self):
self.private_key = random.randint(0, 2 ** 100)
self.salt = random.randint(0, 2 ** 100)
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.u = random.randint(0, 2 ** 128)
def agree_params(self, n, g, password):
self.n = n
self.g = g
self.generate_password_params(password)
def generate_password_params(self, password):
hasher = hashlib.sha256()
hasher.update(self.salt_bytes + password.encode('ascii'))
x = int(hasher.digest().hex(), 16)
self.v = pow(self.g, x, self.n)
def send_salt_public_key_u(self, client):
self.public_key = pow(self.g, self.private_key, self.n)
client.accept_salt_public_key_u(self.salt, self.public_key, self.u)
def accept_public_key(self, client_public_key):
self.client_public_key = client_public_key
def compute_hashes(self):
self.s = pow(self.client_public_key * pow(self.v, self.u, self.n),
self.private_key, self.n)
s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(
self.s))
hasher = hashlib.sha256()
hasher.update(s_bytes)
self.k = hasher.digest()
def authenticate(self, client_hmac):
hasher = hashlib.sha256()
hasher.update(self.k + self.salt_bytes)
check_hmac = hasher.digest().hex()
if check_hmac == client_hmac:
return True
else:
print(check_hmac, client_hmac)
return False
class Client:
def __init__(self, n, g, password):
self.n = n
self.g = g
self.password = password
self.private_key = random.randint(0, 2 ** 100)
def agree_params(self, server):
server.agree_params(self.n, self.g, self.password)
def accept_salt_public_key_u(self, salt, server_public_key, u):
self.salt = salt
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.server_public_key = server_public_key
self.u = u
def send_public_key(self, server):
self.public_key = pow(self.g, self.private_key, self.n)
server.accept_public_key(self.public_key)
def compute_hashes(self):
hasher = hashlib.sha256()
hasher.update(self.salt_bytes + self.password.encode('ascii'))
x = int(hasher.digest().hex(), 16)
self.s = pow(self.server_public_key, self.private_key + self.u * x,
self.n)
s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(
self.s))
hasher = hashlib.sha256()
hasher.update(s_bytes)
self.k = hasher.digest()
def authenticate(self, server):
hasher = hashlib.sha256()
hasher.update(self.k + self.salt_bytes)
client_hmac = hasher.digest().hex()
if server.authenticate(client_hmac):
print('Successfully authenticated')
else:
raise Exception('Failed to authenticate')
class BadServer(Server):
def __init__(self, n, g):
self.private_key = random.randint(0, 2 ** 100)
self.salt = random.randint(0, 2 ** 100)
self.salt_bytes = self.salt.to_bytes(byteorder='big', length=
get_num_byte_len(self.salt))
self.u = random.randint(0, 2 ** 128)
self.n = n
self.g = g
def compute_hashes(self):
pass
def authenticate(self, client_hmac):
self.client_hmac = client_hmac
return True
def load_dict(self, path_to_dict):
with open(path_to_dict) as dict_file:
self.valid_words = set(dict_file.read().split())
def crack_password(self, path_to_dict):
self.load_dict(path_to_dict)
for w in self.valid_words:
hasher_x = hashlib.sha256()
hasher_x.update(self.salt_bytes + w.encode('ascii'))
x = int(hasher_x.digest().hex(), 16)
v = pow(self.g, x, self.n)
s = pow(self.client_public_key * pow(v, self.u, self.n), self.
private_key, self.n)
s_bytes = s.to_bytes(byteorder='big', length=get_num_byte_len(s))
hasher_k = hashlib.sha256()
hasher_k.update(s_bytes)
k = hasher_k.digest()
hasher_hmac = hashlib.sha256()
hasher_hmac.update(k + self.salt_bytes)
check_hmac = hasher_hmac.digest().hex()
if check_hmac == self.client_hmac:
print('Successfully cracked password. Password = {}'.format(w))
return
raise Exception('Failed to crack password')
def attempt_simple_srp_authenticate(client, server):
client.agree_params(server)
client.send_public_key(server)
server.send_salt_public_key_u(client)
server.compute_hashes()
client.compute_hashes()
client.authenticate(server)
def crack_simple_srp(client, server):
client.send_public_key(server)
server.send_salt_public_key_u(client)
server.compute_hashes()
client.compute_hashes()
client.authenticate(server)
server.crack_password('/Users/Adam/Dev/cryptopals_resources/words.txt')
if __name__ == '__main__':
nist_p_hex = (
'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff'
)
nist_p_bytearr = bytearray.fromhex(nist_p_hex)
n = int.from_bytes(nist_p_bytearr, byteorder='big')
g = 2
password = 'castle'
client = Client(n, g, password)
server = Server()
attempt_simple_srp_authenticate(client, server)
naive_client = Client(n, g, password)
bad_server = BadServer(n, g)
crack_simple_srp(naive_client, bad_server)
<|reserved_special_token_1|>
import hashlib
import math
import random
from set5.ch_4 import get_num_byte_len
class Server:
    """Server side of the simplified SRP exchange."""

    def __init__(self):
        self.private_key = random.randint(0, 2**100)
        self.salt = random.randint(0, 2**100)
        self.salt_bytes = self.salt.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.salt)
        )
        # Scrambling value u is generated server-side in this simplified variant.
        self.u = random.randint(0, 2**128)

    def agree_params(self, n, g, password):
        """Record the group parameters and derive the password verifier."""
        self.n = n
        self.g = g
        self.generate_password_params(password)

    def generate_password_params(self, password):
        """Derive v = g^x mod n where x = SHA256(salt || password)."""
        digest = hashlib.sha256(self.salt_bytes + password.encode("ascii")).digest()
        x = int(digest.hex(), 16)
        self.v = pow(self.g, x, self.n)

    def send_salt_public_key_u(self, client):
        """Publish our public key together with the salt and scrambling value u."""
        self.public_key = pow(self.g, self.private_key, self.n)
        client.accept_salt_public_key_u(self.salt, self.public_key, self.u)

    def accept_public_key(self, client_public_key):
        self.client_public_key = client_public_key

    def compute_hashes(self):
        """Compute the shared secret S = (A * v^u)^b mod n and session key K = SHA256(S)."""
        base = self.client_public_key * pow(self.v, self.u, self.n)
        self.s = pow(base, self.private_key, self.n)
        s_bytes = self.s.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.s)
        )
        self.k = hashlib.sha256(s_bytes).digest()

    def authenticate(self, client_hmac):
        """Check the client's proof hash SHA256(K || salt) against our own."""
        expected = hashlib.sha256(self.k + self.salt_bytes).digest().hex()
        if expected == client_hmac:
            return True
        print(expected, client_hmac)
        return False
class Client:
    """Client side of the simplified SRP exchange."""

    def __init__(self, n, g, password):
        self.n = n
        self.g = g
        self.password = password
        self.private_key = random.randint(0, 2**100)

    def agree_params(self, server):
        """Share the group parameters and password with the server."""
        server.agree_params(self.n, self.g, self.password)

    def accept_salt_public_key_u(self, salt, server_public_key, u):
        """Store the salt, server public key, and scrambling value u."""
        self.salt = salt
        self.salt_bytes = salt.to_bytes(
            byteorder="big",
            length=get_num_byte_len(salt)
        )
        self.server_public_key = server_public_key
        self.u = u

    def send_public_key(self, server):
        self.public_key = pow(self.g, self.private_key, self.n)
        server.accept_public_key(self.public_key)

    def compute_hashes(self):
        """Compute S = B^(a + u*x) mod n with x = SHA256(salt || password), K = SHA256(S)."""
        digest = hashlib.sha256(self.salt_bytes + self.password.encode("ascii")).digest()
        x = int(digest.hex(), 16)
        self.s = pow(self.server_public_key, self.private_key + self.u * x, self.n)
        s_bytes = self.s.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.s)
        )
        self.k = hashlib.sha256(s_bytes).digest()

    def authenticate(self, server):
        """Send the proof hash SHA256(K || salt) and raise if the server rejects it."""
        client_hmac = hashlib.sha256(self.k + self.salt_bytes).digest().hex()
        if server.authenticate(client_hmac):
            print("Successfully authenticated")
        else:
            raise Exception("Failed to authenticate")
class BadServer(Server):
    """Malicious server that records the client's proof hash and
    dictionary-attacks the password offline."""

    def __init__(self, n, g):
        self.private_key = random.randint(0, 2**100)
        self.salt = random.randint(0, 2**100)
        self.salt_bytes = self.salt.to_bytes(
            byteorder="big",
            length=get_num_byte_len(self.salt)
        )
        self.u = random.randint(0, 2**128)
        self.n = n
        self.g = g

    def compute_hashes(self):
        # Nothing to do: candidate keys are derived during the cracking phase.
        pass

    def authenticate(self, client_hmac):
        # Accept unconditionally, keeping the proof hash for the offline attack.
        self.client_hmac = client_hmac
        return True

    def load_dict(self, path_to_dict):
        """Load the candidate wordlist into a set."""
        with open(path_to_dict) as dict_file:
            self.valid_words = set(dict_file.read().split())

    def crack_password(self, path_to_dict):
        """Replay the key derivation per dictionary word until the proof hash matches."""
        self.load_dict(path_to_dict)
        for candidate in self.valid_words:
            x_digest = hashlib.sha256(self.salt_bytes + candidate.encode("ascii")).digest()
            x = int(x_digest.hex(), 16)
            v = pow(self.g, x, self.n)
            s = pow(self.client_public_key * pow(v, self.u, self.n), self.private_key, self.n)
            s_bytes = s.to_bytes(
                byteorder="big",
                length=get_num_byte_len(s)
            )
            k = hashlib.sha256(s_bytes).digest()
            candidate_hmac = hashlib.sha256(k + self.salt_bytes).digest().hex()
            if candidate_hmac == self.client_hmac:
                print("Successfully cracked password. Password = {}".format(candidate))
                return
        raise Exception("Failed to crack password")
def attempt_simple_srp_authenticate(client, server):
    """Run one full simplified-SRP handshake and authenticate the client.

    Raises (via client.authenticate) if the server rejects the proof hash.
    """
    client.agree_params(server)
    client.send_public_key(server)
    server.send_salt_public_key_u(client)
    server.compute_hashes()
    client.compute_hashes()
    client.authenticate(server)
def crack_simple_srp(client, server,
                     path_to_dict="/Users/Adam/Dev/cryptopals_resources/words.txt"):
    """Run the handshake against a malicious server, then dictionary-attack the password.

    Args:
        client: naive client that follows the protocol honestly.
        server: malicious server (e.g. BadServer) that records the client's proof hash.
        path_to_dict: wordlist used for the offline attack; defaults to the
            previously hard-coded path for backward compatibility.
    """
    client.send_public_key(server)
    server.send_salt_public_key_u(client)
    server.compute_hashes()
    client.compute_hashes()
    client.authenticate(server)
    server.crack_password(path_to_dict)
if __name__=="__main__":
    # Large safe prime used as the DH modulus (appears to be the RFC 3526
    # 1536-bit MODP prime from the Cryptopals challenges — TODO confirm).
    nist_p_hex = "ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff"
    nist_p_bytearr = bytearray.fromhex(nist_p_hex)
    n = int.from_bytes(nist_p_bytearr, byteorder="big")
    g = 2
    password = "castle"
    # Honest client/server pair: the handshake should authenticate successfully.
    client = Client(n, g, password)
    server = Server()
    attempt_simple_srp_authenticate(client, server)
    # Naive client vs. malicious server: demonstrates the offline dictionary attack.
    naive_client = Client(n, g, password)
    bad_server = BadServer(n, g)
    crack_simple_srp(naive_client, bad_server)
|
flexible
|
{
"blob_id": "cf7aeacedec211e76f2bfcb7f6e3cb06dbfdc36e",
"index": 3907,
"step-1": "<mask token>\n\n\nclass Server:\n\n def __init__(self):\n self.private_key = random.randint(0, 2 ** 100)\n self.salt = random.randint(0, 2 ** 100)\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.u = random.randint(0, 2 ** 128)\n\n def agree_params(self, n, g, password):\n self.n = n\n self.g = g\n self.generate_password_params(password)\n <mask token>\n <mask token>\n <mask token>\n\n def compute_hashes(self):\n self.s = pow(self.client_public_key * pow(self.v, self.u, self.n),\n self.private_key, self.n)\n s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(\n self.s))\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n <mask token>\n\n\nclass Client:\n\n def __init__(self, n, g, password):\n self.n = n\n self.g = g\n self.password = password\n self.private_key = random.randint(0, 2 ** 100)\n\n def agree_params(self, server):\n server.agree_params(self.n, self.g, self.password)\n\n def accept_salt_public_key_u(self, salt, server_public_key, u):\n self.salt = salt\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.server_public_key = server_public_key\n self.u = u\n\n def send_public_key(self, server):\n self.public_key = pow(self.g, self.private_key, self.n)\n server.accept_public_key(self.public_key)\n\n def compute_hashes(self):\n hasher = hashlib.sha256()\n hasher.update(self.salt_bytes + self.password.encode('ascii'))\n x = int(hasher.digest().hex(), 16)\n self.s = pow(self.server_public_key, self.private_key + self.u * x,\n self.n)\n s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(\n self.s))\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, server):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n client_hmac = hasher.digest().hex()\n if server.authenticate(client_hmac):\n print('Successfully 
authenticated')\n else:\n raise Exception('Failed to authenticate')\n\n\nclass BadServer(Server):\n\n def __init__(self, n, g):\n self.private_key = random.randint(0, 2 ** 100)\n self.salt = random.randint(0, 2 ** 100)\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.u = random.randint(0, 2 ** 128)\n self.n = n\n self.g = g\n\n def compute_hashes(self):\n pass\n\n def authenticate(self, client_hmac):\n self.client_hmac = client_hmac\n return True\n\n def load_dict(self, path_to_dict):\n with open(path_to_dict) as dict_file:\n self.valid_words = set(dict_file.read().split())\n\n def crack_password(self, path_to_dict):\n self.load_dict(path_to_dict)\n for w in self.valid_words:\n hasher_x = hashlib.sha256()\n hasher_x.update(self.salt_bytes + w.encode('ascii'))\n x = int(hasher_x.digest().hex(), 16)\n v = pow(self.g, x, self.n)\n s = pow(self.client_public_key * pow(v, self.u, self.n), self.\n private_key, self.n)\n s_bytes = s.to_bytes(byteorder='big', length=get_num_byte_len(s))\n hasher_k = hashlib.sha256()\n hasher_k.update(s_bytes)\n k = hasher_k.digest()\n hasher_hmac = hashlib.sha256()\n hasher_hmac.update(k + self.salt_bytes)\n check_hmac = hasher_hmac.digest().hex()\n if check_hmac == self.client_hmac:\n print('Successfully cracked password. Password = {}'.format(w))\n return\n raise Exception('Failed to crack password')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Server:\n\n def __init__(self):\n self.private_key = random.randint(0, 2 ** 100)\n self.salt = random.randint(0, 2 ** 100)\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.u = random.randint(0, 2 ** 128)\n\n def agree_params(self, n, g, password):\n self.n = n\n self.g = g\n self.generate_password_params(password)\n <mask token>\n <mask token>\n\n def accept_public_key(self, client_public_key):\n self.client_public_key = client_public_key\n\n def compute_hashes(self):\n self.s = pow(self.client_public_key * pow(self.v, self.u, self.n),\n self.private_key, self.n)\n s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(\n self.s))\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, client_hmac):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n check_hmac = hasher.digest().hex()\n if check_hmac == client_hmac:\n return True\n else:\n print(check_hmac, client_hmac)\n return False\n\n\nclass Client:\n\n def __init__(self, n, g, password):\n self.n = n\n self.g = g\n self.password = password\n self.private_key = random.randint(0, 2 ** 100)\n\n def agree_params(self, server):\n server.agree_params(self.n, self.g, self.password)\n\n def accept_salt_public_key_u(self, salt, server_public_key, u):\n self.salt = salt\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.server_public_key = server_public_key\n self.u = u\n\n def send_public_key(self, server):\n self.public_key = pow(self.g, self.private_key, self.n)\n server.accept_public_key(self.public_key)\n\n def compute_hashes(self):\n hasher = hashlib.sha256()\n hasher.update(self.salt_bytes + self.password.encode('ascii'))\n x = int(hasher.digest().hex(), 16)\n self.s = pow(self.server_public_key, self.private_key + self.u * x,\n self.n)\n s_bytes = self.s.to_bytes(byteorder='big', 
length=get_num_byte_len(\n self.s))\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, server):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n client_hmac = hasher.digest().hex()\n if server.authenticate(client_hmac):\n print('Successfully authenticated')\n else:\n raise Exception('Failed to authenticate')\n\n\nclass BadServer(Server):\n\n def __init__(self, n, g):\n self.private_key = random.randint(0, 2 ** 100)\n self.salt = random.randint(0, 2 ** 100)\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.u = random.randint(0, 2 ** 128)\n self.n = n\n self.g = g\n\n def compute_hashes(self):\n pass\n\n def authenticate(self, client_hmac):\n self.client_hmac = client_hmac\n return True\n\n def load_dict(self, path_to_dict):\n with open(path_to_dict) as dict_file:\n self.valid_words = set(dict_file.read().split())\n\n def crack_password(self, path_to_dict):\n self.load_dict(path_to_dict)\n for w in self.valid_words:\n hasher_x = hashlib.sha256()\n hasher_x.update(self.salt_bytes + w.encode('ascii'))\n x = int(hasher_x.digest().hex(), 16)\n v = pow(self.g, x, self.n)\n s = pow(self.client_public_key * pow(v, self.u, self.n), self.\n private_key, self.n)\n s_bytes = s.to_bytes(byteorder='big', length=get_num_byte_len(s))\n hasher_k = hashlib.sha256()\n hasher_k.update(s_bytes)\n k = hasher_k.digest()\n hasher_hmac = hashlib.sha256()\n hasher_hmac.update(k + self.salt_bytes)\n check_hmac = hasher_hmac.digest().hex()\n if check_hmac == self.client_hmac:\n print('Successfully cracked password. Password = {}'.format(w))\n return\n raise Exception('Failed to crack password')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Server:\n\n def __init__(self):\n self.private_key = random.randint(0, 2 ** 100)\n self.salt = random.randint(0, 2 ** 100)\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.u = random.randint(0, 2 ** 128)\n\n def agree_params(self, n, g, password):\n self.n = n\n self.g = g\n self.generate_password_params(password)\n <mask token>\n\n def send_salt_public_key_u(self, client):\n self.public_key = pow(self.g, self.private_key, self.n)\n client.accept_salt_public_key_u(self.salt, self.public_key, self.u)\n\n def accept_public_key(self, client_public_key):\n self.client_public_key = client_public_key\n\n def compute_hashes(self):\n self.s = pow(self.client_public_key * pow(self.v, self.u, self.n),\n self.private_key, self.n)\n s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(\n self.s))\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, client_hmac):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n check_hmac = hasher.digest().hex()\n if check_hmac == client_hmac:\n return True\n else:\n print(check_hmac, client_hmac)\n return False\n\n\nclass Client:\n\n def __init__(self, n, g, password):\n self.n = n\n self.g = g\n self.password = password\n self.private_key = random.randint(0, 2 ** 100)\n\n def agree_params(self, server):\n server.agree_params(self.n, self.g, self.password)\n\n def accept_salt_public_key_u(self, salt, server_public_key, u):\n self.salt = salt\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.server_public_key = server_public_key\n self.u = u\n\n def send_public_key(self, server):\n self.public_key = pow(self.g, self.private_key, self.n)\n server.accept_public_key(self.public_key)\n\n def compute_hashes(self):\n hasher = hashlib.sha256()\n hasher.update(self.salt_bytes + self.password.encode('ascii'))\n x = 
int(hasher.digest().hex(), 16)\n self.s = pow(self.server_public_key, self.private_key + self.u * x,\n self.n)\n s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(\n self.s))\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, server):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n client_hmac = hasher.digest().hex()\n if server.authenticate(client_hmac):\n print('Successfully authenticated')\n else:\n raise Exception('Failed to authenticate')\n\n\nclass BadServer(Server):\n\n def __init__(self, n, g):\n self.private_key = random.randint(0, 2 ** 100)\n self.salt = random.randint(0, 2 ** 100)\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.u = random.randint(0, 2 ** 128)\n self.n = n\n self.g = g\n\n def compute_hashes(self):\n pass\n\n def authenticate(self, client_hmac):\n self.client_hmac = client_hmac\n return True\n\n def load_dict(self, path_to_dict):\n with open(path_to_dict) as dict_file:\n self.valid_words = set(dict_file.read().split())\n\n def crack_password(self, path_to_dict):\n self.load_dict(path_to_dict)\n for w in self.valid_words:\n hasher_x = hashlib.sha256()\n hasher_x.update(self.salt_bytes + w.encode('ascii'))\n x = int(hasher_x.digest().hex(), 16)\n v = pow(self.g, x, self.n)\n s = pow(self.client_public_key * pow(v, self.u, self.n), self.\n private_key, self.n)\n s_bytes = s.to_bytes(byteorder='big', length=get_num_byte_len(s))\n hasher_k = hashlib.sha256()\n hasher_k.update(s_bytes)\n k = hasher_k.digest()\n hasher_hmac = hashlib.sha256()\n hasher_hmac.update(k + self.salt_bytes)\n check_hmac = hasher_hmac.digest().hex()\n if check_hmac == self.client_hmac:\n print('Successfully cracked password. Password = {}'.format(w))\n return\n raise Exception('Failed to crack password')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Server:\n\n def __init__(self):\n self.private_key = random.randint(0, 2 ** 100)\n self.salt = random.randint(0, 2 ** 100)\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.u = random.randint(0, 2 ** 128)\n\n def agree_params(self, n, g, password):\n self.n = n\n self.g = g\n self.generate_password_params(password)\n\n def generate_password_params(self, password):\n hasher = hashlib.sha256()\n hasher.update(self.salt_bytes + password.encode('ascii'))\n x = int(hasher.digest().hex(), 16)\n self.v = pow(self.g, x, self.n)\n\n def send_salt_public_key_u(self, client):\n self.public_key = pow(self.g, self.private_key, self.n)\n client.accept_salt_public_key_u(self.salt, self.public_key, self.u)\n\n def accept_public_key(self, client_public_key):\n self.client_public_key = client_public_key\n\n def compute_hashes(self):\n self.s = pow(self.client_public_key * pow(self.v, self.u, self.n),\n self.private_key, self.n)\n s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(\n self.s))\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, client_hmac):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n check_hmac = hasher.digest().hex()\n if check_hmac == client_hmac:\n return True\n else:\n print(check_hmac, client_hmac)\n return False\n\n\nclass Client:\n\n def __init__(self, n, g, password):\n self.n = n\n self.g = g\n self.password = password\n self.private_key = random.randint(0, 2 ** 100)\n\n def agree_params(self, server):\n server.agree_params(self.n, self.g, self.password)\n\n def accept_salt_public_key_u(self, salt, server_public_key, u):\n self.salt = salt\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.server_public_key = server_public_key\n self.u = u\n\n def send_public_key(self, server):\n self.public_key = pow(self.g, 
self.private_key, self.n)\n server.accept_public_key(self.public_key)\n\n def compute_hashes(self):\n hasher = hashlib.sha256()\n hasher.update(self.salt_bytes + self.password.encode('ascii'))\n x = int(hasher.digest().hex(), 16)\n self.s = pow(self.server_public_key, self.private_key + self.u * x,\n self.n)\n s_bytes = self.s.to_bytes(byteorder='big', length=get_num_byte_len(\n self.s))\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, server):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n client_hmac = hasher.digest().hex()\n if server.authenticate(client_hmac):\n print('Successfully authenticated')\n else:\n raise Exception('Failed to authenticate')\n\n\nclass BadServer(Server):\n\n def __init__(self, n, g):\n self.private_key = random.randint(0, 2 ** 100)\n self.salt = random.randint(0, 2 ** 100)\n self.salt_bytes = self.salt.to_bytes(byteorder='big', length=\n get_num_byte_len(self.salt))\n self.u = random.randint(0, 2 ** 128)\n self.n = n\n self.g = g\n\n def compute_hashes(self):\n pass\n\n def authenticate(self, client_hmac):\n self.client_hmac = client_hmac\n return True\n\n def load_dict(self, path_to_dict):\n with open(path_to_dict) as dict_file:\n self.valid_words = set(dict_file.read().split())\n\n def crack_password(self, path_to_dict):\n self.load_dict(path_to_dict)\n for w in self.valid_words:\n hasher_x = hashlib.sha256()\n hasher_x.update(self.salt_bytes + w.encode('ascii'))\n x = int(hasher_x.digest().hex(), 16)\n v = pow(self.g, x, self.n)\n s = pow(self.client_public_key * pow(v, self.u, self.n), self.\n private_key, self.n)\n s_bytes = s.to_bytes(byteorder='big', length=get_num_byte_len(s))\n hasher_k = hashlib.sha256()\n hasher_k.update(s_bytes)\n k = hasher_k.digest()\n hasher_hmac = hashlib.sha256()\n hasher_hmac.update(k + self.salt_bytes)\n check_hmac = hasher_hmac.digest().hex()\n if check_hmac == self.client_hmac:\n print('Successfully cracked password. 
Password = {}'.format(w))\n return\n raise Exception('Failed to crack password')\n\n\ndef attempt_simple_srp_authenticate(client, server):\n client.agree_params(server)\n client.send_public_key(server)\n server.send_salt_public_key_u(client)\n server.compute_hashes()\n client.compute_hashes()\n client.authenticate(server)\n\n\ndef crack_simple_srp(client, server):\n client.send_public_key(server)\n server.send_salt_public_key_u(client)\n server.compute_hashes()\n client.compute_hashes()\n client.authenticate(server)\n server.crack_password('/Users/Adam/Dev/cryptopals_resources/words.txt')\n\n\nif __name__ == '__main__':\n nist_p_hex = (\n 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff'\n )\n nist_p_bytearr = bytearray.fromhex(nist_p_hex)\n n = int.from_bytes(nist_p_bytearr, byteorder='big')\n g = 2\n password = 'castle'\n client = Client(n, g, password)\n server = Server()\n attempt_simple_srp_authenticate(client, server)\n naive_client = Client(n, g, password)\n bad_server = BadServer(n, g)\n crack_simple_srp(naive_client, bad_server)\n",
"step-5": "import hashlib\nimport math\nimport random \n\nfrom set5.ch_4 import get_num_byte_len\n\nclass Server:\n def __init__(self):\n self.private_key = random.randint(0, 2**100)\n self.salt = random.randint(0, 2**100)\n self.salt_bytes = self.salt.to_bytes(\n byteorder=\"big\", \n length=get_num_byte_len(self.salt)\n )\n self.u = random.randint(0, 2**128)\n\n def agree_params(self, n, g, password):\n self.n = n\n self.g = g\n self.generate_password_params(password)\n\n def generate_password_params(self, password):\n hasher = hashlib.sha256()\n hasher.update(self.salt_bytes + password.encode(\"ascii\"))\n x = int(hasher.digest().hex(), 16)\n self.v = pow(self.g, x, self.n)\n\n def send_salt_public_key_u(self, client):\n self.public_key = pow(self.g, self.private_key, self.n)\n client.accept_salt_public_key_u(self.salt, self.public_key, self.u)\n\n def accept_public_key(self, client_public_key):\n self.client_public_key = client_public_key\n\n def compute_hashes(self):\n self.s = pow(self.client_public_key * pow(self.v, self.u, self.n), self.private_key, self.n)\n s_bytes = self.s.to_bytes(\n byteorder=\"big\", \n length=get_num_byte_len(self.s)\n )\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, client_hmac):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n check_hmac = hasher.digest().hex()\n if check_hmac == client_hmac:\n return True\n else:\n print(check_hmac, client_hmac)\n return False\n\nclass Client:\n def __init__(self, n, g, password):\n self.n = n\n self.g = g\n self.password = password\n self.private_key = random.randint(0, 2**100)\n\n def agree_params(self, server):\n server.agree_params(self.n, self.g, self.password)\n\n def accept_salt_public_key_u(self, salt, server_public_key, u):\n self.salt = salt\n self.salt_bytes = self.salt.to_bytes(\n byteorder=\"big\", \n length=get_num_byte_len(self.salt)\n )\n self.server_public_key = server_public_key\n self.u = u\n\n 
def send_public_key(self, server):\n self.public_key = pow(self.g, self.private_key, self.n)\n server.accept_public_key(self.public_key)\n\n def compute_hashes(self):\n hasher = hashlib.sha256()\n hasher.update(self.salt_bytes + self.password.encode(\"ascii\"))\n x = int(hasher.digest().hex(), 16)\n self.s = pow(self.server_public_key, self.private_key + (self.u * x), self.n)\n s_bytes = self.s.to_bytes(\n byteorder=\"big\", \n length=get_num_byte_len(self.s)\n )\n hasher = hashlib.sha256()\n hasher.update(s_bytes)\n self.k = hasher.digest()\n\n def authenticate(self, server):\n hasher = hashlib.sha256()\n hasher.update(self.k + self.salt_bytes)\n client_hmac = hasher.digest().hex()\n if server.authenticate(client_hmac):\n print(\"Successfully authenticated\") \n else:\n raise Exception(\"Failed to authenticate\")\n\n\nclass BadServer(Server):\n def __init__(self, n, g):\n self.private_key = random.randint(0, 2**100)\n self.salt = random.randint(0, 2**100)\n self.salt_bytes = self.salt.to_bytes(\n byteorder=\"big\", \n length=get_num_byte_len(self.salt)\n )\n self.u = random.randint(0, 2**128)\n self.n = n\n self.g = g\n\n \n def compute_hashes(self):\n pass\n\n def authenticate(self, client_hmac):\n self.client_hmac = client_hmac \n return True\n\n def load_dict(self, path_to_dict):\n with open(path_to_dict) as dict_file:\n self.valid_words = set(dict_file.read().split())\n\n def crack_password(self, path_to_dict):\n self.load_dict(path_to_dict)\n for w in self.valid_words:\n hasher_x = hashlib.sha256()\n hasher_x.update(self.salt_bytes + w.encode(\"ascii\"))\n x = int(hasher_x.digest().hex(), 16)\n v = pow(self.g, x, self.n)\n s = pow(self.client_public_key * pow(v, self.u, self.n), self.private_key, self.n)\n s_bytes = s.to_bytes(\n byteorder=\"big\", \n length=get_num_byte_len(s)\n )\n hasher_k = hashlib.sha256() \n hasher_k.update(s_bytes)\n k = hasher_k.digest()\n hasher_hmac = hashlib.sha256()\n hasher_hmac.update(k + self.salt_bytes)\n check_hmac = 
hasher_hmac.digest().hex()\n if check_hmac == self.client_hmac:\n print(\"Successfully cracked password. Password = {}\".format(w))\n return\n raise Exception(\"Failed to crack password\") \n\n \n\ndef attempt_simple_srp_authenticate(client, server):\n client.agree_params(server)\n client.send_public_key(server)\n server.send_salt_public_key_u(client)\n server.compute_hashes()\n client.compute_hashes()\n client.authenticate(server)\n\ndef crack_simple_srp(client, server):\n client.send_public_key(server)\n server.send_salt_public_key_u(client)\n server.compute_hashes()\n client.compute_hashes()\n client.authenticate(server)\n server.crack_password(\"/Users/Adam/Dev/cryptopals_resources/words.txt\")\n\nif __name__==\"__main__\":\n nist_p_hex = \"ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff\"\n nist_p_bytearr = bytearray.fromhex(nist_p_hex)\n n = int.from_bytes(nist_p_bytearr, byteorder=\"big\")\n g = 2\n \n password = \"castle\"\n\n client = Client(n, g, password)\n server = Server()\n attempt_simple_srp_authenticate(client, server)\n\n naive_client = Client(n, g, password)\n bad_server = BadServer(n, g)\n crack_simple_srp(naive_client, bad_server)\n",
"step-ids": [
17,
19,
20,
24,
26
]
}
|
[
17,
19,
20,
24,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
router.register('species', views.SpeciesViewSet)
router.register('com_names', views.Com_NamesViewSet)
router.register('photos', views.PhotosViewSet)
<|reserved_special_token_0|>
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
router = routers.DefaultRouter()
router.register('species', views.SpeciesViewSet)
router.register('com_names', views.Com_NamesViewSet)
router.register('photos', views.PhotosViewSet)
urlpatterns = [path('admin/', admin.site.urls), path('api/', include(router
.urls)), path('api-auth/', include('rest_framework.urls', namespace=
'rest_framework')), path('bugbytes/<int:tensorflow_id>/view_species',
views.view_species, name='view_species'), path('', views.landing, name=
'landing'), path('model_json/', views.model_json, name='model_json')]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.contrib import admin
from django.conf import settings
from django.urls import include, path
from rest_framework import routers
from BugBytes import views
from django.conf.urls.static import static
router = routers.DefaultRouter()
router.register('species', views.SpeciesViewSet)
router.register('com_names', views.Com_NamesViewSet)
router.register('photos', views.PhotosViewSet)
urlpatterns = [path('admin/', admin.site.urls), path('api/', include(router
.urls)), path('api-auth/', include('rest_framework.urls', namespace=
'rest_framework')), path('bugbytes/<int:tensorflow_id>/view_species',
views.view_species, name='view_species'), path('', views.landing, name=
'landing'), path('model_json/', views.model_json, name='model_json')]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
<|reserved_special_token_1|>
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import include, path
from rest_framework import routers
from BugBytes import views
from django.conf.urls.static import static
router = routers.DefaultRouter()
router.register(r'species', views.SpeciesViewSet)
router.register(r'com_names', views.Com_NamesViewSet)
router.register(r'photos', views.PhotosViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('bugbytes/<int:tensorflow_id>/view_species',
views.view_species, name='view_species'),
path('', views.landing, name='landing'),
path('model_json/', views.model_json, name='model_json'),
]
if settings.DEBUG: # new
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
flexible
|
{
"blob_id": "786bc5d44115b46bd246e85e85c8f8c1f20737b9",
"index": 7921,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\n<mask token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-3": "<mask token>\nrouter = routers.DefaultRouter()\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\nurlpatterns = [path('admin/', admin.site.urls), path('api/', include(router\n .urls)), path('api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework')), path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'), path('', views.landing, name=\n 'landing'), path('model_json/', views.model_json, name='model_json')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-4": "<mask token>\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom BugBytes import views\nfrom django.conf.urls.static import static\nrouter = routers.DefaultRouter()\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\nurlpatterns = [path('admin/', admin.site.urls), path('api/', include(router\n .urls)), path('api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework')), path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'), path('', views.landing, name=\n 'landing'), path('model_json/', views.model_json, name='model_json')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-5": "\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom BugBytes import views\nfrom django.conf.urls.static import static\n\nrouter = routers.DefaultRouter()\nrouter.register(r'species', views.SpeciesViewSet)\nrouter.register(r'com_names', views.Com_NamesViewSet)\nrouter.register(r'photos', views.PhotosViewSet)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'),\n path('', views.landing, name='landing'),\n path('model_json/', views.model_json, name='model_json'),\n]\n\nif settings.DEBUG: # new\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import List
from re import match
from utility import ButtonGroup
import rumps
class RepeatWorkBreak(rumps.App):
def __init__(self):
rumps.debug_mode(True)
self.config = {
"app_title": "Repeat Work and Break",
"start": "Start",
"pause": "Pause Timer",
"continue": "Continue Timer",
"stop": "Stop Timer",
"timeout_message": "Time is up! Take a break :)",
"shift_time_in_seconds": 60 * 60 * 1, # 60 seconds * 60 = 1 hour
"break_time_in_seconds": 60 * 5,
'shift_setting_buttons': [
{
'title': '1 hour',
},
{
'title': '4 hour',
},
{
'title': '8 hour',
}
],
'break_setting_buttons': [
{
'title': '5 minutes',
},
{
'title': '10 minutes',
},
{
'title': '15 minutes',
}
],
}
self.app = rumps.App(self.config['app_title'])
self.timer = rumps.Timer(self.on_tick, 1)
self.shift_setting_button_group = ButtonGroup(
self.config['shift_setting_buttons'], callback=self.handle_shift_setting_button)
self.break_setting_button_group = ButtonGroup(
self.config['break_setting_buttons'], callback=self.handle_shift_setting_button)
self.shift_time_in_seconds = self.config["shift_time_in_seconds"]
self.break_time_in_seconds = self.config["break_time_in_seconds"]
self.elapsed_shift_time_in_hours = 0
self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)
self.start_pause_button = rumps.MenuItem(
title=self.config["start"], callback=self.start_timer)
self.stop_button = rumps.MenuItem(
title=self.config["stop"], callback=None)
self.app.menu = [
{
'Preferences':
{
"Setting Shift": self.shift_setting_button_group.buttons,
"Setting Break / hr": self.break_setting_button_group.buttons,
}
},
None,
self.start_pause_button,
self.stop_button,
]
def set_up_menu(self):
self.timer.stop()
self.timer.count = 0
self.app.title = self.config['app_title']
def convert_seconds_to_time_string(self, seconds) -> str:
seconds = seconds % (24 * 3600)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return "%d:%02d:%02d" % (hours, minutes, seconds)
def on_tick(self, sender):
time_left_in_seconds = sender.end - sender.count
time_left_in_string = self.convert_seconds_to_time_string(
time_left_in_seconds)
if sender.count != 0 and sender.count % 3600 == 0:
self.elapsed_shift_time_in_hours += 1
self.update_progress_box()
if time_left_in_seconds == 0:
rumps.notification(
title=self.config["app_title"], subtitle=self.config["timeout_message"], message='')
self.stop_timer()
self.stop_button.set_callback(None)
else:
self.stop_button.set_callback(self.stop_timer)
self.app.title = self.progress_box + ' | ' + time_left_in_string
sender.count += 1
def update_progress_box(self):
self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self.shift_time_in_seconds // 3600 -
self.elapsed_shift_time_in_hours) * '◻︎'
def start_timer(self, sender):
if sender.title.lower().startswith(("start", "continue")):
if sender.title == self.config["start"]:
self.timer.count = 0
self.timer.end = self.shift_time_in_seconds
sender.title = self.config["pause"]
self.timer.start()
else:
sender.title = self.config["continue"]
self.timer.stop()
def stop_timer(self, sender=None):
self.set_up_menu()
self.stop_button.set_callback(None)
self.start_pause_button.title = self.config["start"]
def handle_shift_setting_button(self, sender):
self.shift_setting_button_group.toggle(sender)
selected_hours = int(match(r'^\d+\s{1}', sender.title)[0])
self.progress_box = "◻︎" * selected_hours # update empty progress box
self.shift_time_in_seconds = selected_hours * 3600 # hours in seconds
def handle_break_setting_button(self, sender):
self.break_setting_button_group.toggle(sender)
selected_minutes = int(match(r'^\d+\s{1}', sender.title)[0])
self.break_time_in_seconds = selected_minutes * 60
    def run(self):
        """Hand control to rumps: enters the macOS status-bar event loop (blocks)."""
        self.app.run()
if __name__ == "__main__":
    # Build the status-bar app and enter its blocking event loop.
    app = RepeatWorkBreak()
    app.run()
|
normal
|
{
"blob_id": "2ca91c410b8c8d6306d5ed918783a4d77a091ba8",
"index": 360,
"step-1": "<mask token>\n\n\nclass RepeatWorkBreak(rumps.App):\n <mask token>\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end - sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n <mask token>\n <mask token>\n <mask token>\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end 
- sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n <mask token>\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end 
- sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n\n def stop_timer(self, sender=None):\n self.set_up_menu()\n self.stop_button.set_callback(None)\n self.start_pause_button.title = self.config['start']\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass RepeatWorkBreak(rumps.App):\n\n def __init__(self):\n rumps.debug_mode(True)\n self.config = {'app_title': 'Repeat Work and Break', 'start':\n 'Start', 'pause': 'Pause Timer', 'continue': 'Continue Timer',\n 'stop': 'Stop Timer', 'timeout_message':\n 'Time is up! Take a break :)', 'shift_time_in_seconds': 60 * 60 *\n 1, 'break_time_in_seconds': 60 * 5, 'shift_setting_buttons': [{\n 'title': '1 hour'}, {'title': '4 hour'}, {'title': '8 hour'}],\n 'break_setting_buttons': [{'title': '5 minutes'}, {'title':\n '10 minutes'}, {'title': '15 minutes'}]}\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(self.config[\n 'shift_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.break_setting_button_group = ButtonGroup(self.config[\n 'break_setting_buttons'], callback=self.handle_shift_setting_button\n )\n self.shift_time_in_seconds = self.config['shift_time_in_seconds']\n self.break_time_in_seconds = self.config['break_time_in_seconds']\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(title=self.config['start'],\n callback=self.start_timer)\n self.stop_button = rumps.MenuItem(title=self.config['stop'],\n callback=None)\n self.app.menu = [{'Preferences': {'Setting Shift': self.\n shift_setting_button_group.buttons, 'Setting Break / hr': self.\n break_setting_button_group.buttons}}, None, self.\n start_pause_button, self.stop_button]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) ->str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return '%d:%02d:%02d' % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end 
- sender.count\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(title=self.config['app_title'], subtitle=\n self.config['timeout_message'], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self\n .shift_time_in_seconds // 3600 - self.elapsed_shift_time_in_hours\n ) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith(('start', 'continue')):\n if sender.title == self.config['start']:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config['pause']\n self.timer.start()\n else:\n sender.title = self.config['continue']\n self.timer.stop()\n\n def stop_timer(self, sender=None):\n self.set_up_menu()\n self.stop_button.set_callback(None)\n self.start_pause_button.title = self.config['start']\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.progress_box = '◻︎' * selected_hours\n self.shift_time_in_seconds = selected_hours * 3600\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match('^\\\\d+\\\\s{1}', sender.title)[0])\n self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\nif __name__ == '__main__':\n app = RepeatWorkBreak()\n app.run()\n",
"step-5": "from typing import List\nfrom re import match\nfrom utility import ButtonGroup\nimport rumps\n\n\nclass RepeatWorkBreak(rumps.App):\n def __init__(self):\n rumps.debug_mode(True)\n\n self.config = {\n \"app_title\": \"Repeat Work and Break\",\n \"start\": \"Start\",\n \"pause\": \"Pause Timer\",\n \"continue\": \"Continue Timer\",\n \"stop\": \"Stop Timer\",\n \"timeout_message\": \"Time is up! Take a break :)\",\n \"shift_time_in_seconds\": 60 * 60 * 1, # 60 seconds * 60 = 1 hour\n \"break_time_in_seconds\": 60 * 5,\n 'shift_setting_buttons': [\n {\n 'title': '1 hour',\n },\n {\n 'title': '4 hour',\n },\n {\n 'title': '8 hour',\n }\n ],\n 'break_setting_buttons': [\n {\n 'title': '5 minutes',\n },\n {\n 'title': '10 minutes',\n },\n {\n 'title': '15 minutes',\n }\n ],\n }\n self.app = rumps.App(self.config['app_title'])\n self.timer = rumps.Timer(self.on_tick, 1)\n self.shift_setting_button_group = ButtonGroup(\n self.config['shift_setting_buttons'], callback=self.handle_shift_setting_button)\n self.break_setting_button_group = ButtonGroup(\n self.config['break_setting_buttons'], callback=self.handle_shift_setting_button)\n self.shift_time_in_seconds = self.config[\"shift_time_in_seconds\"]\n self.break_time_in_seconds = self.config[\"break_time_in_seconds\"]\n self.elapsed_shift_time_in_hours = 0\n self.progress_box = '◻︎' * (self.shift_time_in_seconds // 3600)\n self.start_pause_button = rumps.MenuItem(\n title=self.config[\"start\"], callback=self.start_timer)\n self.stop_button = rumps.MenuItem(\n title=self.config[\"stop\"], callback=None)\n self.app.menu = [\n {\n 'Preferences':\n {\n \"Setting Shift\": self.shift_setting_button_group.buttons,\n \"Setting Break / hr\": self.break_setting_button_group.buttons,\n }\n },\n None,\n self.start_pause_button,\n self.stop_button,\n ]\n\n def set_up_menu(self):\n self.timer.stop()\n self.timer.count = 0\n self.app.title = self.config['app_title']\n\n def convert_seconds_to_time_string(self, seconds) -> 
str:\n seconds = seconds % (24 * 3600)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n\n return \"%d:%02d:%02d\" % (hours, minutes, seconds)\n\n def on_tick(self, sender):\n time_left_in_seconds = sender.end - sender.count\n\n time_left_in_string = self.convert_seconds_to_time_string(\n time_left_in_seconds)\n if sender.count != 0 and sender.count % 3600 == 0:\n self.elapsed_shift_time_in_hours += 1\n self.update_progress_box()\n if time_left_in_seconds == 0:\n rumps.notification(\n title=self.config[\"app_title\"], subtitle=self.config[\"timeout_message\"], message='')\n self.stop_timer()\n self.stop_button.set_callback(None)\n else:\n self.stop_button.set_callback(self.stop_timer)\n\n self.app.title = self.progress_box + ' | ' + time_left_in_string\n sender.count += 1\n\n def update_progress_box(self):\n self.progress_box = self.elapsed_shift_time_in_hours * '☑︎' + (self.shift_time_in_seconds // 3600 -\n self.elapsed_shift_time_in_hours) * '◻︎'\n\n def start_timer(self, sender):\n if sender.title.lower().startswith((\"start\", \"continue\")):\n if sender.title == self.config[\"start\"]:\n self.timer.count = 0\n self.timer.end = self.shift_time_in_seconds\n sender.title = self.config[\"pause\"]\n self.timer.start()\n else:\n sender.title = self.config[\"continue\"]\n self.timer.stop()\n\n def stop_timer(self, sender=None):\n self.set_up_menu()\n self.stop_button.set_callback(None)\n self.start_pause_button.title = self.config[\"start\"]\n\n def handle_shift_setting_button(self, sender):\n self.shift_setting_button_group.toggle(sender)\n selected_hours = int(match(r'^\\d+\\s{1}', sender.title)[0])\n self.progress_box = \"◻︎\" * selected_hours # update empty progress box\n self.shift_time_in_seconds = selected_hours * 3600 # hours in seconds\n\n def handle_break_setting_button(self, sender):\n self.break_setting_button_group.toggle(sender)\n selected_minutes = int(match(r'^\\d+\\s{1}', sender.title)[0])\n 
self.break_time_in_seconds = selected_minutes * 60\n\n def run(self):\n self.app.run()\n\n\nif __name__ == \"__main__\":\n app = RepeatWorkBreak()\n app.run()\n",
"step-ids": [
7,
10,
11,
12,
14
]
}
|
[
7,
10,
11,
12,
14
] |
from OpenSSL import SSL, crypto
from twisted.internet import ssl, reactor
from twisted.internet.protocol import Factory, Protocol
import os
from time import time
class Echo(Protocol):
def dataReceived(self, data):
print "Data received: " + data
# define cases
options = {
"generate": self.generateCertificate,
"sign": self.signCertificate
}
tmp = data.split(';')
method = tmp.pop(0)
print "method is " + method
#TODO: catch unknown cases
# delegate case to method
result = options[method](tmp)
self.transport.write(result)
def generateCertificate(self, userDataList):
# generate a key-pair with RSA and 2048 bits
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
# create a new certificate of x509 structure
x509 = crypto.X509()
# X509Name type
subject = self.setSubject(x509.get_subject(), userDataList)
#x509.set_subject(subject)
# list of (name, value) tuples
subComponents = subject.get_components()
for (name, value) in subComponents:
print name + " is " + value
# cert is valid immediately
x509.gmtime_adj_notBefore(0)
# cert gets invalid after 10 years
x509.gmtime_adj_notAfter(10*365*24*60*60)
#TODO: load our CA root cert(PKCS12 type) and set subject as issuer
# set issuer (CA) data
x509.set_issuer(x509.get_subject())
print "Issuer set - ACTUALLY SELF-SIGNED MODE!!!"
# set user public key
x509.set_pubkey(pkey)
#TODO: which algorithm to use? (replace with sha512)
#TODO: replace key with CA private key
# sign the certificate
x509.sign(pkey, 'sha256')
print "Certificate signed - ACTUALLY SELF-SIGNED MODE!!!"
# create a new PKCS12 object
pkcs12 = crypto.PKCS12()
# set the new user certificate
pkcs12.set_certificate(x509)
# insert user private key
pkcs12.set_privatekey(pkey)
# create a dump of PKCS12 and return
return pkcs12.export()
def setSubject(self, subject, data):
#subjectVariables = {
# "C": subject.C,
# "ST": subject.ST,
# "L": subject.L,
# "O": subject.O,
# "OU": subject.OU,
# "CN": subject.CN
#}
for d in data:
s = d.split('=')
variable = s[0]
value = s[1]
print "Setting variable " + variable + " to " + value + " on subject"
#subjectVariables[variable] = value
if variable == "C":
subject.C = value
elif variable == "ST":
subject.ST = value
elif variable == "L":
subject.L = value
elif variable == "O":
subject.O = value
elif variable == "OU":
subject.OU = value
elif variable == "CN":
subject.CN = value
return subject
def signCertificate(self, certData):
x509 = crypto.X509()
pkcs12 = crypto.load_pkcs12(certData)
req = pkcs12.get_certificate()
x509.set_subject(req.get_subject())
x509.set_pubkey(req.get_pubkey())
#issuer aus Datei setzen
# cert is valid immediately
x509.gmtime_adj_notBefore(0)
# cert gets invalid after 10 years
x509.gmtime_adj_notAfter(10*365*24*60*60)
x509.sign(pkey, 'sha256')
pkcs12.set_certificate(x509)
return pkcs12.export()
def verifyCallback(connection, x509, errnum, errdepth, ok):
    """OpenSSL peer-certificate verification hook.

    Called by pyOpenSSL for every certificate in the peer's chain; `ok`
    carries OpenSSL's own verdict for this certificate.  Returning False
    rejects the connection, True accepts it.
    """
    if not ok:
        print 'invalid cert from subject:', x509.get_subject()
        return False
    else:
        print "Certs are fine", x509.get_subject()
        return True
def getTimestamp():
    """Return the current Unix time in milliseconds, as a decimal string."""
    millis = round(time() * 1000)
    return str(int(millis))
def addTimestamp(millis, name):
    """Print a timestamped label of the form "<millis>_<name>" (debug helper)."""
    print millis + '_' + name
if __name__ == '__main__':
    # Wire the Echo protocol into a listening TLS server on port 8000.
    factory = Factory()
    factory.protocol = Echo

    os.system("echo 'Server started...'")

    # Server TLS context: our CA key/cert pair provides the server identity.
    myContextFactory = ssl.DefaultOpenSSLContextFactory(
        'keys/ca-key.pem', 'keys/ca-root.pem'
    )

    ctx = myContextFactory.getContext()

    # SSL.VERIFY_PEER: require verification of the peer's SSL certificate (default=true)
    # VERIFY_FAIL_IF_NO_PEER_CERT: abort the handshake when the peer presents
    # no certificate (only meaningful together with the flag above!)
    ctx.set_verify(
        SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
        verifyCallback
    )

    # Since we have self-signed certs we have to explicitly
    # tell the server to trust them.
    ctx.load_verify_locations("keys/ca-root.pem")

    reactor.listenSSL(8000, factory, myContextFactory)
    reactor.run()
|
normal
|
{
"blob_id": "9951588f581c5045154a77535b36d230d586d8a5",
"index": 338,
"step-1": "from OpenSSL import SSL, crypto\nfrom twisted.internet import ssl, reactor\nfrom twisted.internet.protocol import Factory, Protocol\n\nimport os\nfrom time import time\n\nclass Echo(Protocol):\n\n def dataReceived(self, data):\n print \"Data received: \" + data\n\n # define cases\n options = {\n \"generate\": self.generateCertificate,\n \"sign\": self.signCertificate\n }\n \n tmp = data.split(';')\n method = tmp.pop(0)\n print \"method is \" + method\n \n #TODO: catch unknown cases\n # delegate case to method\n result = options[method](tmp)\n \n self.transport.write(result)\n\n def generateCertificate(self, userDataList):\n # generate a key-pair with RSA and 2048 bits\n pkey = crypto.PKey()\n pkey.generate_key(crypto.TYPE_RSA, 2048)\n \n # create a new certificate of x509 structure\n x509 = crypto.X509()\n \n # X509Name type\n subject = self.setSubject(x509.get_subject(), userDataList)\n #x509.set_subject(subject)\n \n # list of (name, value) tuples\n subComponents = subject.get_components()\n for (name, value) in subComponents:\n print name + \" is \" + value\n \n # cert is valid immediately\n x509.gmtime_adj_notBefore(0)\n \n # cert gets invalid after 10 years\n x509.gmtime_adj_notAfter(10*365*24*60*60)\n \n #TODO: load our CA root cert(PKCS12 type) and set subject as issuer\n # set issuer (CA) data\n x509.set_issuer(x509.get_subject())\n print \"Issuer set - ACTUALLY SELF-SIGNED MODE!!!\"\n \n # set user public key\n x509.set_pubkey(pkey)\n \n #TODO: which algorithm to use? 
(replace with sha512)\n #TODO: replace key with CA private key\n # sign the certificate\n x509.sign(pkey, 'sha256')\n print \"Certificate signed - ACTUALLY SELF-SIGNED MODE!!!\"\n \n # create a new PKCS12 object\n pkcs12 = crypto.PKCS12()\n \n # set the new user certificate\n pkcs12.set_certificate(x509)\n \n # insert user private key\n pkcs12.set_privatekey(pkey)\n \n # create a dump of PKCS12 and return\n return pkcs12.export()\n \n def setSubject(self, subject, data):\n #subjectVariables = {\n # \"C\": subject.C,\n # \"ST\": subject.ST,\n # \"L\": subject.L,\n # \"O\": subject.O,\n # \"OU\": subject.OU,\n # \"CN\": subject.CN\n #}\n \n for d in data:\n s = d.split('=')\n variable = s[0]\n value = s[1]\n print \"Setting variable \" + variable + \" to \" + value + \" on subject\"\n #subjectVariables[variable] = value\n if variable == \"C\":\n subject.C = value\n elif variable == \"ST\":\n subject.ST = value\n elif variable == \"L\":\n subject.L = value\n elif variable == \"O\":\n subject.O = value\n elif variable == \"OU\":\n subject.OU = value\n elif variable == \"CN\":\n subject.CN = value\n \n return subject\n \n def signCertificate(self, certData):\n\n x509 = crypto.X509()\n pkcs12 = crypto.load_pkcs12(certData)\n req = pkcs12.get_certificate()\n x509.set_subject(req.get_subject())\n x509.set_pubkey(req.get_pubkey())\n\n #issuer aus Datei setzen\n\n # cert is valid immediately\n x509.gmtime_adj_notBefore(0)\n \n # cert gets invalid after 10 years\n x509.gmtime_adj_notAfter(10*365*24*60*60)\n\n x509.sign(pkey, 'sha256')\n\n pkcs12.set_certificate(x509)\n\n return pkcs12.export()\n \n\ndef verifyCallback(connection, x509, errnum, errdepth, ok):\n if not ok:\n print 'invalid cert from subject:', x509.get_subject()\n return False\n else:\n print \"Certs are fine\", x509.get_subject()\n return True\n\ndef getTimestamp():\n return str(int(round(time() * 1000)))\n\ndef addTimestamp(millis, name):\n print millis + '_' + name\n\nif __name__ == '__main__':\n factory = 
Factory()\n factory.protocol = Echo\n\n os.system(\"echo 'Server started...'\")\n\n myContextFactory = ssl.DefaultOpenSSLContextFactory(\n 'keys/ca-key.pem', 'keys/ca-root.pem'\n )\n\n ctx = myContextFactory.getContext()\n\n # SSL.VERIFY_PEER: Verifizierung des verwendeten SSL-Certs vorraussetzen (default=true)\n # VERIFY_FAIL_IF_NO_PEER_CERT: Vorgang wird abgebrochen, wenn die Verbindung ohne Zertifikat \n # verwendet wird (setzt obigen Parameer vorraus!)\n ctx.set_verify(\n SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,\n verifyCallback\n )\n\n # Since we have self-signed certs we have to explicitly\n # tell the server to trust them.\n ctx.load_verify_locations(\"keys/ca-root.pem\")\n\n reactor.listenSSL(8000, factory, myContextFactory)\n reactor.run()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def send_sms(sms_text):
account_sid = os.getenv('TWILIO_ACCOUNT_SID')
auth_token = os.getenv('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
message = client.messages.create(body='Joing the dark side', from_=os.
getenv('NUMBER_FROM'), media_url=['https://demo.twilio.com/owl.png'
], to=os.getenv('NUMBER_TO'))
return message.sid
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv()
<|reserved_special_token_0|>
def get_status(user_id):
params = {'user_ids': user_id, 'V': os.getenv('API_V'), 'access_token':
os.getenv('ACCESS_TOKEN'), 'fields': 'online'}
friends_status = requests.post(BASE_URL, params=params)
return friends_status.json()['response'][0]['online']
def send_sms(sms_text):
account_sid = os.getenv('TWILIO_ACCOUNT_SID')
auth_token = os.getenv('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
message = client.messages.create(body='Joing the dark side', from_=os.
getenv('NUMBER_FROM'), media_url=['https://demo.twilio.com/owl.png'
], to=os.getenv('NUMBER_TO'))
return message.sid
if __name__ == '__main__':
vk_id = input('Введите id ')
while True:
if get_status(vk_id) == 1:
send_sms(f'{vk_id} сейчас онлайн!')
break
time.sleep(5)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv()
BASE_URL = 'https://api.vk.com/method/users.get'
def get_status(user_id):
params = {'user_ids': user_id, 'V': os.getenv('API_V'), 'access_token':
os.getenv('ACCESS_TOKEN'), 'fields': 'online'}
friends_status = requests.post(BASE_URL, params=params)
return friends_status.json()['response'][0]['online']
def send_sms(sms_text):
account_sid = os.getenv('TWILIO_ACCOUNT_SID')
auth_token = os.getenv('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
message = client.messages.create(body='Joing the dark side', from_=os.
getenv('NUMBER_FROM'), media_url=['https://demo.twilio.com/owl.png'
], to=os.getenv('NUMBER_TO'))
return message.sid
if __name__ == '__main__':
vk_id = input('Введите id ')
while True:
if get_status(vk_id) == 1:
send_sms(f'{vk_id} сейчас онлайн!')
break
time.sleep(5)
<|reserved_special_token_1|>
import os
import time
import requests
from dotenv import load_dotenv
from twilio.rest import Client
load_dotenv()
BASE_URL = 'https://api.vk.com/method/users.get'
def get_status(user_id):
    """Query the VK API for a user's online flag (1 = online, 0 = offline)."""
    payload = {
        'user_ids': user_id,
        'V': os.getenv('API_V'),
        'access_token': os.getenv('ACCESS_TOKEN'),
        'fields': 'online',
    }
    response = requests.post(BASE_URL, params=payload)
    return response.json()['response'][0]['online']
def send_sms(sms_text):
    """Send `sms_text` as a Twilio MMS and return the message SID.

    Credentials and phone numbers come from the environment
    (TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, NUMBER_FROM, NUMBER_TO).

    Bug fix: `sms_text` was previously ignored and a hard-coded message
    ("Joing the dark side") was sent instead; the body now carries the
    text the caller asked for.
    """
    account_sid = os.getenv('TWILIO_ACCOUNT_SID')
    auth_token = os.getenv('TWILIO_AUTH_TOKEN')
    client = Client(account_sid, auth_token)
    message = client.messages.create(
        body=sms_text,
        from_=os.getenv('NUMBER_FROM'),
        media_url=['https://demo.twilio.com/owl.png'],
        to=os.getenv('NUMBER_TO'),
    )
    return message.sid
if __name__ == '__main__':
    vk_id = input('Введите id ')
    # Poll the VK API every 5 seconds until the user shows up online,
    # then send a single SMS notification and exit.
    while True:
        if get_status(vk_id) == 1:
            send_sms(f'{vk_id} сейчас онлайн!')
            break
        time.sleep(5)
|
flexible
|
{
"blob_id": "6b2a9e8c6e95f52e9ebf999b81f9170fc669cce4",
"index": 6329,
"step-1": "<mask token>\n\n\ndef send_sms(sms_text):\n account_sid = os.getenv('TWILIO_ACCOUNT_SID')\n auth_token = os.getenv('TWILIO_AUTH_TOKEN')\n client = Client(account_sid, auth_token)\n message = client.messages.create(body='Joing the dark side', from_=os.\n getenv('NUMBER_FROM'), media_url=['https://demo.twilio.com/owl.png'\n ], to=os.getenv('NUMBER_TO'))\n return message.sid\n\n\n<mask token>\n",
"step-2": "<mask token>\nload_dotenv()\n<mask token>\n\n\ndef get_status(user_id):\n params = {'user_ids': user_id, 'V': os.getenv('API_V'), 'access_token':\n os.getenv('ACCESS_TOKEN'), 'fields': 'online'}\n friends_status = requests.post(BASE_URL, params=params)\n return friends_status.json()['response'][0]['online']\n\n\ndef send_sms(sms_text):\n account_sid = os.getenv('TWILIO_ACCOUNT_SID')\n auth_token = os.getenv('TWILIO_AUTH_TOKEN')\n client = Client(account_sid, auth_token)\n message = client.messages.create(body='Joing the dark side', from_=os.\n getenv('NUMBER_FROM'), media_url=['https://demo.twilio.com/owl.png'\n ], to=os.getenv('NUMBER_TO'))\n return message.sid\n\n\nif __name__ == '__main__':\n vk_id = input('Введите id ')\n while True:\n if get_status(vk_id) == 1:\n send_sms(f'{vk_id} сейчас онлайн!')\n break\n time.sleep(5)\n",
"step-3": "<mask token>\nload_dotenv()\nBASE_URL = 'https://api.vk.com/method/users.get'\n\n\ndef get_status(user_id):\n params = {'user_ids': user_id, 'V': os.getenv('API_V'), 'access_token':\n os.getenv('ACCESS_TOKEN'), 'fields': 'online'}\n friends_status = requests.post(BASE_URL, params=params)\n return friends_status.json()['response'][0]['online']\n\n\ndef send_sms(sms_text):\n account_sid = os.getenv('TWILIO_ACCOUNT_SID')\n auth_token = os.getenv('TWILIO_AUTH_TOKEN')\n client = Client(account_sid, auth_token)\n message = client.messages.create(body='Joing the dark side', from_=os.\n getenv('NUMBER_FROM'), media_url=['https://demo.twilio.com/owl.png'\n ], to=os.getenv('NUMBER_TO'))\n return message.sid\n\n\nif __name__ == '__main__':\n vk_id = input('Введите id ')\n while True:\n if get_status(vk_id) == 1:\n send_sms(f'{vk_id} сейчас онлайн!')\n break\n time.sleep(5)\n",
"step-4": "import os\nimport time\nimport requests\nfrom dotenv import load_dotenv\nfrom twilio.rest import Client\nload_dotenv()\nBASE_URL = 'https://api.vk.com/method/users.get'\n\n\ndef get_status(user_id):\n params = {'user_ids': user_id, 'V': os.getenv('API_V'), 'access_token':\n os.getenv('ACCESS_TOKEN'), 'fields': 'online'}\n friends_status = requests.post(BASE_URL, params=params)\n return friends_status.json()['response'][0]['online']\n\n\ndef send_sms(sms_text):\n account_sid = os.getenv('TWILIO_ACCOUNT_SID')\n auth_token = os.getenv('TWILIO_AUTH_TOKEN')\n client = Client(account_sid, auth_token)\n message = client.messages.create(body='Joing the dark side', from_=os.\n getenv('NUMBER_FROM'), media_url=['https://demo.twilio.com/owl.png'\n ], to=os.getenv('NUMBER_TO'))\n return message.sid\n\n\nif __name__ == '__main__':\n vk_id = input('Введите id ')\n while True:\n if get_status(vk_id) == 1:\n send_sms(f'{vk_id} сейчас онлайн!')\n break\n time.sleep(5)\n",
"step-5": null,
"step-ids": [
1,
3,
4,
5
]
}
|
[
1,
3,
4,
5
] |
import numpy as np
import scipy.signal as sp
from common import *
class Processor:
    """Short-time Fourier analysis/synthesis with overlap-add (OLA).

    Frames of ``2 * hopSize`` samples are windowed with a sqrt-Hann window
    (applied once in analysis and once in synthesis, giving Hann overall),
    circularly shifted so the frame centre sits at the time origin, and
    transformed with a real FFT.  ``synth`` inverts the process with
    ``olaFac``-times overlap-add.

    Helper functions (``roundUpToPowerOf2``, ``getNFrame``, ``getWindow``,
    ``getFrame``, ``getFrameRange``, ``magnPhaseToFSig``) come from the
    project's ``common`` module.
    """

    def __init__(self, sr, **kwargs):
        """
        Parameters
        ----------
        sr : float
            Sample rate in Hz.
        hopSize : int, optional keyword
            Analysis hop in samples; defaults to ~5 ms rounded up to a
            power of two.
        olaFac : int, optional keyword
            Overlap-add factor (analysis frames per hop), default 2.
        """
        self.samprate = float(sr)
        self.hopSize = kwargs.get("hopSize", roundUpToPowerOf2(self.samprate * 0.005))
        self.olaFac = int(kwargs.get("olaFac", 2))

    def analyze(self, x):
        """Compute an STFT of ``x``.

        Parameters
        ----------
        x : 1-D float64 array
            Input signal.

        Returns
        -------
        (magnList, phaseList) : pair of (nFrame, nBin) float64 arrays
            Window-normalised magnitudes and unwrapped phases, with
            ``nFrame = nHop * olaFac`` and ``nBin = hopSize + 1``.
        """
        assert self.olaFac > 0, "olaFac must be positive"

        # constants
        nX = len(x)
        nHop = getNFrame(nX, self.hopSize)
        nFrame = nHop * self.olaFac
        nBin = self.hopSize + 1
        windowFunc, B, windowMean = getWindow("hanning")
        windowSize = 2 * self.hopSize
        halfWindowSize = self.hopSize
        # sqrt window: analysis and synthesis each apply it once
        window = np.sqrt(windowFunc(windowSize))
        windowNormFac = 2.0 / (windowMean * windowSize)

        # do calculate
        magnList = np.zeros((nFrame, nBin), dtype = np.float64)
        phaseList = np.zeros((nFrame, nBin), dtype = np.float64)
        for iFrame in range(nFrame):
            frame = getFrame(x, iFrame * self.hopSize // self.olaFac, windowSize)
            frame *= window
            # circular shift so the frame centre is at sample 0
            # (zero-phase windowing before the FFT)
            tSig = np.zeros(windowSize, dtype = np.float64)
            tSig[:halfWindowSize] = frame[halfWindowSize:]
            tSig[-halfWindowSize:] = frame[:halfWindowSize]
            fSig = np.fft.rfft(tSig)
            magnList[iFrame] = np.abs(fSig) * windowNormFac
            phaseList[iFrame] = np.unwrap(np.angle(fSig))
        return magnList, phaseList

    def synth(self, *args):
        """Resynthesise a signal by overlap-add.

        Accepts either a complex spectrogram ``synth(fSigList)`` or a
        magnitude/phase pair ``synth(magnList, phaseList)``; each array
        must have shape ``(nFrame, hopSize + 1)``.

        Returns
        -------
        out : 1-D float64 array of length ``(nFrame // olaFac) * hopSize``

        Raises
        ------
        ValueError
            If neither one nor two arrays are supplied.
        """
        # constants
        nFrame, nBin = args[0].shape
        nHop = nFrame // self.olaFac
        nOut = nHop * self.hopSize
        windowFunc, B, windowMean = getWindow("hanning")
        windowSize = 2 * self.hopSize
        halfWindowSize = self.hopSize
        window = np.sqrt(windowFunc(windowSize))

        # check input
        assert nBin == self.hopSize + 1, "bin count does not match hopSize"

        # synth
        out = np.zeros(nOut, dtype = np.float64)
        if(len(args) == 1):
            fSigList = args[0]
        elif(len(args) == 2):
            fSigList = magnPhaseToFSig(*args)
        else:
            raise ValueError("Bad input.")
        # Undo the analysis normalisation.  Bug fix: the original used an
        # in-place `*=`, silently mutating the caller's array when a single
        # complex spectrogram was passed in.
        fSigList = fSigList * halfWindowSize
        for iFrame in range(nFrame):
            tSig = np.fft.irfft(fSigList[iFrame])
            ob, oe, ib, ie = getFrameRange(nOut, iFrame * self.hopSize // self.olaFac, windowSize)
            out[ib:ie] += (tSig * window)[ob:oe]
        out /= self.olaFac
        return out
|
normal
|
{
"blob_id": "e0075e4afafba9da70bbcb2ee073b5c1f7782d7d",
"index": 6032,
"step-1": "<mask token>\n\n\nclass Processor:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Processor:\n <mask token>\n <mask token>\n\n def synth(self, *args):\n nFrame, nBin = args[0].shape\n nHop = nFrame // self.olaFac\n nOut = nHop * self.hopSize\n windowFunc, B, windowMean = getWindow('hanning')\n windowSize = 2 * self.hopSize\n halfWindowSize = self.hopSize\n window = np.sqrt(windowFunc(windowSize))\n assert nBin == self.hopSize + 1\n out = np.zeros(nOut, dtype=np.float64)\n if len(args) == 1:\n fSigList = args[0]\n elif len(args) == 2:\n fSigList = magnPhaseToFSig(*args)\n else:\n raise ValueError('Bad input.')\n fSigList *= halfWindowSize\n for iFrame in range(nFrame):\n tSig = np.fft.irfft(fSigList[iFrame])\n ob, oe, ib, ie = getFrameRange(nOut, iFrame * self.hopSize //\n self.olaFac, windowSize)\n out[ib:ie] += (tSig * window)[ob:oe]\n out /= self.olaFac\n return out\n",
"step-3": "<mask token>\n\n\nclass Processor:\n\n def __init__(self, sr, **kwargs):\n self.samprate = float(sr)\n self.hopSize = kwargs.get('hopSize', roundUpToPowerOf2(self.\n samprate * 0.005))\n self.olaFac = int(kwargs.get('olaFac', 2))\n\n def analyze(self, x):\n assert self.olaFac > 0\n nX = len(x)\n nHop = getNFrame(nX, self.hopSize)\n nFrame = nHop * self.olaFac\n nBin = self.hopSize + 1\n windowFunc, B, windowMean = getWindow('hanning')\n windowSize = 2 * self.hopSize\n halfWindowSize = self.hopSize\n window = np.sqrt(windowFunc(windowSize))\n windowNormFac = 2.0 / (windowMean * windowSize)\n magnList = np.zeros((nFrame, nBin), dtype=np.float64)\n phaseList = np.zeros((nFrame, nBin), dtype=np.float64)\n for iFrame in range(nFrame):\n frame = getFrame(x, iFrame * self.hopSize // self.olaFac,\n windowSize)\n frame *= window\n tSig = np.zeros(windowSize, dtype=np.float64)\n tSig[:halfWindowSize] = frame[halfWindowSize:]\n tSig[-halfWindowSize:] = frame[:halfWindowSize]\n fSig = np.fft.rfft(tSig)\n magnList[iFrame] = np.abs(fSig) * windowNormFac\n phaseList[iFrame] = np.unwrap(np.angle(fSig))\n return magnList, phaseList\n\n def synth(self, *args):\n nFrame, nBin = args[0].shape\n nHop = nFrame // self.olaFac\n nOut = nHop * self.hopSize\n windowFunc, B, windowMean = getWindow('hanning')\n windowSize = 2 * self.hopSize\n halfWindowSize = self.hopSize\n window = np.sqrt(windowFunc(windowSize))\n assert nBin == self.hopSize + 1\n out = np.zeros(nOut, dtype=np.float64)\n if len(args) == 1:\n fSigList = args[0]\n elif len(args) == 2:\n fSigList = magnPhaseToFSig(*args)\n else:\n raise ValueError('Bad input.')\n fSigList *= halfWindowSize\n for iFrame in range(nFrame):\n tSig = np.fft.irfft(fSigList[iFrame])\n ob, oe, ib, ie = getFrameRange(nOut, iFrame * self.hopSize //\n self.olaFac, windowSize)\n out[ib:ie] += (tSig * window)[ob:oe]\n out /= self.olaFac\n return out\n",
"step-4": "import numpy as np\nimport scipy.signal as sp\nfrom common import *\n\n\nclass Processor:\n\n def __init__(self, sr, **kwargs):\n self.samprate = float(sr)\n self.hopSize = kwargs.get('hopSize', roundUpToPowerOf2(self.\n samprate * 0.005))\n self.olaFac = int(kwargs.get('olaFac', 2))\n\n def analyze(self, x):\n assert self.olaFac > 0\n nX = len(x)\n nHop = getNFrame(nX, self.hopSize)\n nFrame = nHop * self.olaFac\n nBin = self.hopSize + 1\n windowFunc, B, windowMean = getWindow('hanning')\n windowSize = 2 * self.hopSize\n halfWindowSize = self.hopSize\n window = np.sqrt(windowFunc(windowSize))\n windowNormFac = 2.0 / (windowMean * windowSize)\n magnList = np.zeros((nFrame, nBin), dtype=np.float64)\n phaseList = np.zeros((nFrame, nBin), dtype=np.float64)\n for iFrame in range(nFrame):\n frame = getFrame(x, iFrame * self.hopSize // self.olaFac,\n windowSize)\n frame *= window\n tSig = np.zeros(windowSize, dtype=np.float64)\n tSig[:halfWindowSize] = frame[halfWindowSize:]\n tSig[-halfWindowSize:] = frame[:halfWindowSize]\n fSig = np.fft.rfft(tSig)\n magnList[iFrame] = np.abs(fSig) * windowNormFac\n phaseList[iFrame] = np.unwrap(np.angle(fSig))\n return magnList, phaseList\n\n def synth(self, *args):\n nFrame, nBin = args[0].shape\n nHop = nFrame // self.olaFac\n nOut = nHop * self.hopSize\n windowFunc, B, windowMean = getWindow('hanning')\n windowSize = 2 * self.hopSize\n halfWindowSize = self.hopSize\n window = np.sqrt(windowFunc(windowSize))\n assert nBin == self.hopSize + 1\n out = np.zeros(nOut, dtype=np.float64)\n if len(args) == 1:\n fSigList = args[0]\n elif len(args) == 2:\n fSigList = magnPhaseToFSig(*args)\n else:\n raise ValueError('Bad input.')\n fSigList *= halfWindowSize\n for iFrame in range(nFrame):\n tSig = np.fft.irfft(fSigList[iFrame])\n ob, oe, ib, ie = getFrameRange(nOut, iFrame * self.hopSize //\n self.olaFac, windowSize)\n out[ib:ie] += (tSig * window)[ob:oe]\n out /= self.olaFac\n return out\n",
"step-5": "import numpy as np\nimport scipy.signal as sp\n\nfrom common import *\n\nclass Processor:\n def __init__(self, sr, **kwargs):\n self.samprate = float(sr)\n self.hopSize = kwargs.get(\"hopSize\", roundUpToPowerOf2(self.samprate * 0.005))\n self.olaFac = int(kwargs.get(\"olaFac\", 2))\n\n def analyze(self, x):\n assert(self.olaFac > 0)\n # constant\n nX = len(x)\n nHop = getNFrame(nX, self.hopSize)\n nFrame = nHop * self.olaFac\n nBin = self.hopSize + 1\n windowFunc, B, windowMean = getWindow(\"hanning\")\n\n windowSize = 2 * self.hopSize\n halfWindowSize = self.hopSize\n window = np.sqrt(windowFunc(windowSize))\n windowNormFac = 2.0 / (windowMean * windowSize)\n\n # do calculate\n magnList = np.zeros((nFrame, nBin), dtype = np.float64)\n phaseList = np.zeros((nFrame, nBin), dtype = np.float64)\n for iFrame in range(nFrame):\n frame = getFrame(x, iFrame * self.hopSize // self.olaFac, windowSize)\n frame *= window\n\n tSig = np.zeros(windowSize, dtype = np.float64)\n tSig[:halfWindowSize] = frame[halfWindowSize:]\n tSig[-halfWindowSize:] = frame[:halfWindowSize]\n fSig = np.fft.rfft(tSig)\n magnList[iFrame] = np.abs(fSig) * windowNormFac\n phaseList[iFrame] = np.unwrap(np.angle(fSig))\n return magnList, phaseList\n\n def synth(self, *args):\n # constant\n nFrame, nBin = args[0].shape\n nHop = nFrame // self.olaFac\n nOut = nHop * self.hopSize\n\n windowFunc, B, windowMean = getWindow(\"hanning\")\n windowSize = 2 * self.hopSize\n halfWindowSize = self.hopSize\n window = np.sqrt(windowFunc(windowSize))\n\n # check input\n assert(nBin == self.hopSize + 1)\n\n # synth\n out = np.zeros(nOut, dtype = np.float64)\n if(len(args) == 1):\n fSigList = args[0]\n elif(len(args) == 2):\n fSigList = magnPhaseToFSig(*args)\n else:\n raise ValueError(\"Bad input.\")\n\n fSigList *= halfWindowSize\n for iFrame in range(nFrame):\n tSig = np.fft.irfft(fSigList[iFrame])\n ob, oe, ib, ie = getFrameRange(nOut, iFrame * self.hopSize // self.olaFac, windowSize)\n out[ib:ie] += 
(tSig * window)[ob:oe]\n out /= self.olaFac\n return out\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
class InvalidDictError(Exception):
<|reserved_special_token_0|>
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InvalidDictError(Exception):
"""Raised when the object can not be created from the provided dict."""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DatabaseError(Exception):
<|reserved_special_token_0|>
pass
class InvalidDictError(Exception):
"""Raised when the object can not be created from the provided dict."""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DatabaseError(Exception):
"""Raised when the requested database operation can not be completed."""
pass
class InvalidDictError(Exception):
"""Raised when the object can not be created from the provided dict."""
pass
<|reserved_special_token_1|>
"""This module provides the definition of the exceptions that can be raised from the database module."""
class DatabaseError(Exception):
"""Raised when the requested database operation can not be completed."""
pass
class InvalidDictError(Exception):
"""Raised when the object can not be created from the provided dict."""
pass
|
flexible
|
{
"blob_id": "94130b4962ecff2ea087ab34cf50a084254bf980",
"index": 8948,
"step-1": "<mask token>\n\n\nclass InvalidDictError(Exception):\n <mask token>\n pass\n",
"step-2": "<mask token>\n\n\nclass InvalidDictError(Exception):\n \"\"\"Raised when the object can not be created from the provided dict.\"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass DatabaseError(Exception):\n <mask token>\n pass\n\n\nclass InvalidDictError(Exception):\n \"\"\"Raised when the object can not be created from the provided dict.\"\"\"\n pass\n",
"step-4": "<mask token>\n\n\nclass DatabaseError(Exception):\n \"\"\"Raised when the requested database operation can not be completed.\"\"\"\n pass\n\n\nclass InvalidDictError(Exception):\n \"\"\"Raised when the object can not be created from the provided dict.\"\"\"\n pass\n",
"step-5": "\"\"\"This module provides the definition of the exceptions that can be raised from the database module.\"\"\"\n\nclass DatabaseError(Exception):\n \"\"\"Raised when the requested database operation can not be completed.\"\"\"\n pass\n\nclass InvalidDictError(Exception):\n \"\"\"Raised when the object can not be created from the provided dict.\"\"\"\n pass",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--old-output', required=True, help=
'"Old" output directory to read old checksum file from.')
parser.add_argument('-b', '--brick', required=True, help=
'Brick name to run')
parser.add_argument('-P', '--pickle', dest='pickle_pat', help=
'Pickle filename pattern, default %(default)s', default=
'pickles/runbrick-%(brick)s-%%(stage)s.pickle')
parser.add_argument('-n', '--no-write', dest='write', default=True,
action='store_false')
parser.add_argument('--survey-dir', type=str, default=None, help=
'Override the $LEGACY_SURVEY_DIR environment variable')
parser.add_argument('-d', '--outdir', dest='output_dir', help=
'Set output base directory, default "."')
opt = parser.parse_args()
optdict = vars(opt)
old_output_dir = optdict.pop('old_output')
from legacypipe.runbrick import get_runbrick_kwargs
survey, kwargs = get_runbrick_kwargs(**optdict)
if kwargs in [-1, 0]:
return kwargs
import logging
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
logging.getLogger('tractor.engine').setLevel(lvl + 10)
from legacypipe.survey import LegacySurveyData
old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=
old_output_dir)
kwargs.update(old_survey=old_survey)
brickname = optdict['brick']
from astrometry.util.stages import CallGlobalTime, runstage
prereqs = {'outliers': None}
prereqs.update({'merge_checksums': 'outliers'})
pickle_pat = optdict['pickle_pat']
pickle_pat = pickle_pat % dict(brick=brickname)
stagefunc = CallGlobalTime('stage_%s', globals())
stage = 'merge_checksums'
R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[
stage], write=[], **kwargs)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def stage_merge_checksums(old_survey=None, survey=None, brickname=None, **
kwargs):
"""
For debugging / special-case processing, read previous checksums, and update them with
current checksums values, then write out the result.
"""
from collections import OrderedDict
cfn = old_survey.find_file('checksums', brick=brickname)
print('Old checksums:', cfn)
checksums = OrderedDict()
with open(cfn, 'r') as f:
for line in f.readlines():
words = line.split()
fn = words[1]
if fn.startswith('*'):
fn = fn[1:]
hashcode = words[0]
checksums[fn] = hashcode
with survey.write_output('checksums', brick=brickname, hashsum=False
) as out:
f = open(out.fn, 'w')
for fn, hashsum in survey.output_file_hashes.items():
print('Updating checksum', fn, '=', hashsum)
checksums[fn] = hashsum
for fn, hashsum in checksums.items():
f.write('%s *%s\n' % (hashsum, fn))
f.close()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--old-output', required=True, help=
'"Old" output directory to read old checksum file from.')
parser.add_argument('-b', '--brick', required=True, help=
'Brick name to run')
parser.add_argument('-P', '--pickle', dest='pickle_pat', help=
'Pickle filename pattern, default %(default)s', default=
'pickles/runbrick-%(brick)s-%%(stage)s.pickle')
parser.add_argument('-n', '--no-write', dest='write', default=True,
action='store_false')
parser.add_argument('--survey-dir', type=str, default=None, help=
'Override the $LEGACY_SURVEY_DIR environment variable')
parser.add_argument('-d', '--outdir', dest='output_dir', help=
'Set output base directory, default "."')
opt = parser.parse_args()
optdict = vars(opt)
old_output_dir = optdict.pop('old_output')
from legacypipe.runbrick import get_runbrick_kwargs
survey, kwargs = get_runbrick_kwargs(**optdict)
if kwargs in [-1, 0]:
return kwargs
import logging
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
logging.getLogger('tractor.engine').setLevel(lvl + 10)
from legacypipe.survey import LegacySurveyData
old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=
old_output_dir)
kwargs.update(old_survey=old_survey)
brickname = optdict['brick']
from astrometry.util.stages import CallGlobalTime, runstage
prereqs = {'outliers': None}
prereqs.update({'merge_checksums': 'outliers'})
pickle_pat = optdict['pickle_pat']
pickle_pat = pickle_pat % dict(brick=brickname)
stagefunc = CallGlobalTime('stage_%s', globals())
stage = 'merge_checksums'
R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[
stage], write=[], **kwargs)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def stage_merge_checksums(old_survey=None, survey=None, brickname=None, **
kwargs):
"""
For debugging / special-case processing, read previous checksums, and update them with
current checksums values, then write out the result.
"""
from collections import OrderedDict
cfn = old_survey.find_file('checksums', brick=brickname)
print('Old checksums:', cfn)
checksums = OrderedDict()
with open(cfn, 'r') as f:
for line in f.readlines():
words = line.split()
fn = words[1]
if fn.startswith('*'):
fn = fn[1:]
hashcode = words[0]
checksums[fn] = hashcode
with survey.write_output('checksums', brick=brickname, hashsum=False
) as out:
f = open(out.fn, 'w')
for fn, hashsum in survey.output_file_hashes.items():
print('Updating checksum', fn, '=', hashsum)
checksums[fn] = hashsum
for fn, hashsum in checksums.items():
f.write('%s *%s\n' % (hashsum, fn))
f.close()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--old-output', required=True, help=
'"Old" output directory to read old checksum file from.')
parser.add_argument('-b', '--brick', required=True, help=
'Brick name to run')
parser.add_argument('-P', '--pickle', dest='pickle_pat', help=
'Pickle filename pattern, default %(default)s', default=
'pickles/runbrick-%(brick)s-%%(stage)s.pickle')
parser.add_argument('-n', '--no-write', dest='write', default=True,
action='store_false')
parser.add_argument('--survey-dir', type=str, default=None, help=
'Override the $LEGACY_SURVEY_DIR environment variable')
parser.add_argument('-d', '--outdir', dest='output_dir', help=
'Set output base directory, default "."')
opt = parser.parse_args()
optdict = vars(opt)
old_output_dir = optdict.pop('old_output')
from legacypipe.runbrick import get_runbrick_kwargs
survey, kwargs = get_runbrick_kwargs(**optdict)
if kwargs in [-1, 0]:
return kwargs
import logging
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
logging.getLogger('tractor.engine').setLevel(lvl + 10)
from legacypipe.survey import LegacySurveyData
old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=
old_output_dir)
kwargs.update(old_survey=old_survey)
brickname = optdict['brick']
from astrometry.util.stages import CallGlobalTime, runstage
prereqs = {'outliers': None}
prereqs.update({'merge_checksums': 'outliers'})
pickle_pat = optdict['pickle_pat']
pickle_pat = pickle_pat % dict(brick=brickname)
stagefunc = CallGlobalTime('stage_%s', globals())
stage = 'merge_checksums'
R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[
stage], write=[], **kwargs)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import sys
def stage_merge_checksums(old_survey=None, survey=None, brickname=None, **
kwargs):
"""
For debugging / special-case processing, read previous checksums, and update them with
current checksums values, then write out the result.
"""
from collections import OrderedDict
cfn = old_survey.find_file('checksums', brick=brickname)
print('Old checksums:', cfn)
checksums = OrderedDict()
with open(cfn, 'r') as f:
for line in f.readlines():
words = line.split()
fn = words[1]
if fn.startswith('*'):
fn = fn[1:]
hashcode = words[0]
checksums[fn] = hashcode
with survey.write_output('checksums', brick=brickname, hashsum=False
) as out:
f = open(out.fn, 'w')
for fn, hashsum in survey.output_file_hashes.items():
print('Updating checksum', fn, '=', hashsum)
checksums[fn] = hashsum
for fn, hashsum in checksums.items():
f.write('%s *%s\n' % (hashsum, fn))
f.close()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--old-output', required=True, help=
'"Old" output directory to read old checksum file from.')
parser.add_argument('-b', '--brick', required=True, help=
'Brick name to run')
parser.add_argument('-P', '--pickle', dest='pickle_pat', help=
'Pickle filename pattern, default %(default)s', default=
'pickles/runbrick-%(brick)s-%%(stage)s.pickle')
parser.add_argument('-n', '--no-write', dest='write', default=True,
action='store_false')
parser.add_argument('--survey-dir', type=str, default=None, help=
'Override the $LEGACY_SURVEY_DIR environment variable')
parser.add_argument('-d', '--outdir', dest='output_dir', help=
'Set output base directory, default "."')
opt = parser.parse_args()
optdict = vars(opt)
old_output_dir = optdict.pop('old_output')
from legacypipe.runbrick import get_runbrick_kwargs
survey, kwargs = get_runbrick_kwargs(**optdict)
if kwargs in [-1, 0]:
return kwargs
import logging
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
logging.getLogger('tractor.engine').setLevel(lvl + 10)
from legacypipe.survey import LegacySurveyData
old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=
old_output_dir)
kwargs.update(old_survey=old_survey)
brickname = optdict['brick']
from astrometry.util.stages import CallGlobalTime, runstage
prereqs = {'outliers': None}
prereqs.update({'merge_checksums': 'outliers'})
pickle_pat = optdict['pickle_pat']
pickle_pat = pickle_pat % dict(brick=brickname)
stagefunc = CallGlobalTime('stage_%s', globals())
stage = 'merge_checksums'
R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[
stage], write=[], **kwargs)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#! /usr/bin/env python3
import sys
def stage_merge_checksums(
old_survey=None,
survey=None,
brickname=None,
**kwargs):
'''
For debugging / special-case processing, read previous checksums, and update them with
current checksums values, then write out the result.
'''
from collections import OrderedDict
cfn = old_survey.find_file('checksums', brick=brickname)
print('Old checksums:', cfn)
checksums = OrderedDict()
with open(cfn, 'r') as f:
for line in f.readlines():
words = line.split()
fn = words[1]
if fn.startswith('*'):
fn = fn[1:]
hashcode = words[0]
checksums[fn] = hashcode
# produce per-brick checksum file.
with survey.write_output('checksums', brick=brickname, hashsum=False) as out:
f = open(out.fn, 'w')
# Update hashsums
for fn,hashsum in survey.output_file_hashes.items():
print('Updating checksum', fn, '=', hashsum)
checksums[fn] = hashsum
# Write outputs
for fn,hashsum in checksums.items():
f.write('%s *%s\n' % (hashsum, fn))
f.close()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--old-output', required=True,
help='"Old" output directory to read old checksum file from.')
parser.add_argument('-b', '--brick', required=True,
help='Brick name to run')
parser.add_argument(
'-P', '--pickle', dest='pickle_pat',
help='Pickle filename pattern, default %(default)s',
default='pickles/runbrick-%(brick)s-%%(stage)s.pickle')
parser.add_argument('-n', '--no-write', dest='write', default=True,
action='store_false')
parser.add_argument('--survey-dir', type=str, default=None,
help='Override the $LEGACY_SURVEY_DIR environment variable')
parser.add_argument('-d', '--outdir', dest='output_dir',
help='Set output base directory, default "."')
opt = parser.parse_args()
optdict = vars(opt)
old_output_dir = optdict.pop('old_output')
from legacypipe.runbrick import get_runbrick_kwargs
survey, kwargs = get_runbrick_kwargs(**optdict)
if kwargs in [-1, 0]:
return kwargs
import logging
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
# tractor logging is *soooo* chatty
logging.getLogger('tractor.engine').setLevel(lvl + 10)
from legacypipe.survey import LegacySurveyData
old_survey = LegacySurveyData(survey_dir=old_output_dir,
output_dir=old_output_dir)
kwargs.update(old_survey=old_survey)
brickname = optdict['brick']
from astrometry.util.stages import CallGlobalTime, runstage
prereqs = {
'outliers': None,
}
prereqs.update({
'merge_checksums': 'outliers'
})
pickle_pat = optdict['pickle_pat']
pickle_pat = pickle_pat % dict(brick=brickname)
stagefunc = CallGlobalTime('stage_%s', globals())
stage = 'merge_checksums'
R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[stage],
write=[], **kwargs)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "a98d03b169b59704b3b592cee0b59f5389fd77b3",
"index": 8899,
"step-1": "<mask token>\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True, help=\n '\"Old\" output directory to read old checksum file from.')\n parser.add_argument('-b', '--brick', required=True, help=\n 'Brick name to run')\n parser.add_argument('-P', '--pickle', dest='pickle_pat', help=\n 'Pickle filename pattern, default %(default)s', default=\n 'pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None, help=\n 'Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir', help=\n 'Set output base directory, default \".\"')\n opt = parser.parse_args()\n optdict = vars(opt)\n old_output_dir = optdict.pop('old_output')\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n import logging\n lvl = logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=\n old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n from astrometry.util.stages import CallGlobalTime, runstage\n prereqs = {'outliers': None}\n prereqs.update({'merge_checksums': 'outliers'})\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[\n stage], write=[], **kwargs)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef stage_merge_checksums(old_survey=None, survey=None, brickname=None, **\n kwargs):\n \"\"\"\n For debugging / special-case processing, read previous checksums, and update them with\n current checksums values, then write out the result.\n \"\"\"\n from collections import OrderedDict\n cfn = old_survey.find_file('checksums', brick=brickname)\n print('Old checksums:', cfn)\n checksums = OrderedDict()\n with open(cfn, 'r') as f:\n for line in f.readlines():\n words = line.split()\n fn = words[1]\n if fn.startswith('*'):\n fn = fn[1:]\n hashcode = words[0]\n checksums[fn] = hashcode\n with survey.write_output('checksums', brick=brickname, hashsum=False\n ) as out:\n f = open(out.fn, 'w')\n for fn, hashsum in survey.output_file_hashes.items():\n print('Updating checksum', fn, '=', hashsum)\n checksums[fn] = hashsum\n for fn, hashsum in checksums.items():\n f.write('%s *%s\\n' % (hashsum, fn))\n f.close()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True, help=\n '\"Old\" output directory to read old checksum file from.')\n parser.add_argument('-b', '--brick', required=True, help=\n 'Brick name to run')\n parser.add_argument('-P', '--pickle', dest='pickle_pat', help=\n 'Pickle filename pattern, default %(default)s', default=\n 'pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None, help=\n 'Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir', help=\n 'Set output base directory, default \".\"')\n opt = parser.parse_args()\n optdict = vars(opt)\n old_output_dir = optdict.pop('old_output')\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n import logging\n lvl = 
logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=\n old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n from astrometry.util.stages import CallGlobalTime, runstage\n prereqs = {'outliers': None}\n prereqs.update({'merge_checksums': 'outliers'})\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[\n stage], write=[], **kwargs)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef stage_merge_checksums(old_survey=None, survey=None, brickname=None, **\n kwargs):\n \"\"\"\n For debugging / special-case processing, read previous checksums, and update them with\n current checksums values, then write out the result.\n \"\"\"\n from collections import OrderedDict\n cfn = old_survey.find_file('checksums', brick=brickname)\n print('Old checksums:', cfn)\n checksums = OrderedDict()\n with open(cfn, 'r') as f:\n for line in f.readlines():\n words = line.split()\n fn = words[1]\n if fn.startswith('*'):\n fn = fn[1:]\n hashcode = words[0]\n checksums[fn] = hashcode\n with survey.write_output('checksums', brick=brickname, hashsum=False\n ) as out:\n f = open(out.fn, 'w')\n for fn, hashsum in survey.output_file_hashes.items():\n print('Updating checksum', fn, '=', hashsum)\n checksums[fn] = hashsum\n for fn, hashsum in checksums.items():\n f.write('%s *%s\\n' % (hashsum, fn))\n f.close()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True, help=\n '\"Old\" output directory to read old checksum file from.')\n parser.add_argument('-b', '--brick', required=True, help=\n 'Brick name to run')\n parser.add_argument('-P', '--pickle', dest='pickle_pat', help=\n 'Pickle filename pattern, default %(default)s', default=\n 'pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None, help=\n 'Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir', help=\n 'Set output base directory, default \".\"')\n opt = parser.parse_args()\n optdict = vars(opt)\n old_output_dir = optdict.pop('old_output')\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n import logging\n lvl = 
logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=\n old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n from astrometry.util.stages import CallGlobalTime, runstage\n prereqs = {'outliers': None}\n prereqs.update({'merge_checksums': 'outliers'})\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[\n stage], write=[], **kwargs)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\n\n\ndef stage_merge_checksums(old_survey=None, survey=None, brickname=None, **\n kwargs):\n \"\"\"\n For debugging / special-case processing, read previous checksums, and update them with\n current checksums values, then write out the result.\n \"\"\"\n from collections import OrderedDict\n cfn = old_survey.find_file('checksums', brick=brickname)\n print('Old checksums:', cfn)\n checksums = OrderedDict()\n with open(cfn, 'r') as f:\n for line in f.readlines():\n words = line.split()\n fn = words[1]\n if fn.startswith('*'):\n fn = fn[1:]\n hashcode = words[0]\n checksums[fn] = hashcode\n with survey.write_output('checksums', brick=brickname, hashsum=False\n ) as out:\n f = open(out.fn, 'w')\n for fn, hashsum in survey.output_file_hashes.items():\n print('Updating checksum', fn, '=', hashsum)\n checksums[fn] = hashsum\n for fn, hashsum in checksums.items():\n f.write('%s *%s\\n' % (hashsum, fn))\n f.close()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True, help=\n '\"Old\" output directory to read old checksum file from.')\n parser.add_argument('-b', '--brick', required=True, help=\n 'Brick name to run')\n parser.add_argument('-P', '--pickle', dest='pickle_pat', help=\n 'Pickle filename pattern, default %(default)s', default=\n 'pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None, help=\n 'Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir', help=\n 'Set output base directory, default \".\"')\n opt = parser.parse_args()\n optdict = vars(opt)\n old_output_dir = optdict.pop('old_output')\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n import logging\n lvl = 
logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=\n old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n from astrometry.util.stages import CallGlobalTime, runstage\n prereqs = {'outliers': None}\n prereqs.update({'merge_checksums': 'outliers'})\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[\n stage], write=[], **kwargs)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#! /usr/bin/env python3\nimport sys\n\ndef stage_merge_checksums(\n old_survey=None,\n survey=None,\n brickname=None,\n **kwargs):\n '''\n For debugging / special-case processing, read previous checksums, and update them with\n current checksums values, then write out the result.\n '''\n from collections import OrderedDict\n\n cfn = old_survey.find_file('checksums', brick=brickname)\n print('Old checksums:', cfn)\n checksums = OrderedDict()\n with open(cfn, 'r') as f:\n for line in f.readlines():\n words = line.split()\n fn = words[1]\n if fn.startswith('*'):\n fn = fn[1:]\n hashcode = words[0]\n checksums[fn] = hashcode\n\n # produce per-brick checksum file.\n with survey.write_output('checksums', brick=brickname, hashsum=False) as out:\n f = open(out.fn, 'w')\n # Update hashsums\n for fn,hashsum in survey.output_file_hashes.items():\n print('Updating checksum', fn, '=', hashsum)\n checksums[fn] = hashsum\n # Write outputs\n for fn,hashsum in checksums.items():\n f.write('%s *%s\\n' % (hashsum, fn))\n f.close()\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True,\n help='\"Old\" output directory to read old checksum file from.')\n\n parser.add_argument('-b', '--brick', required=True,\n help='Brick name to run')\n parser.add_argument(\n '-P', '--pickle', dest='pickle_pat',\n help='Pickle filename pattern, default %(default)s',\n default='pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None,\n help='Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir',\n help='Set output base directory, default \".\"')\n\n opt = parser.parse_args()\n optdict = vars(opt)\n\n old_output_dir = optdict.pop('old_output')\n\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = 
get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n\n import logging\n lvl = logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n # tractor logging is *soooo* chatty\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n \n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir,\n output_dir=old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n\n from astrometry.util.stages import CallGlobalTime, runstage\n\n prereqs = {\n 'outliers': None,\n }\n prereqs.update({\n 'merge_checksums': 'outliers'\n })\n\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[stage],\n write=[], **kwargs)\n \nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os

# Parameter sweeps over SKs_model.py. Each run is invoked as:
#   python SKs_model.py <arg1> <train_begin> <train_end> <seed> <activation>
# (argument meanings inferred from the loop variable names below —
#  confirm against SKs_model.py's argument parsing).
# NOTE(review): os.system with a shell string is fragile; subprocess.run
# with an argument list would be the modern replacement.

f_s_list = [2, 1.5, 1, 0.5, 0.2]
g_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]
h_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]
i_seed_list = [1, 12, 123, 1234, 12345, 123456]

# Sweep the first (scale-like) parameter.
for s in f_s_list:
    os.system(f"python SKs_model.py {s} 0 10000 0 relu")

# Sweep the training-set end index.
for train_end in g_end_list:
    os.system(f"python SKs_model.py 0.2 0 {train_end} 0 relu")

# Sweep disjoint 10k-wide training windows.
for train_begin, train_end in h_i_list:
    os.system(f"python SKs_model.py 0.2 {train_begin} {train_end} 0 relu")

# Sweep random seeds.
for seed in i_seed_list:
    os.system(f"python SKs_model.py 0.2 0 10000 {seed} relu")

# Sweep activation functions.
for activation in ["sigmoid", "relu"]:
    os.system(f"python SKs_model.py 0.2 0 10000 0 {activation}")
|
normal
|
{
"blob_id": "56a681015ea27e2c8e00ab8bcc8019d5987c4ee1",
"index": 6949,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor s in f_s_list:\n os.system('python SKs_model.py ' + str(s) + ' 0 10000 0 relu')\nfor train_end in g_end_list:\n os.system('python SKs_model.py 0.2 0 ' + str(train_end) + ' 0 relu')\nfor train_begin, train_end in h_i_list:\n os.system('python SKs_model.py 0.2 ' + str(train_begin) + ' ' + str(\n train_end) + ' 0 relu')\nfor seed in i_seed_list:\n os.system('python SKs_model.py 0.2 0 10000 ' + str(seed) + ' relu')\nfor activation in ['sigmoid', 'relu']:\n os.system('python SKs_model.py 0.2 0 10000 0 ' + activation)\n",
"step-3": "<mask token>\nf_s_list = [2, 1.5, 1, 0.5, 0.2]\ng_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]\nh_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]\ni_seed_list = [1, 12, 123, 1234, 12345, 123456]\nfor s in f_s_list:\n os.system('python SKs_model.py ' + str(s) + ' 0 10000 0 relu')\nfor train_end in g_end_list:\n os.system('python SKs_model.py 0.2 0 ' + str(train_end) + ' 0 relu')\nfor train_begin, train_end in h_i_list:\n os.system('python SKs_model.py 0.2 ' + str(train_begin) + ' ' + str(\n train_end) + ' 0 relu')\nfor seed in i_seed_list:\n os.system('python SKs_model.py 0.2 0 10000 ' + str(seed) + ' relu')\nfor activation in ['sigmoid', 'relu']:\n os.system('python SKs_model.py 0.2 0 10000 0 ' + activation)\n",
"step-4": "import os\nf_s_list = [2, 1.5, 1, 0.5, 0.2]\ng_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]\nh_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]\ni_seed_list = [1, 12, 123, 1234, 12345, 123456]\nfor s in f_s_list:\n os.system('python SKs_model.py ' + str(s) + ' 0 10000 0 relu')\nfor train_end in g_end_list:\n os.system('python SKs_model.py 0.2 0 ' + str(train_end) + ' 0 relu')\nfor train_begin, train_end in h_i_list:\n os.system('python SKs_model.py 0.2 ' + str(train_begin) + ' ' + str(\n train_end) + ' 0 relu')\nfor seed in i_seed_list:\n os.system('python SKs_model.py 0.2 0 10000 ' + str(seed) + ' relu')\nfor activation in ['sigmoid', 'relu']:\n os.system('python SKs_model.py 0.2 0 10000 0 ' + activation)\n",
"step-5": "import os\n\nf_s_list = [2, 1.5, 1, 0.5, 0.2]\n\ng_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]\n\nh_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]\n\ni_seed_list = [1, 12, 123, 1234, 12345, 123456]\n\nfor s in f_s_list:\n os.system(\"python SKs_model.py \" + str(s) + \" 0 10000 0 relu\")\n\nfor train_end in g_end_list:\n os.system(\"python SKs_model.py 0.2 0 \" + str(train_end) + \" 0 relu\")\n\nfor train_begin, train_end in h_i_list:\n os.system(\"python SKs_model.py 0.2 \" + str(train_begin) + \" \" + str(train_end) + \" 0 relu\")\n\nfor seed in i_seed_list:\n os.system(\"python SKs_model.py 0.2 0 10000 \" + str(seed) + \" relu\")\n\nfor activation in [\"sigmoid\", \"relu\"]:\n os.system(\"python SKs_model.py 0.2 0 10000 0 \" + activation)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print("Python's Data Type")
print('"Python is very easy" he said.')
print("""No pain
No gain""")
print("""No pain
No gain""")
print("""No pain
No gain""")
<|reserved_special_token_0|>
print('Ha\tHa\tHa')
print('역슬래시 \\')
print('쌍따옴표 "')
print("홑따옴표 '")
<|reserved_special_token_1|>
#1.문자열에 홑따옴표 포함기키기 : 쌍따옴표
print("Python's Data Type")
#2.문자열에 쌍따옴표 포함시키기 : 홑따옴표
print('"Python is very easy" he said.')
#멀티라인(여러줄)표현하기
#1. 연속된 쌍따옴표 3개 사용하기
print("""No pain
No gain""")
#2. 연속된 쌍따옴표 3개 사용하기
print('''No pain
No gain''')
#3.이스케이프 코드 \n 삽입하기
print("No pain \n No gain")
"""
이스케이프(escape) 문자
\n :new line. 문자열 안에서 줄을 바꿀 때 사용
\t :tap.문자열 사이에 탭만큼의 간격을 줄 때 사용
\\ :문자 \를 그대로 표현할 때 사용
\' :홑따옴표를 그대로 표현할 때 사용
\" :쌍따옴표를 그대로 표현할 때 사용
"""
print("Ha\tHa\tHa")
print("역슬래시 \\")
print("쌍따옴표 \"")
print("홑따옴표 \'")
|
flexible
|
{
"blob_id": "eb81f1825c4ac8e20dde1daefbdad22f588e696e",
"index": 9431,
"step-1": "<mask token>\n",
"step-2": "print(\"Python's Data Type\")\nprint('\"Python is very easy\" he said.')\nprint(\"\"\"No pain\n No gain\"\"\")\nprint(\"\"\"No pain\n No gain\"\"\")\nprint(\"\"\"No pain \n No gain\"\"\")\n<mask token>\nprint('Ha\\tHa\\tHa')\nprint('역슬래시 \\\\')\nprint('쌍따옴표 \"')\nprint(\"홑따옴표 '\")\n",
"step-3": "#1.문자열에 홑따옴표 포함기키기 : 쌍따옴표\nprint(\"Python's Data Type\")\n\n#2.문자열에 쌍따옴표 포함시키기 : 홑따옴표\nprint('\"Python is very easy\" he said.')\n\n#멀티라인(여러줄)표현하기\n#1. 연속된 쌍따옴표 3개 사용하기\nprint(\"\"\"No pain\n No gain\"\"\")\n\n#2. 연속된 쌍따옴표 3개 사용하기\nprint('''No pain\n No gain''')\n\n#3.이스케이프 코드 \\n 삽입하기\nprint(\"No pain \\n No gain\")\n\n\"\"\"\n이스케이프(escape) 문자\n\\n :new line. 문자열 안에서 줄을 바꿀 때 사용\n\\t :tap.문자열 사이에 탭만큼의 간격을 줄 때 사용\n\\\\ :문자 \\를 그대로 표현할 때 사용\n\\' :홑따옴표를 그대로 표현할 때 사용\n\\\" :쌍따옴표를 그대로 표현할 때 사용\n\"\"\"\nprint(\"Ha\\tHa\\tHa\")\nprint(\"역슬래시 \\\\\")\nprint(\"쌍따옴표 \\\"\")\nprint(\"홑따옴표 \\'\")\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import preprocessing
import tokenization
import vectorspacemodel
import pickle
import collections
import os
import math
import operator
from itertools import islice
def take(n, iterable):
    """Return the first n items of iterable as a list."""
    head = islice(iterable, n)
    return list(head)
# Working directory; the pickled filename->URL map lives at <cwd>/links.
directory = os.getcwd()
links_path = os.path.join(directory, 'links')

# Getting Index from pickle dump.
# Repeatedly unpickle until EOF; the last object successfully loaded wins.
with open("D_INDEXED_FILE/index", 'rb') as f:
    while True:
        try:
            index = pickle.load(f)
        except EOFError:
            break
inv_index = index

# Getting Document vectors from pickle dump.
# dv maps vector filename -> document vector (presumably a sorted list of
# [weight, term_index] pairs, judging by the similarity loop below — confirm).
dv = {}
vec_files = [file for file in os.listdir("D_INDEXED_FILE/vectors/.") if file.endswith("vector")]
# x = index, y = filename
for x, y in enumerate(vec_files):
    # Open all of the token lists; keep the last pickled object per file.
    with open("D_INDEXED_FILE/vectors/" + y, 'rb') as ff:
        while True:
            try:
                vector = pickle.load(ff)
            except EOFError:
                break
        dv[y] = vector
# By here you will get all document vectors in dv variable.
#print("Document vectors are: ", dv)
# Read the search query and build its normalized sparse tf-idf vector.
query = input("Enter the query: ")
query_vector = []

idf, terms = vectorspacemodel.get_idf(inv_index)
# Sort vocabulary terms alphabetically so each term's position (its vector
# dimension index) is stable and matches the document vectors.
od = collections.OrderedDict(sorted(idf.items()))

processed_query = preprocessing.parse_query(query.lower())
tokenized_query = tokenization.query_tokenization(processed_query)

# Build the sparse query vector: one [weight, dimension] entry per vocabulary
# term present in the query, weighted by tf * idf.
# BUG FIX: the previous remove/append logic doubled the weight only for the
# second occurrence of a term and appended a duplicate dimension entry from
# the third occurrence onward; counting occurrences directly gives identical
# results for tf in {1, 2} and correct weights for tf >= 3.
query_terms = tokenized_query.split()
for x, y in enumerate(od.items()):
    tf = query_terms.count(y[0])
    if tf:
        query_vector.append([y[1] * tf, x])

# Normalize the query vector to unit (Euclidean) length.
# If no query term is in the vocabulary, query_vector is empty, both loops
# are no-ops, and no division by zero can occur.
weight = 0.0
for i in range(len(query_vector)):
    weight = weight + (query_vector[i][0] ** 2)
weight = math.sqrt(weight)
for i in range(len(query_vector)):
    query_vector[i][0] = query_vector[i][0] / weight
# Calculate Similarity between query vector and all document vectors.
# Both are sparse lists of [weight, term_index] pairs; the inner merge relies
# on entries being sorted ascending by term_index (hence the `di < dj` early
# break) — presumably guaranteed by how the vectors were built; confirm.
similarity = {}
for k in dv.keys():
    sim = float(0)
    for i in range(len(query_vector)):
        di = query_vector[i][1]
        #import pdb; pdb.set_trace()
        for j in range(len(dv[k])):
            dj = dv[k][j][1]
            if di == dj:
                # Matching dimension: accumulate the product of weights
                # (dot product; equals cosine similarity assuming both
                # vectors are unit-normalized — TODO confirm for dv).
                mul = query_vector[i][0] * dv[k][j][0]
                sim += mul
                break
            elif di < dj:
                # Document vector has passed this term index; no match here.
                break
    similarity[k] = sim

# Rank documents by similarity, best first, and keep the top 7.
sorted_x = sorted(similarity.items(), key=operator.itemgetter(1), reverse=True)
top_7 = take(7, sorted_x)

# Getting the links file to match each vector filename with its web link.
# Same read-until-EOF pattern: the last pickled object wins.
with open(links_path, 'rb') as f:
    while True:
        try:
            web_links = pickle.load(f)
        except EOFError:
            break

print("Our Search Results are: ")
for x in top_7:
    # Results are sorted descending, so once a zero score is reached every
    # remaining entry is zero too — report "no results" and stop.
    if x[1] == float(0):
        print("No relevant documents found!")
        break
    else:
        for j in web_links.keys():
            # Compare the vector filename minus its last 7 chars (presumably
            # the '.vector' suffix) against the link key minus its last
            # 5 chars (presumably '.html') — TODO confirm both suffixes.
            if "".join(str(x[0][0:-7])) == j[0:-5]:
                print(repr(web_links[j]).strip('\''))
|
normal
|
{
"blob_id": "1630a3d0becac195feee95a1c3b23568612a48d2",
"index": 3194,
"step-1": "<mask token>\n\n\ndef take(n, iterable):\n return list(islice(iterable, n))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef take(n, iterable):\n return list(islice(iterable, n))\n\n\n<mask token>\nwith open('D_INDEXED_FILE/index', 'rb') as f:\n while True:\n try:\n index = pickle.load(f)\n except EOFError:\n break\n<mask token>\nfor x, y in enumerate(vec_files):\n with open('D_INDEXED_FILE/vectors/' + y, 'rb') as ff:\n while True:\n try:\n vector = pickle.load(ff)\n except EOFError:\n break\n dv[y] = vector\n<mask token>\nfor x, y in enumerate(od.items()):\n for i in tokenized_query.split():\n if i == y[0]:\n if [y[1], x] in query_vector:\n query_vector.remove([y[1], x])\n query_vector.append([y[1] + y[1], x])\n else:\n query_vector.append([y[1], x])\n<mask token>\nfor i in range(len(query_vector)):\n weight = weight + query_vector[i][0] ** 2\n<mask token>\nfor i in range(len(query_vector)):\n query_vector[i][0] = query_vector[i][0] / weight\n<mask token>\nfor k in dv.keys():\n sim = float(0)\n for i in range(len(query_vector)):\n di = query_vector[i][1]\n for j in range(len(dv[k])):\n dj = dv[k][j][1]\n if di == dj:\n mul = query_vector[i][0] * dv[k][j][0]\n sim += mul\n break\n elif di < dj:\n break\n similarity[k] = sim\n<mask token>\nwith open(links_path, 'rb') as f:\n while True:\n try:\n web_links = pickle.load(f)\n except EOFError:\n break\nprint('Our Search Results are: ')\nfor x in top_7:\n if x[1] == float(0):\n print('No relevant documents found!')\n break\n else:\n for j in web_links.keys():\n if ''.join(str(x[0][0:-7])) == j[0:-5]:\n print(repr(web_links[j]).strip(\"'\"))\n",
"step-3": "<mask token>\n\n\ndef take(n, iterable):\n return list(islice(iterable, n))\n\n\ndirectory = os.getcwd()\nlinks_path = os.path.join(directory, 'links')\nwith open('D_INDEXED_FILE/index', 'rb') as f:\n while True:\n try:\n index = pickle.load(f)\n except EOFError:\n break\ninv_index = index\ndv = {}\nvec_files = [file for file in os.listdir('D_INDEXED_FILE/vectors/.') if\n file.endswith('vector')]\nfor x, y in enumerate(vec_files):\n with open('D_INDEXED_FILE/vectors/' + y, 'rb') as ff:\n while True:\n try:\n vector = pickle.load(ff)\n except EOFError:\n break\n dv[y] = vector\nquery = input('Enter the query: ')\nquery_vector = []\nidf, terms = vectorspacemodel.get_idf(inv_index)\nod = collections.OrderedDict(sorted(idf.items()))\nprocessed_query = preprocessing.parse_query(query.lower())\ntokenized_query = tokenization.query_tokenization(processed_query)\nfor x, y in enumerate(od.items()):\n for i in tokenized_query.split():\n if i == y[0]:\n if [y[1], x] in query_vector:\n query_vector.remove([y[1], x])\n query_vector.append([y[1] + y[1], x])\n else:\n query_vector.append([y[1], x])\nweight = 0.0\nfor i in range(len(query_vector)):\n weight = weight + query_vector[i][0] ** 2\nweight = math.sqrt(weight)\nfor i in range(len(query_vector)):\n query_vector[i][0] = query_vector[i][0] / weight\nsimilarity = {}\nfor k in dv.keys():\n sim = float(0)\n for i in range(len(query_vector)):\n di = query_vector[i][1]\n for j in range(len(dv[k])):\n dj = dv[k][j][1]\n if di == dj:\n mul = query_vector[i][0] * dv[k][j][0]\n sim += mul\n break\n elif di < dj:\n break\n similarity[k] = sim\nsorted_x = sorted(similarity.items(), key=operator.itemgetter(1), reverse=True)\ntop_7 = take(7, sorted_x)\nwith open(links_path, 'rb') as f:\n while True:\n try:\n web_links = pickle.load(f)\n except EOFError:\n break\nprint('Our Search Results are: ')\nfor x in top_7:\n if x[1] == float(0):\n print('No relevant documents found!')\n break\n else:\n for j in web_links.keys():\n if 
''.join(str(x[0][0:-7])) == j[0:-5]:\n print(repr(web_links[j]).strip(\"'\"))\n",
"step-4": "import preprocessing\nimport tokenization\nimport vectorspacemodel\nimport pickle\nimport collections\nimport os\nimport math\nimport operator\nfrom itertools import islice\n\n\ndef take(n, iterable):\n return list(islice(iterable, n))\n\n\ndirectory = os.getcwd()\nlinks_path = os.path.join(directory, 'links')\nwith open('D_INDEXED_FILE/index', 'rb') as f:\n while True:\n try:\n index = pickle.load(f)\n except EOFError:\n break\ninv_index = index\ndv = {}\nvec_files = [file for file in os.listdir('D_INDEXED_FILE/vectors/.') if\n file.endswith('vector')]\nfor x, y in enumerate(vec_files):\n with open('D_INDEXED_FILE/vectors/' + y, 'rb') as ff:\n while True:\n try:\n vector = pickle.load(ff)\n except EOFError:\n break\n dv[y] = vector\nquery = input('Enter the query: ')\nquery_vector = []\nidf, terms = vectorspacemodel.get_idf(inv_index)\nod = collections.OrderedDict(sorted(idf.items()))\nprocessed_query = preprocessing.parse_query(query.lower())\ntokenized_query = tokenization.query_tokenization(processed_query)\nfor x, y in enumerate(od.items()):\n for i in tokenized_query.split():\n if i == y[0]:\n if [y[1], x] in query_vector:\n query_vector.remove([y[1], x])\n query_vector.append([y[1] + y[1], x])\n else:\n query_vector.append([y[1], x])\nweight = 0.0\nfor i in range(len(query_vector)):\n weight = weight + query_vector[i][0] ** 2\nweight = math.sqrt(weight)\nfor i in range(len(query_vector)):\n query_vector[i][0] = query_vector[i][0] / weight\nsimilarity = {}\nfor k in dv.keys():\n sim = float(0)\n for i in range(len(query_vector)):\n di = query_vector[i][1]\n for j in range(len(dv[k])):\n dj = dv[k][j][1]\n if di == dj:\n mul = query_vector[i][0] * dv[k][j][0]\n sim += mul\n break\n elif di < dj:\n break\n similarity[k] = sim\nsorted_x = sorted(similarity.items(), key=operator.itemgetter(1), reverse=True)\ntop_7 = take(7, sorted_x)\nwith open(links_path, 'rb') as f:\n while True:\n try:\n web_links = pickle.load(f)\n except EOFError:\n 
break\nprint('Our Search Results are: ')\nfor x in top_7:\n if x[1] == float(0):\n print('No relevant documents found!')\n break\n else:\n for j in web_links.keys():\n if ''.join(str(x[0][0:-7])) == j[0:-5]:\n print(repr(web_links[j]).strip(\"'\"))\n",
"step-5": "import preprocessing\r\nimport tokenization\r\nimport vectorspacemodel\r\nimport pickle\r\nimport collections\r\nimport os\r\nimport math\r\nimport operator\r\nfrom itertools import islice\r\n\r\ndef take(n, iterable):\r\n # \"Return first n items of the iterable as a list\"\r\n return list(islice(iterable, n))\r\n\r\ndirectory = os.getcwd()\r\nlinks_path = os.path.join(directory, 'links')\r\n\r\n# Getting Index from pickle dump\r\nwith open(\"D_INDEXED_FILE/index\", 'rb') as f:\r\n while True:\r\n try:\r\n index = pickle.load(f)\r\n except EOFError:\r\n break\r\ninv_index = index\r\n\r\n# Getting Document vectors from pickle dump\r\ndv = {}\r\nvec_files = [file for file in os.listdir(\"D_INDEXED_FILE/vectors/.\") if file.endswith(\"vector\")]\r\n# x = index, y = filename\r\nfor x, y in enumerate(vec_files):\r\n # Open all of the token lists\r\n with open(\"D_INDEXED_FILE/vectors/\" + y, 'rb') as ff:\r\n while True:\r\n try:\r\n vector = pickle.load(ff)\r\n except EOFError:\r\n break\r\n dv[y] = vector\r\n# By here you will get all document vectors in dv variable\r\n\r\n#print(\"Document vectors are: \", dv)\r\n\r\nquery = input(\"Enter the query: \")\r\nquery_vector = []\r\n\r\n\r\nidf,terms = vectorspacemodel.get_idf(inv_index)\r\nod = collections.OrderedDict(sorted(idf.items()))\r\n#print(\"idf is: \", idf)\r\n#print(\"terms are: \", terms)\r\n\r\nprocessed_query = preprocessing.parse_query(query.lower())\r\n#print(\"processed query is: \", processed_query)\r\ntokenized_query = tokenization.query_tokenization(processed_query)\r\n#print(\"tokenized query is: \", tokenized_query)\r\n\r\n# This code makes the query vector and normalizes it\r\nfor x,y in enumerate((od.items())):\r\n for i in tokenized_query.split():\r\n if i == y[0]:\r\n #print(y[1])\r\n if [y[1],x] in query_vector:\r\n query_vector.remove([y[1], x])\r\n query_vector.append([y[1]+y[1],x])\r\n else:\r\n query_vector.append([y[1],x])\r\n\r\n#print(\"Unnormalized query vector is: \", 
query_vector)\r\n\r\n# Normalizing here\r\nweight = 0.0\r\nfor i in range(len(query_vector)):\r\n weight = weight + (query_vector[i][0] ** 2)\r\nweight = math.sqrt(weight)\r\n# print(\"weight is: \", weight)\r\nfor i in range(len(query_vector)):\r\n query_vector[i][0] = query_vector[i][0] / weight\r\n\r\n#print(\"the Normalized query vector is: \", query_vector)\r\n\r\n# Calculate Similarity between query vector and all document vectors\r\nsimilarity = {}\r\nfor k in dv.keys():\r\n sim = float(0)\r\n for i in range(len(query_vector)):\r\n di = query_vector[i][1]\r\n #import pdb; pdb.set_trace()\r\n for j in range(len(dv[k])):\r\n dj = dv[k][j][1]\r\n if di == dj:\r\n mul = query_vector[i][0] * dv[k][j][0]\r\n sim += mul\r\n #print (mul)\r\n break\r\n elif di < dj:\r\n break\r\n similarity[k] = sim\r\n #print(\"document vector is: \", dv[k])\r\n #print(\"query vector is: \", v1)\r\n #print (\"similarity is: \", sim)\r\n #print(sim)\r\n\r\n#print(\"cosine similarity is: \", similarity)\r\n\r\nsorted_x = sorted(similarity.items(), key=operator.itemgetter(1), reverse=True)\r\n#print(\"Sorted Cosine Similarity\",sorted_x)\r\ntop_7 = take(7, sorted_x)\r\n#print(\"Top 7 documents are: \", top_7)\r\n\r\n# Getting the links file to match file with link\r\nwith open(links_path, 'rb') as f:\r\n while True:\r\n try:\r\n web_links = pickle.load(f)\r\n except EOFError:\r\n break\r\n#print(\"All the web links are: \", web_links)\r\n\r\n\r\n\r\n#print(\"Top 10 documents are:\\n \", (\"\\n\".join(str(x[0][0:-7]) for x in top_5)).strip())\r\nprint(\"Our Search Results are: \")\r\nfor x in top_7:\r\n #print(\"\".join(str(x[0][0:-7])))\r\n if x[1] == float(0):\r\n print(\"No relevant documents found!\")\r\n break\r\n else:\r\n for j in web_links.keys():\r\n if \"\".join(str(x[0][0:-7])) == j[0:-5]:\r\n print(repr(web_links[j]).strip('\\''))\r\n\r\n# print(\"Total document vectors are: \", len(dv))\r\n# print(\"Total unique terms for index are: \", len(inv_index))\r\n# print(\"Total 
unique terms from terms are: \", len(terms))\r\n# print(\"Toal unique terms from idf are: \", len(idf))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import datetime
import time
import boto3
from botocore.config import Config
# FinSpace class with Spark bindings
class SparkFinSpace(FinSpace):
    """FinSpace client with Apache Spark bindings.

    Adds helpers to upload/ingest Spark DataFrames as changesets, read
    FinSpace views back as Spark DataFrames, and translate a Spark schema
    into FinSpace column definitions.
    """
    import pyspark  # class-level import so the annotations below resolve

    def __init__(
        self,
        spark: pyspark.sql.session.SparkSession = None,
        config=Config(retries={'max_attempts': 0, 'mode': 'standard'}),
        dev_overrides: dict = None
    ):
        """Create the client.

        :param spark: active SparkSession; used on the Spark cluster for
            reading views and creating changesets from DataFrames.
        :param config: botocore Config (default: no retries).
            NOTE(review): the default Config is built once at class-definition
            time and shared by every caller that relies on the default.
        :param dev_overrides: optional overrides forwarded to FinSpace.
        """
        FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)
        self.spark = spark

    def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
        """Write *data_frame* as parquet to the user's FinSpace ingestion path.

        :return: the S3 ingestion location the data was written to.
        """
        resp = self.client.get_user_ingestion_info()
        upload_location = resp['ingestionPath']
        data_frame.write.parquet(upload_location)
        return upload_location

    def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,
                         dataset_id: str, change_type: str,
                         wait_for_completion=True):
        """Upload *data_frame* and ingest it into *dataset_id* as a changeset.

        :param change_type: changeset type forwarded to ingest_from_s3.
        :param wait_for_completion: block until the changeset finishes.
        :return: whatever ingest_from_s3 returns.
        """
        print("Uploading data...")
        upload_location = self.upload_dataframe(data_frame)
        print("Data upload finished. Ingesting data...")
        return self.ingest_from_s3(upload_location, dataset_id, change_type,
                                   wait_for_completion,
                                   format_type='parquet', format_params={})

    def read_view_as_spark(self, dataset_id: str, view_id: str):
        """Return view *view_id* of *dataset_id* as a Spark DataFrame.

        Returns None (after printing a message) if the view's run status is
        not SUCCESS yet.

        :raises Exception: if no view matches the id, or more than one does.
        """
        # TODO: switch to DescribeMatz when available in HFS
        views = self.list_views(dataset_id=dataset_id, max_results=50)
        filtered = [v for v in views if v['id'] == view_id]

        if len(filtered) == 0:
            raise Exception('No such view found')
        if len(filtered) > 1:
            raise Exception('Internal Server error')
        view = filtered[0]

        # Only a successfully materialized view has a Glue table behind it.
        if view['status'] != 'SUCCESS':
            status = view['status']
            print(f'view run status is not ready: {status}. Returning empty.')
            return

        glue_db_name = view['destinationTypeProperties']['databaseName']
        glue_table_name = view['destinationTypeProperties']['tableName']

        # The view is materialized as a Glue table; read it via the catalog.
        return self.spark.table(f"`{glue_db_name}`.`{glue_table_name}`")

    def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):
        """Translate *data_frame*'s Spark schema into FinSpace column dicts.

        :return: list of {'dataType', 'name', 'description'} dicts using
            FinSpace data types ('STRING'|'CHAR'|'INTEGER'|'TINYINT'|
            'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|
            'BOOLEAN'|'BINARY').
        """
        # Spark type name -> FinSpace type; unknown types fall back to STRING.
        # NOTE(review): LongType is narrowed to INTEGER (not BIGINT) and
        # DoubleType to FLOAT — this reproduces the original mapping; confirm
        # the narrowing is intentional.
        switcher = {
            "BinaryType": "STRING",
            "BooleanType": "BOOLEAN",
            "ByteType": "INTEGER",
            "DateType": "DATE",
            "DoubleType": "FLOAT",
            "IntegerType": "INTEGER",
            "LongType": "INTEGER",
            "NullType": "STRING",
            "ShortType": "INTEGER",
            "StringType": "STRING",
            "TimestampType": "DATETIME",
        }
        hab_columns = []
        for field in data_frame.schema:
            hab_type = switcher.get(str(field.dataType), "STRING")
            hab_columns.append({
                "dataType": hab_type,
                "name": field.name,
                "description": ""
            })
        return hab_columns
|
normal
|
{
"blob_id": "4f4af4caf81397542e9cd94c50b54303e2f81881",
"index": 3926,
"step-1": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n <mask token>\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. 
Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n\n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame\n ):\n from pyspark.sql.types import StructType\n DoubleType = 'DOUBLE'\n FloatType = 'FLOAT'\n DateType = 'DATE'\n StringType = 'STRING'\n IntegerType = 'INTEGER'\n LongType = 'BIGINT'\n BooleanType = 'BOOLEAN'\n TimestampType = 'DATETIME'\n hab_columns = []\n items = [i for i in data_frame.schema]\n switcher = {'BinaryType': StringType, 'BooleanType': BooleanType,\n 'ByteType': IntegerType, 'DateType': DateType, 'DoubleType':\n FloatType, 'IntegerType': IntegerType, 'LongType': IntegerType,\n 'NullType': StringType, 'ShortType': IntegerType, 'StringType':\n StringType, 'TimestampType': TimestampType}\n for i in items:\n habType = switcher.get(str(i.dataType), StringType)\n hab_columns.append({'dataType': habType, 'name': i.name,\n 'description': ''})\n return hab_columns\n",
"step-4": "import datetime\nimport time\nimport boto3\nfrom botocore.config import Config\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. 
Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n\n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame\n ):\n from pyspark.sql.types import StructType\n DoubleType = 'DOUBLE'\n FloatType = 'FLOAT'\n DateType = 'DATE'\n StringType = 'STRING'\n IntegerType = 'INTEGER'\n LongType = 'BIGINT'\n BooleanType = 'BOOLEAN'\n TimestampType = 'DATETIME'\n hab_columns = []\n items = [i for i in data_frame.schema]\n switcher = {'BinaryType': StringType, 'BooleanType': BooleanType,\n 'ByteType': IntegerType, 'DateType': DateType, 'DoubleType':\n FloatType, 'IntegerType': IntegerType, 'LongType': IntegerType,\n 'NullType': StringType, 'ShortType': IntegerType, 'StringType':\n StringType, 'TimestampType': TimestampType}\n for i in items:\n habType = switcher.get(str(i.dataType), StringType)\n hab_columns.append({'dataType': habType, 'name': i.name,\n 'description': ''})\n return hab_columns\n",
"step-5": "import datetime\nimport time\nimport boto3\nfrom botocore.config import Config\n\n# FinSpace class with Spark bindings\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n def __init__(\n self, \n spark: pyspark.sql.session.SparkSession = None,\n config = Config(retries = {'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict = None\n ):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark # used on Spark cluster for reading views, creating changesets from DataFrames\n \n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n# data_frame.write.option('header', 'true').csv(upload_location)\n data_frame.write.parquet(upload_location)\n return upload_location\n \n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):\n print(\"Uploading data...\")\n upload_location = self.upload_dataframe(data_frame)\n \n print(\"Data upload finished. Ingesting data...\")\n \n return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='parquet', format_params={})\n \n def read_view_as_spark(\n self,\n dataset_id: str,\n view_id: str\n ):\n # TODO: switch to DescribeMatz when available in HFS\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n \n # 0. Ensure view is ready to be read\n if (view['status'] != 'SUCCESS'): \n status = view['status'] \n print(f'view run status is not ready: {status}. 
Returning empty.')\n return\n\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n \n # Query Glue table directly with catalog function of spark\n return self.spark.table(f\"`{glue_db_name}`.`{glue_table_name}`\")\n \n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):\n from pyspark.sql.types import StructType\n\n # for translation to FinSpace's schema\n # 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'\n DoubleType = \"DOUBLE\"\n FloatType = \"FLOAT\"\n DateType = \"DATE\"\n StringType = \"STRING\"\n IntegerType = \"INTEGER\"\n LongType = \"BIGINT\"\n BooleanType = \"BOOLEAN\"\n TimestampType = \"DATETIME\"\n \n hab_columns = []\n\n items = [i for i in data_frame.schema] \n\n switcher = {\n \"BinaryType\" : StringType,\n \"BooleanType\" : BooleanType,\n \"ByteType\" : IntegerType,\n \"DateType\" : DateType,\n \"DoubleType\" : FloatType,\n \"IntegerType\" : IntegerType,\n \"LongType\" : IntegerType,\n \"NullType\" : StringType,\n \"ShortType\" : IntegerType,\n \"StringType\" : StringType,\n \"TimestampType\" : TimestampType,\n }\n\n \n for i in items:\n# print( f\"name: {i.name} type: {i.dataType}\" )\n\n habType = switcher.get( str(i.dataType), StringType)\n\n hab_columns.append({\n \"dataType\" : habType, \n \"name\" : i.name,\n \"description\" : \"\"\n })\n\n return( hab_columns )\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from django.db import models
import string
import random
def id_generator(size=32, chars=string.ascii_uppercase + string.digits):
	"""Return a random string of *size* chars unique among Item.random_str values.

	Draws candidates until one is not already used by an Item row. Uses
	``.exists()`` so the uniqueness probe is a cheap EXISTS query instead of
	fetching and counting every matching row (the original used
	``len(QuerySet) == 0``, which materializes the whole result set).

	NOTE(review): not race-free — two concurrent saves could draw the same
	string between the check and the insert; the column is not declared unique.
	"""
	while True:
		candidate = ''.join(random.choice(chars) for _ in range(size))
		if not Item.objects.filter(random_str=candidate).exists():
			return candidate
# Create your models here.
class Item(models.Model):
	"""Catalogue item whose selling price is derived from cost, markup and discount."""
	name = models.CharField(max_length=999, unique=True)
	description = models.TextField(blank=True)
	# Random slug (see id_generator) to reference the item without exposing its pk.
	random_str = models.CharField(max_length=999, default=id_generator)
	# Purchase cost; `price` is computed from it on first save (see save()).
	original_price = models.FloatField()
	markup_percentage = models.PositiveIntegerField(default=120)
	# Filled automatically in save() when left unset.
	price = models.FloatField(blank=True)
	discount_percentage = models.PositiveIntegerField(default=0)
	# TODO: sizes (translated from Estonian "suurused")
	img = models.ImageField()
	img_2 = models.ImageField(null=True, blank=True)
	img_3 = models.ImageField(null=True, blank=True)
	img_4 = models.ImageField(null=True, blank=True)
	def save(self, *args, **kwargs):
		"""Derive `price` from original_price * markup_percentage / 100 when unset, then save."""
		if self.price is None:
			self.price = self.original_price * self.markup_percentage / 100
		super(Item, self).save(*args, **kwargs)
	def __str__(self):
		"""Name plus price; shows the discounted price and discount when one is set."""
		if self.discount_percentage == 0:
			return self.name + " - " + str(self.price) + "€"
		else:
			return self.name + " - " + str( self.price*((100-self.discount_percentage)/100) ) + "€ - DISCOUNT " + str(self.discount_percentage) + "%"
|
normal
|
{
"blob_id": "efba815fe64cddb5315b17b2cbaf1d3fc38c11ee",
"index": 4995,
"step-1": "<mask token>\n\n\nclass Item(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Item(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n",
"step-3": "<mask token>\n\n\nclass Item(models.Model):\n name = models.CharField(max_length=999, unique=True)\n description = models.TextField(blank=True)\n random_str = models.CharField(max_length=999, default=id_generator)\n original_price = models.FloatField()\n markup_percentage = models.PositiveIntegerField(default=120)\n price = models.FloatField(blank=True)\n discount_percentage = models.PositiveIntegerField(default=0)\n img = models.ImageField()\n img_2 = models.ImageField(null=True, blank=True)\n img_3 = models.ImageField(null=True, blank=True)\n img_4 = models.ImageField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n",
"step-4": "from django.db import models\nimport string\nimport random\n\n\ndef id_generator(size=32, chars=string.ascii_uppercase + string.digits):\n exists = True\n while exists == True:\n ran = ''.join(random.choice(chars) for _ in range(size))\n if len(Item.objects.filter(random_str=ran)) == 0:\n exists = False\n return ran\n\n\nclass Item(models.Model):\n name = models.CharField(max_length=999, unique=True)\n description = models.TextField(blank=True)\n random_str = models.CharField(max_length=999, default=id_generator)\n original_price = models.FloatField()\n markup_percentage = models.PositiveIntegerField(default=120)\n price = models.FloatField(blank=True)\n discount_percentage = models.PositiveIntegerField(default=0)\n img = models.ImageField()\n img_2 = models.ImageField(null=True, blank=True)\n img_3 = models.ImageField(null=True, blank=True)\n img_4 = models.ImageField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n",
"step-5": "from django.db import models\nimport string\nimport random\n\ndef id_generator(size=32, chars=string.ascii_uppercase + string.digits):\n\texists = True\n\twhile exists == True:\n\t\tran = ''.join(random.choice(chars) for _ in range(size))\n\t\tif len(Item.objects.filter(random_str=ran)) == 0:\n\t\t\texists = False\n\n\treturn ran\n\n\n\n# Create your models here.\nclass Item(models.Model):\n\tname = models.CharField(max_length=999, unique=True)\n\tdescription = models.TextField(blank=True)\n\trandom_str = models.CharField(max_length=999, default=id_generator)\n\n\toriginal_price = models.FloatField()\n\tmarkup_percentage = models.PositiveIntegerField(default=120)\n\tprice = models.FloatField(blank=True) \n\tdiscount_percentage = models.PositiveIntegerField(default=0)\n\n#TODO suurused\n\n\n\timg = models.ImageField()\n\timg_2 = models.ImageField(null=True, blank=True)\n\timg_3 = models.ImageField(null=True, blank=True)\n\timg_4 = models.ImageField(null=True, blank=True)\n\n\tdef save(self, *args, **kwargs):\n\t\tif self.price is None:\n\t\t\tself.price = self.original_price * self.markup_percentage / 100\n\t\tsuper(Item, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\tif self.discount_percentage == 0:\n\t\t\treturn self.name + \" - \" + str(self.price) + \"€\"\n\t\telse:\n\t\t\treturn self.name + \" - \" + str( self.price*((100-self.discount_percentage)/100) ) + \"€ - DISCOUNT \" + str(self.discount_percentage) + \"%\"",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
import inputoutput
def xor_encryption(source, destination, key):
    """
    Encrypt or decrypt a file with a repeating-key XOR cipher.

    XOR is its own inverse, so running this twice with the same key
    restores the original bytes.

    Keyword arguments:
    source - path to file with text to be encrypted
    destination - path to the file where you want to save the result
    key - encryption key
    """
    plain_bytes = inputoutput.read_from_file(source, "b")
    key_bytes = bytearray(key, 'utf-8')
    key_len = len(key_bytes)
    cipher_bytes = bytearray(
        value ^ key_bytes[index % key_len]
        for index, value in enumerate(plain_bytes)
    )
    inputoutput.write_to_file(cipher_bytes, destination, "b")
# def write_to_file(data, filename):
# """
# Write binary data to file
# Keyword arguments:
# data - binary data to be written
# filename - path to the file where you want to save the result
# """
# f = open(filename, 'wb')
# f.write(data)
# f.close()
# def read_from_file(filename):
# """
# Read binary data from file
# Keyword arguments:
# filename - path to the file where you want to save the result
# Returns:
# data - binary data from file
# """
# f = open(filename, 'rb')
# data = f.read()
# f.close()
# return data
key = 'verystongk'
# Encrypt the original file (comment translated from Russian "Шифрование")
xor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)
# Decrypt it back to verify the round trip (translated from "Расшифрование")
xor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)
|
normal
|
{
"blob_id": "81774d3b4d9fbf22ed19e1cba7ec5e8e3707f51a",
"index": 2076,
"step-1": "<mask token>\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, 'b')\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, 'b')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, 'b')\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, 'b')\n\n\n<mask token>\nxor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)\nxor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)\n",
"step-3": "<mask token>\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, 'b')\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, 'b')\n\n\nkey = 'verystongk'\nxor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)\nxor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)\n",
"step-4": "import inputoutput\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, 'b')\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, 'b')\n\n\nkey = 'verystongk'\nxor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)\nxor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)\n",
"step-5": "import inputoutput\n\n\ndef xor_encryption(source, destination, key):\n \"\"\"\n Returns text encrypted or decrypted with xor\n\n Keyword arguments:\n source - path to file with text to be encrypted\n destination - path to the file where you want to save the result\n key - encryption key\n \"\"\"\n text = inputoutput.read_from_file(source, \"b\")\n # text = read_from_file(source)\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, \"b\")\n\n\n# def write_to_file(data, filename):\n# \"\"\"\n# Write binary data to file\n\n# Keyword arguments:\n# data - binary data to be written\n# filename - path to the file where you want to save the result\n# \"\"\"\n# f = open(filename, 'wb')\n# f.write(data)\n# f.close()\n\n\n# def read_from_file(filename):\n# \"\"\"\n# Read binary data from file\n\n# Keyword arguments:\n# filename - path to the file where you want to save the result\n\n# Returns:\n# data - binary data from file\n# \"\"\"\n# f = open(filename, 'rb')\n# data = f.read()\n# f.close()\n# return data\n\n\nkey = 'verystongk'\n# Шифрование\nxor_encryption('sixth_practice/text.txt', 'sixth_practice/text1.txt', key)\n# Расшифрование\nxor_encryption('sixth_practice/text1.txt', 'sixth_practice/text2.txt', key)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# from datetime import datetime
from datetime import datetime, time, timedelta
# today = datetime.now()
# previous_day = today - timedelta(days=1)
# previous_day = previous_day.strftime("%Y%m%d")
# print(today)
# print(previous_day)
# Parse the date and echo it back as a midnight timestamp string.
parsed_date = datetime.strptime("2013-1-25", '%Y-%m-%d')
print(parsed_date.strftime('%Y-%m-%d 00:00:00'))
# def is_midnight():
# current_time = datetime.now().time
# match_date = datetime.now()
# # current_time = datetime.now()
# if current_time >= time(0,0) and current_time <= time(4, 30):
# print(match_date)
# return match_date.strftime("%Y%m%d")
# else:
# match_date = current_time - timedelta(days=1)
# return match_date
# print(is_midnight())
# def is_midnight():
# current_time = datetime.now().time()
# # today = datetime.now()
# previous_day = current_time - timedelta(days=1)
# previous_day = previous_day.strftime("%Y%m%d")
# if current_time >= time(0,0) and current_time <= time(4, 30):
# print('yesy')
# if time(0,0) < time(4, 30):
# return current_time >= time(0,0) and current_time <= time(4, 30)
# else: # crosses midnight
# return current_time >= time(0,0) or current_time <= time(4, 30)
# print(is_midnight())
# match_date = datetime.now()
# current_time = datetime.now().time()
# if current_time >= time(0,0) and current_time <= time(4, 30):
# previous_day = match_date - timedelta(days=1)
# match_date = previous_day.strftime("%Y%m%d")
# print(match_date)
# match_date = datetime.now().strftime("%Y-%m-%d")
# # datetime.timedelta(days=1)
# hour = datetime.now().strftime("%H")
# if int(hour) < 4:
# print('Previous day: %s' % (datetime.timedelta(days=1)))
# else:
# print(match_date)
# print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# import cx_Oracle
# connection = None
# try:
# connection = cx_Oracle.connect(
# 'JW',
# '901203',
# 'HOME-PC/XE',
# encoding='UTF-8')
# # show the version of the Oracle Database
# print(connection.version)
# c = connection.cursor()
# c.execute('SELECT MATCH_ID,MATCH_DATETIME,LEAGUE,HOME_TEAM,AWAY_TEAM,HOME_FT_GOAL,AWAY_FT_GOAL,CASE WHEN HOME_FT_GOAL > AWAY_FT_GOAL THEN \'H\' WHEN HOME_FT_GOAL = AWAY_FT_GOAL THEN \'D\' ELSE \'A\' END HDA_RESULT,DRAW_MEAN,DRAW_MEDIAN,O_MACAU_D,O_BET365_D,O_HKJC_D,AWAY_MEAN,AWAY_MEDIAN,O_MACAU_A,O_BET365_A,O_HKJC_A,HKJC_ASIAN_HANDICAP,HKJC_ASIAN_AWAY FROM HDA_MEAN_VIEW WHERE MATCH_HANDICAP=\'上盤\' AND HOME_IND=0 AND DRAW_IND=1 AND AWAY_IND=1 ORDER BY MATCH_DATETIME DESC')
# for row in c:
# print(row[0],',',row[1])
# except cx_Oracle.Error as error:
# print(error)
# finally:
# # release the connection
# if connection:
# connection.close()
|
normal
|
{
"blob_id": "4dac8e7e695c473cb73ceaf3887373bcc0a08aff",
"index": 5940,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(datetime.strptime('2013-1-25', '%Y-%m-%d').strftime('%Y-%m-%d 00:00:00'))\n",
"step-3": "from datetime import datetime, time, timedelta\nprint(datetime.strptime('2013-1-25', '%Y-%m-%d').strftime('%Y-%m-%d 00:00:00'))\n",
"step-4": "# from datetime import datetime\nfrom datetime import datetime, time, timedelta\n# today = datetime.now()\n# previous_day = today - timedelta(days=1)\n# previous_day = previous_day.strftime(\"%Y%m%d\")\n# print(today)\n# print(previous_day)\n\nprint(datetime.strptime(\"2013-1-25\", '%Y-%m-%d').strftime('%Y-%m-%d 00:00:00'))\n\n# def is_midnight():\n# current_time = datetime.now().time\n# match_date = datetime.now()\n# # current_time = datetime.now()\n# if current_time >= time(0,0) and current_time <= time(4, 30):\n# print(match_date)\n# return match_date.strftime(\"%Y%m%d\")\n# else:\n# match_date = current_time - timedelta(days=1)\n# return match_date\n \n# print(is_midnight())\n\n# def is_midnight():\n# current_time = datetime.now().time()\n# # today = datetime.now()\n# previous_day = current_time - timedelta(days=1)\n# previous_day = previous_day.strftime(\"%Y%m%d\")\n# if current_time >= time(0,0) and current_time <= time(4, 30):\n# print('yesy')\n # if time(0,0) < time(4, 30):\n # return current_time >= time(0,0) and current_time <= time(4, 30)\n # else: # crosses midnight\n # return current_time >= time(0,0) or current_time <= time(4, 30)\n\n# print(is_midnight())\n\n\n# match_date = datetime.now()\n# current_time = datetime.now().time()\n# if current_time >= time(0,0) and current_time <= time(4, 30):\n# previous_day = match_date - timedelta(days=1)\n# match_date = previous_day.strftime(\"%Y%m%d\")\n \n# print(match_date)\n\n# match_date = datetime.now().strftime(\"%Y-%m-%d\")\n# # datetime.timedelta(days=1)\n# hour = datetime.now().strftime(\"%H\")\n# if int(hour) < 4:\n# print('Previous day: %s' % (datetime.timedelta(days=1)))\n# else:\n# print(match_date)\n \n# print(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n\n# import cx_Oracle\n\n# connection = None\n# try:\n# connection = cx_Oracle.connect(\n# 'JW',\n# '901203',\n# 'HOME-PC/XE',\n# encoding='UTF-8')\n\n# # show the version of the Oracle Database\n# print(connection.version)\n \n# c = 
connection.cursor()\n# c.execute('SELECT MATCH_ID,MATCH_DATETIME,LEAGUE,HOME_TEAM,AWAY_TEAM,HOME_FT_GOAL,AWAY_FT_GOAL,CASE WHEN HOME_FT_GOAL > AWAY_FT_GOAL THEN \\'H\\' WHEN HOME_FT_GOAL = AWAY_FT_GOAL THEN \\'D\\' ELSE \\'A\\' END HDA_RESULT,DRAW_MEAN,DRAW_MEDIAN,O_MACAU_D,O_BET365_D,O_HKJC_D,AWAY_MEAN,AWAY_MEDIAN,O_MACAU_A,O_BET365_A,O_HKJC_A,HKJC_ASIAN_HANDICAP,HKJC_ASIAN_AWAY FROM HDA_MEAN_VIEW WHERE MATCH_HANDICAP=\\'上盤\\' AND HOME_IND=0 AND DRAW_IND=1 AND AWAY_IND=1 ORDER BY MATCH_DATETIME DESC')\n# for row in c:\n# print(row[0],',',row[1])\n# except cx_Oracle.Error as error:\n# print(error)\n# finally:\n# # release the connection\n# if connection:\n# connection.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class IssueManager(models.Manager):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IssueManager(models.Manager):
def open(self):
return self.filter(status__is_closed=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IssueManager(models.Manager):
def open(self):
return self.filter(status__is_closed=False)
def closed(self):
return self.filter(status__is_closed=True)
<|reserved_special_token_1|>
from django.db import models
class IssueManager(models.Manager):
    """Manager exposing queryset shortcuts keyed on the related status flag."""
    def open(self):
        """Return issues whose status is not marked closed."""
        return self.filter(status__is_closed=False)
    def closed(self):
        """Return issues whose status is marked closed."""
        return self.filter(status__is_closed=True)
|
flexible
|
{
"blob_id": "4c54cfefbaf90c1dd0648485e62bff1f2787ccfe",
"index": 2784,
"step-1": "<mask token>\n\n\nclass IssueManager(models.Manager):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass IssueManager(models.Manager):\n\n def open(self):\n return self.filter(status__is_closed=False)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass IssueManager(models.Manager):\n\n def open(self):\n return self.filter(status__is_closed=False)\n\n def closed(self):\n return self.filter(status__is_closed=True)\n",
"step-4": "from django.db import models\n\n\nclass IssueManager(models.Manager):\n\n def open(self):\n return self.filter(status__is_closed=False)\n\n def closed(self):\n return self.filter(status__is_closed=True)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib import request,parse
# req = request.Request('https://api.douban.com/v2/book/2129650')
# req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36')
# with request.urlopen(req) as f:
# data = f.read()
# print('Status:', f.status, f.reason)
# for k, v in f.getheaders():
# print('%s:%s' % (k, v))
# print('Data:', data.decode('utf-8'))
# NOTE(review): despite the prompt text, the request below targets
# chenshuaijun.com, not weibo.com — confirm which endpoint is intended.
print('Login to weibo.com')
email = input('Email:')
# NOTE(review): the password is echoed to the terminal; getpass.getpass
# would read it without echo.
passwd = input('Password:')
# Form-encode the login fields of the mobile Weibo sign-in form.
login_data = parse.urlencode([
    ('username', email),
    ('password', passwd),
    ('entry', 'mwei'),
    ('client_id', ''),
    ('savestate', 1),
    ('ec', ''),
    ('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F')
])
req = request.Request('https://chenshuaijun.com')
req.add_header('Host', 'chenshuaijun.com')
# Present a desktop Chrome user-agent string.
req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36')
# POST the form and dump the status line, response headers and body.
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
    print('Status:', f.status, f.reason)
    for k, v in f.getheaders():
        print('%s: %s' % (k, v))
    print('Data:', f.read().decode('utf-8'))
|
normal
|
{
"blob_id": "9bd63181de024c2f4517defa9ed51bdbc8d610d2",
"index": 6025,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Login to weibo.com')\n<mask token>\nreq.add_header('Host', 'chenshuaijun.com')\nreq.add_header('User-Agent',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'\n )\nwith request.urlopen(req, data=login_data.encode('utf-8')) as f:\n print('Status:', f.status, f.reason)\n for k, v in f.getheaders():\n print('%s: %s' % (k, v))\n print('Data:', f.read().decode('utf-8'))\n",
"step-3": "<mask token>\nprint('Login to weibo.com')\nemail = input('Email:')\npasswd = input('Password:')\nlogin_data = parse.urlencode([('username', email), ('password', passwd), (\n 'entry', 'mwei'), ('client_id', ''), ('savestate', 1), ('ec', ''), (\n 'pagerefer',\n 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F'\n )])\nreq = request.Request('https://chenshuaijun.com')\nreq.add_header('Host', 'chenshuaijun.com')\nreq.add_header('User-Agent',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'\n )\nwith request.urlopen(req, data=login_data.encode('utf-8')) as f:\n print('Status:', f.status, f.reason)\n for k, v in f.getheaders():\n print('%s: %s' % (k, v))\n print('Data:', f.read().decode('utf-8'))\n",
"step-4": "from urllib import request, parse\nprint('Login to weibo.com')\nemail = input('Email:')\npasswd = input('Password:')\nlogin_data = parse.urlencode([('username', email), ('password', passwd), (\n 'entry', 'mwei'), ('client_id', ''), ('savestate', 1), ('ec', ''), (\n 'pagerefer',\n 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F'\n )])\nreq = request.Request('https://chenshuaijun.com')\nreq.add_header('Host', 'chenshuaijun.com')\nreq.add_header('User-Agent',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'\n )\nwith request.urlopen(req, data=login_data.encode('utf-8')) as f:\n print('Status:', f.status, f.reason)\n for k, v in f.getheaders():\n print('%s: %s' % (k, v))\n print('Data:', f.read().decode('utf-8'))\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom urllib import request,parse\n\n# req = request.Request('https://api.douban.com/v2/book/2129650')\n# req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36')\n# with request.urlopen(req) as f:\n# data = f.read()\n# print('Status:', f.status, f.reason)\n# for k, v in f.getheaders():\n# print('%s:%s' % (k, v))\n# print('Data:', data.decode('utf-8'))\n\nprint('Login to weibo.com')\nemail = input('Email:')\npasswd = input('Password:')\nlogin_data = parse.urlencode([\n ('username', email),\n ('password', passwd),\n ('entry', 'mwei'),\n ('client_id', ''),\n ('savestate', 1),\n ('ec', ''),\n ('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F')\n])\n\nreq = request.Request('https://chenshuaijun.com')\nreq.add_header('Host', 'chenshuaijun.com')\nreq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36')\n\nwith request.urlopen(req, data=login_data.encode('utf-8')) as f:\n print('Status:', f.status, f.reason)\n for k, v in f.getheaders():\n print('%s: %s' % (k, v))\n print('Data:', f.read().decode('utf-8'))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution(object):
    def findPaths(self, m, n, N, i, j):
        """
        Count the paths that move a ball off an m x n grid in at most N
        steps, starting from cell (i, j), modulo 10**9 + 7 (LeetCode 576).

        :type m: int
        :type n: int
        :type N: int
        :type i: int
        :type j: int
        :rtype: int
        """
        MOD = 10 ** 9 + 7
        # BUG FIX: the original stored the four moves as `zip(...)`, which in
        # Python 3 is a one-shot iterator — it was exhausted after the first
        # grid cell of the first step, so every later cell saw no moves and
        # the count came out wrong. A tuple can be iterated repeatedly.
        moves = ((1, 0), (0, 1), (-1, 0), (0, -1))
        # dp[x][y] = number of ways to be at (x, y) after the current step count.
        dp = [[0] * n for _ in range(m)]
        dp[i][j] = 1
        ans = 0
        for _ in range(N):
            ndp = [[0] * n for _ in range(m)]
            for x in range(m):
                for y in range(n):
                    if not dp[x][y]:
                        continue  # nothing to propagate from this cell
                    for dx, dy in moves:
                        nx, ny = x + dx, y + dy
                        if 0 <= nx < m and 0 <= ny < n:
                            ndp[nx][ny] = (ndp[nx][ny] + dp[x][y]) % MOD
                        else:
                            # stepping off the board completes one path
                            ans = (ans + dp[x][y]) % MOD
            dp = ndp
        return ans
|
normal
|
{
"blob_id": "ebbc79d6582f7d6139e0dcec6333b679bb86c63c",
"index": 1383,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def findPaths(self, m, n, N, i, j):\n \"\"\"\n :type m: int\n :type n: int\n :type N: int\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n MOD = 10 ** 9 + 7\n dz = zip((1, 0, -1, 0), (0, 1, 0, -1))\n dp = [([0] * n) for x in range(m)]\n dp[i][j] = 1\n ans = 0\n for _ in range(N):\n ndp = [([0] * n) for x in range(m)]\n for x in range(m):\n for y in range(n):\n for dx, dy in dz:\n nx, ny = x + dx, y + dy\n if 0 <= nx < m and 0 <= ny < n:\n ndp[nx][ny] = (ndp[nx][ny] + dp[x][y]) % MOD\n else:\n ans = (ans + dp[x][y]) % MOD\n dp = ndp\n return ans\n",
"step-4": "class Solution(object):\n def findPaths(self, m, n, N, i, j):\n \"\"\"\n :type m: int\n :type n: int\n :type N: int\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n MOD = 10 ** 9 + 7\n dz = zip((1,0,-1,0),(0,1,0,-1))\n dp = [[0]* n for x in range(m)]\n dp[i][j] = 1\n ans = 0\n for _ in range(N):\n ndp = [[0] * n for x in range(m)]\n for x in range(m):\n for y in range(n):\n for dx,dy in dz:\n nx,ny = x + dx, y+dy\n if 0 <= nx < m and 0 <= ny <n:\n ndp[nx][ny]= (ndp[nx][ny]+dp[x][y])%MOD\n else:\n ans = (ans + dp[x][y])% MOD\n \n dp = ndp\n \n return ans\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import rospy
import numpy as np
from sensor_msgs.msg import Image
import cv2, cv_bridge
from geometry_msgs.msg import Twist, Pose2D
from std_msgs.msg import String
import pytesseract as ocr
from PIL import Image as imagePil
import os
import time
from roseli.srv import CreateMap, CreateMapRequest
from roseli.srv import TagImage, TagImageResponse
from roseli.srv import ResetEnc, ResetEncRequest
from dynamic_reconfigure.server import Server
from roseli.cfg import ocr_tagConfig
class ReadTag:
	"""ROS node helper: OCR a cropped tag image and feed the decoded pose to the mapper.

	Python 2 syntax (print statements, `except X, e`) — runs under ROS/rospy on Python 2.
	"""
	def __init__(self):
		self.bridge = cv_bridge.CvBridge()
		self.twist=Twist()
		self.image_server = rospy.Service('/cropTag', TagImage, self.image_callback) #/cropTag
		self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
		self.range_param = Server(ocr_tagConfig, self.reconfigure)
		self.string = String()
		self._pose2d_ = Pose2D()
		self.rate = rospy.Rate(1)
	def reconfigure(self, config, level):
		"""Dynamic-reconfigure callback: cache the HSV threshold bounds on self."""
		#print(config)
		self.min_h = config.min_hue_ocr
		self.min_s = config.min_saturation_ocr
		self.min_v = config.min_value_ocr
		self.max_h = config.max_hue_ocr
		self.max_s = config.max_saturation_ocr
		self.max_v = config.max_value_ocr
		return config
	def creating_map_client(self, pose2d, ip):
		"""Call the /pose2D service with the decoded pose; return its response (None on failure)."""
		rospy.wait_for_service('/pose2D')
		try:
			create_map = rospy.ServiceProxy('/pose2D', CreateMap)
			resp = CreateMapRequest(pose2d, ip)
			return create_map(resp)
		except rospy.ServiceException, e:
			print "Service call failed: %s"%e
	def reset_enc_func(self):
		"""Ask /reset_enc_server to reset the encoders; return its response (None on failure)."""
		rospy.wait_for_service('/reset_enc_server')
		try:
			reset = rospy.ServiceProxy('/reset_enc_server', ResetEnc)
			resp = ResetEncRequest()
			return reset(resp)
		except rospy.ServiceException, e:
			print "Service call failed: %s"%e
	def image_callback (self, msg):
		"""Service handler: stop the robot, OCR the tag image, report the decoded pose.

		Expects the OCR text to be three space-separated numbers: "x y theta".
		"""
		# Stop the robot while the tag is being processed.
		self.twist.linear.x = 0
		self.twist.angular.z = 0
		self.cmd_vel_pub.publish(self.twist)
		self.rate.sleep()
		try:
			img = self.bridge.imgmsg_to_cv2(msg.tag, "bgr8")
		except cv_bridge.CvBridgeError as e:
			# NOTE(review): execution continues after this handler, so `img`
			# is undefined below and will raise — consider returning early.
			print ("Error: Imagem da Tag nao recebida")
			print(e)
		# Threshold in HSV using the bounds set via dynamic reconfigure.
		lowerBound1=np.array([self.min_h, self.min_s, self.min_v]) #lower boundary of the HSV image
		upperBound1=np.array([self.max_h, self.max_s, self.max_v]) #Upper boundary of the HSV image
		img_HSV=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
		imgThresholder=cv2.inRange(img_HSV,lowerBound1,upperBound1,1)
		cv2.imshow('picamera', img)
		cv2.waitKey(500)
		# Dilate with a 3x3 kernel to close small gaps in the glyphs before OCR.
		kernel = np.ones((3, 3), np.uint8)
		imgFilter=cv2.morphologyEx(imgThresholder, cv2.MORPH_DILATE, kernel)
		#imgFilter=cv2.adaptiveThreshold(imgThresholder, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 1)
		cv2.imshow('window_tag', imgFilter)
		cv2.waitKey(500)
		#cv2.destroyAllWindows()
		#cv2.waitKey(1000)
		# Round-trip through a temp image named after the pid so pytesseract
		# can read it; whitelist digits and '.' only.
		filename = "{}.png".format(os.getpid())
		cv2.imwrite(filename, imgFilter)
		text = ocr.image_to_string(imagePil.open(filename),config="-c tessedit_char_whitelist=1234567890.")
		os.remove(filename)
		print(text)
		separated= text.split(' ')
		if (not len(separated) == 3):
			print("It doesn't read a tag!")
			return TagImageResponse()
		else:
			# Forward the decoded pose (x, y, theta) to the map builder,
			# then reset the wheel encoders.
			self._pose2d_.x = float(separated[0])
			self._pose2d_.y = float(separated[1])
			self._pose2d_.theta = float(separated[2])
			_resp_ = self.creating_map_client(self._pose2d_, 0)
			flag = self.reset_enc_func()
			# Drive forward briefly (10 publishes, 0.5 s apart) to move off the tag.
			self.twist.linear.x = 0.3
			self.twist.angular.z = 0
			for x in range(0, 10):
				self.cmd_vel_pub.publish(self.twist)
				time.sleep(0.5)
			return TagImageResponse()
if __name__=='__main__':
try:
rospy.init_node('readtag')
readtag = ReadTag()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
normal
|
{
"blob_id": "83ce5ee4d2a18caeb364b74c3739015fc0e1474c",
"index": 1344,
"step-1": "#!/usr/bin/env python\n\nimport rospy\nimport numpy as np\nfrom sensor_msgs.msg import Image\nimport cv2, cv_bridge\nfrom geometry_msgs.msg import Twist, Pose2D\nfrom std_msgs.msg import String\nimport pytesseract as ocr\nfrom PIL import Image as imagePil\nimport os\nimport time\nfrom roseli.srv import CreateMap, CreateMapRequest\nfrom roseli.srv import TagImage, TagImageResponse\nfrom roseli.srv import ResetEnc, ResetEncRequest\nfrom dynamic_reconfigure.server import Server\nfrom roseli.cfg import ocr_tagConfig\n\nclass ReadTag:\n\n\tdef __init__(self):\n\t\tself.bridge = cv_bridge.CvBridge()\n\t\tself.twist=Twist()\n\t\tself.image_server = rospy.Service('/cropTag', TagImage, self.image_callback) #/cropTag\n\t\tself.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)\n\t\tself.range_param = Server(ocr_tagConfig, self.reconfigure)\n\t\tself.string = String()\n\t\tself._pose2d_ = Pose2D()\n\t\tself.rate = rospy.Rate(1)\n\n\tdef reconfigure(self, config, level):\n\t\t#print(config)\n\t\tself.min_h = config.min_hue_ocr\n\t\tself.min_s = config.min_saturation_ocr\n\t\tself.min_v = config.min_value_ocr\n\t\tself.max_h = config.max_hue_ocr\n\t\tself.max_s = config.max_saturation_ocr\n\t\tself.max_v = config.max_value_ocr\n\t\treturn config\n\n\tdef creating_map_client(self, pose2d, ip):\n\n\t\trospy.wait_for_service('/pose2D')\n\n\t\ttry:\n\t\t\tcreate_map = rospy.ServiceProxy('/pose2D', CreateMap)\n\t\t\tresp = CreateMapRequest(pose2d, ip)\n\t\t\treturn create_map(resp)\n\t\texcept rospy.ServiceException, e:\n\t\t\tprint \"Service call failed: %s\"%e\n\n\tdef reset_enc_func(self):\n\n rospy.wait_for_service('/reset_enc_server')\n\n try:\n reset = rospy.ServiceProxy('/reset_enc_server', ResetEnc)\n resp = ResetEncRequest()\n return reset(resp)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e\n\n\tdef image_callback (self, msg):\n\t\tself.twist.linear.x = 0\n self.twist.angular.z = 0\n 
self.cmd_vel_pub.publish(self.twist)\n\t\tself.rate.sleep()\n\t\ttry:\n\t\t\timg = self.bridge.imgmsg_to_cv2(msg.tag, \"bgr8\")\n\t\texcept cv_bridge.CvBridgeError as e:\n\t\t\tprint (\"Error: Imagem da Tag nao recebida\")\n\t\t\tprint(e)\n\n\t\tlowerBound1=np.array([self.min_h, self.min_s, self.min_v]) #lower boundary of the HSV image\n\t\tupperBound1=np.array([self.max_h, self.max_s, self.max_v]) #Upper boundary of the HSV image\n\t\timg_HSV=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\t\timgThresholder=cv2.inRange(img_HSV,lowerBound1,upperBound1,1)\n\n\t\tcv2.imshow('picamera', img)\n\t\tcv2.waitKey(500)\n\t\tkernel = np.ones((3, 3), np.uint8)\n\t\timgFilter=cv2.morphologyEx(imgThresholder, cv2.MORPH_DILATE, kernel)\n\t\t#imgFilter=cv2.adaptiveThreshold(imgThresholder, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 1)\n\t\tcv2.imshow('window_tag', imgFilter)\n\t\tcv2.waitKey(500)\n\t\t#cv2.destroyAllWindows()\n\t\t#cv2.waitKey(1000)\n\t\tfilename = \"{}.png\".format(os.getpid())\n\t\tcv2.imwrite(filename, imgFilter)\n\t\ttext = ocr.image_to_string(imagePil.open(filename),config=\"-c tessedit_char_whitelist=1234567890.\")\n\t\tos.remove(filename)\n\t\tprint(text)\n\t\tseparated= text.split(' ')\n\n\t\tif (not len(separated) == 3):\n\t\t\tprint(\"It doesn't read a tag!\")\n\t\t\treturn TagImageResponse()\n\t\telse:\n\t\t\tself._pose2d_.x = float(separated[0])\n\t\t\tself._pose2d_.y = float(separated[1])\n\t\t\tself._pose2d_.theta = float(separated[2])\n\n\t\t\t_resp_ = self.creating_map_client(self._pose2d_, 0)\n\t\t\tflag = self.reset_enc_func()\n\n\t\t\tself.twist.linear.x = 0.3\n\t\t\tself.twist.angular.z = 0\n\t\t\tfor x in range(0, 10):\n\t\t\t\tself.cmd_vel_pub.publish(self.twist)\n\t\t\t\ttime.sleep(0.5)\n\t\treturn TagImageResponse()\n\nif __name__=='__main__':\n\ttry:\n\t\trospy.init_node('readtag')\n\t\treadtag = ReadTag()\n\t\trospy.spin()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
users = {(1): 'Tom', (2): 'Bob', (3): 'Bill'}
elements = {'Au': 'Oltin', 'Fe': 'Temir', 'H': 'Vodorod', 'O': 'Kislorod'}
<|reserved_special_token_1|>
users = {1: "Tom", 2: "Bob", 3: "Bill"}
elements = {"Au": "Oltin", "Fe": "Temir", "H": "Vodorod", "O": "Kislorod"}
|
flexible
|
{
"blob_id": "a24ab93983546f8ae0fab042c121ac52388e62e8",
"index": 2967,
"step-1": "<mask token>\n",
"step-2": "users = {(1): 'Tom', (2): 'Bob', (3): 'Bill'}\nelements = {'Au': 'Oltin', 'Fe': 'Temir', 'H': 'Vodorod', 'O': 'Kislorod'}\n",
"step-3": "users = {1: \"Tom\", 2: \"Bob\", 3: \"Bill\"}\n\nelements = {\"Au\": \"Oltin\", \"Fe\": \"Temir\", \"H\": \"Vodorod\", \"O\": \"Kislorod\"}",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class TestSummary(base.BaseTestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
def test_nulls(self):
s = report.SummaryModel(begin=None, end=None, tenant_id=None,
res_type=None, rate=None)
self.assertIsNone(s.begin)
self.assertIsNone(s.end)
self.assertEqual(s.tenant_id, 'ALL')
self.assertEqual(s.res_type, 'ALL')
self.assertEqual(s.rate, '0')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from oslotest import base
from cloudkitty.api.v1.datamodels import report
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
def test_nulls(self):
s = report.SummaryModel(begin=None, end=None, tenant_id=None,
res_type=None, rate=None)
self.assertIsNone(s.begin)
self.assertIsNone(s.end)
self.assertEqual(s.tenant_id, 'ALL')
self.assertEqual(s.res_type, 'ALL')
self.assertEqual(s.rate, '0')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Copyright 2017 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test SummaryModel objects."""
from oslotest import base
from cloudkitty.api.v1.datamodels import report
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
def test_nulls(self):
s = report.SummaryModel(begin=None,
end=None,
tenant_id=None,
res_type=None,
rate=None)
self.assertIsNone(s.begin)
self.assertIsNone(s.end)
self.assertEqual(s.tenant_id, "ALL")
self.assertEqual(s.res_type, "ALL")
self.assertEqual(s.rate, "0")
|
flexible
|
{
"blob_id": "0ea67ac97ec8e7f287a2430c67f8f7d841d8b646",
"index": 813,
"step-1": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None, end=None, tenant_id=None,\n res_type=None, rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, 'ALL')\n self.assertEqual(s.res_type, 'ALL')\n self.assertEqual(s.rate, '0')\n",
"step-4": "<mask token>\nfrom oslotest import base\nfrom cloudkitty.api.v1.datamodels import report\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None, end=None, tenant_id=None,\n res_type=None, rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, 'ALL')\n self.assertEqual(s.res_type, 'ALL')\n self.assertEqual(s.rate, '0')\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright 2017 Objectif Libre\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\"\"\"Test SummaryModel objects.\"\"\"\nfrom oslotest import base\n\nfrom cloudkitty.api.v1.datamodels import report\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None,\n end=None,\n tenant_id=None,\n res_type=None,\n rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, \"ALL\")\n self.assertEqual(s.res_type, \"ALL\")\n self.assertEqual(s.rate, \"0\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ScheduledEventStatus = Literal[1, 2, 3, 4]
ScheduledEventLocationType = Literal[1, 2, 3]
ScheduledEventPrivacyLevel = Literal[2]
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import annotations
from typing import Literal, TypedDict
from .member import Member
from .snowflake import Snowflake
from .user import User
ScheduledEventStatus = Literal[1, 2, 3, 4]
ScheduledEventLocationType = Literal[1, 2, 3]
ScheduledEventPrivacyLevel = Literal[2]
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
<|reserved_special_token_1|>
"""
The MIT License (MIT)
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Literal, TypedDict
from .member import Member
from .snowflake import Snowflake
from .user import User
ScheduledEventStatus = Literal[1, 2, 3, 4]
ScheduledEventLocationType = Literal[1, 2, 3]
ScheduledEventPrivacyLevel = Literal[2]
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
|
flexible
|
{
"blob_id": "a73dcfc21c31d4e984db39c072d11cb9a9c3d5e5",
"index": 2470,
"step-1": "<mask token>\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-2": "<mask token>\n\n\nclass ScheduledEvent(TypedDict):\n id: Snowflake\n guild_id: Snowflake\n channel_id: Snowflake\n creator_id: Snowflake\n name: str\n description: str\n image: str | None\n scheduled_start_time: str\n scheduled_end_time: str | None\n privacy_level: ScheduledEventPrivacyLevel\n status: ScheduledEventStatus\n entity_type: ScheduledEventLocationType\n entity_id: Snowflake\n entity_metadata: ScheduledEventEntityMetadata\n creator: User\n user_count: int | None\n\n\nclass ScheduledEventEntityMetadata(TypedDict):\n location: str\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-3": "<mask token>\nScheduledEventStatus = Literal[1, 2, 3, 4]\nScheduledEventLocationType = Literal[1, 2, 3]\nScheduledEventPrivacyLevel = Literal[2]\n\n\nclass ScheduledEvent(TypedDict):\n id: Snowflake\n guild_id: Snowflake\n channel_id: Snowflake\n creator_id: Snowflake\n name: str\n description: str\n image: str | None\n scheduled_start_time: str\n scheduled_end_time: str | None\n privacy_level: ScheduledEventPrivacyLevel\n status: ScheduledEventStatus\n entity_type: ScheduledEventLocationType\n entity_id: Snowflake\n entity_metadata: ScheduledEventEntityMetadata\n creator: User\n user_count: int | None\n\n\nclass ScheduledEventEntityMetadata(TypedDict):\n location: str\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-4": "<mask token>\nfrom __future__ import annotations\nfrom typing import Literal, TypedDict\nfrom .member import Member\nfrom .snowflake import Snowflake\nfrom .user import User\nScheduledEventStatus = Literal[1, 2, 3, 4]\nScheduledEventLocationType = Literal[1, 2, 3]\nScheduledEventPrivacyLevel = Literal[2]\n\n\nclass ScheduledEvent(TypedDict):\n id: Snowflake\n guild_id: Snowflake\n channel_id: Snowflake\n creator_id: Snowflake\n name: str\n description: str\n image: str | None\n scheduled_start_time: str\n scheduled_end_time: str | None\n privacy_level: ScheduledEventPrivacyLevel\n status: ScheduledEventStatus\n entity_type: ScheduledEventLocationType\n entity_id: Snowflake\n entity_metadata: ScheduledEventEntityMetadata\n creator: User\n user_count: int | None\n\n\nclass ScheduledEventEntityMetadata(TypedDict):\n location: str\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-5": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Literal, TypedDict\n\nfrom .member import Member\nfrom .snowflake import Snowflake\nfrom .user import User\n\nScheduledEventStatus = Literal[1, 2, 3, 4]\nScheduledEventLocationType = Literal[1, 2, 3]\nScheduledEventPrivacyLevel = Literal[2]\n\n\nclass ScheduledEvent(TypedDict):\n id: Snowflake\n guild_id: Snowflake\n channel_id: Snowflake\n creator_id: Snowflake\n name: str\n description: str\n image: str | None\n scheduled_start_time: str\n scheduled_end_time: str | None\n privacy_level: ScheduledEventPrivacyLevel\n status: ScheduledEventStatus\n entity_type: ScheduledEventLocationType\n entity_id: Snowflake\n entity_metadata: ScheduledEventEntityMetadata\n creator: User\n user_count: int | None\n\n\nclass ScheduledEventEntityMetadata(TypedDict):\n location: str\n\n\nclass 
ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
'''
File Name: bubustatus/utils.py
Author: JackeyGao
mail: junqi.gao@shuyun.com
Created Time: 一 9/14 12:51:37 2015
'''
from rest_framework.views import exception_handler
def custom_exception_handler(exc, context):
# Call REST framework's default exception handler first,
# to get the standard error response.
response = exception_handler(exc, context)
# Now add the HTTP status code to the response.
if response is not None:
response.data['status_code'] = response.status_code
return response
|
normal
|
{
"blob_id": "4e6e4917aee2385fe118d6e58c359a4c9fc50943",
"index": 8617,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n if response is not None:\n response.data['status_code'] = response.status_code\n return response\n",
"step-3": "<mask token>\nfrom rest_framework.views import exception_handler\n\n\ndef custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n if response is not None:\n response.data['status_code'] = response.status_code\n return response\n",
"step-4": "# -*- coding: utf-8 -*-\n'''\nFile Name: bubustatus/utils.py\nAuthor: JackeyGao\nmail: junqi.gao@shuyun.com\nCreated Time: 一 9/14 12:51:37 2015\n'''\nfrom rest_framework.views import exception_handler\n\ndef custom_exception_handler(exc, context):\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler(exc, context)\n\n # Now add the HTTP status code to the response.\n if response is not None:\n response.data['status_code'] = response.status_code\n\n return response\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def check_type(item, target):
assert item == target
def exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],
ret: Any):
def get_value(v):
if isinstance(v, ast.BinOp):
a = get_value(v.left)
b = get_value(v.right)
return a
elif isinstance(v, ast.Name):
return loc.get(v.id)
elif isinstance(v, ast.Call):
args = [get_value(a) for a in v.args]
func = loc.get(v.func.id, None) or glob.get(v.func.id, None)
return func(*args)
elif isinstance(v, ast.List):
return [get_value(e) for e in v.elts]
elif isinstance(v, ast.Constant):
return v.value
seg = get_source_segment(source, v)
return eval(seg, glob, loc)
for line in body:
if isinstance(line, ast.Return):
value = get_value(line.value)
check_type(value, ret)
elif isinstance(line, ast.If):
loc1, loc2 = copy(loc), copy(loc)
exec_lines(source, line.body, loc1, glob, ret)
exec_lines(source, line.orelse, loc2, glob, ret)
elif isinstance(line, ast.Assign):
value = get_value(line.value)
t = line.targets
else:
exec(get_source_segment(source, line), glob, loc)
def check(func):
args = getargs(func.__code__)
hints = get_type_hints(func)
cv = getclosurevars(func)
loc_vars = {n: Any for n in args.args}
ret = hints.pop('return') if 'return' in hints else None
loc_vars.update(hints)
glob_vars = {}
for k, v in cv.globals.items():
if v is np:
glob_vars[k] = NumPy()
else:
glob_vars[k] = defines.get(v, None) or v
source = getsource(func)
f_ast = parse(source).body[0]
body = f_ast.body
exec_lines(source, body, loc_vars, glob_vars, ret)
defines[func] = 1
return func
<|reserved_special_token_1|>
<|reserved_special_token_0|>
defines.update(torch_defs)
def check_type(item, target):
assert item == target
def exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],
ret: Any):
def get_value(v):
if isinstance(v, ast.BinOp):
a = get_value(v.left)
b = get_value(v.right)
return a
elif isinstance(v, ast.Name):
return loc.get(v.id)
elif isinstance(v, ast.Call):
args = [get_value(a) for a in v.args]
func = loc.get(v.func.id, None) or glob.get(v.func.id, None)
return func(*args)
elif isinstance(v, ast.List):
return [get_value(e) for e in v.elts]
elif isinstance(v, ast.Constant):
return v.value
seg = get_source_segment(source, v)
return eval(seg, glob, loc)
for line in body:
if isinstance(line, ast.Return):
value = get_value(line.value)
check_type(value, ret)
elif isinstance(line, ast.If):
loc1, loc2 = copy(loc), copy(loc)
exec_lines(source, line.body, loc1, glob, ret)
exec_lines(source, line.orelse, loc2, glob, ret)
elif isinstance(line, ast.Assign):
value = get_value(line.value)
t = line.targets
else:
exec(get_source_segment(source, line), glob, loc)
def check(func):
args = getargs(func.__code__)
hints = get_type_hints(func)
cv = getclosurevars(func)
loc_vars = {n: Any for n in args.args}
ret = hints.pop('return') if 'return' in hints else None
loc_vars.update(hints)
glob_vars = {}
for k, v in cv.globals.items():
if v is np:
glob_vars[k] = NumPy()
else:
glob_vars[k] = defines.get(v, None) or v
source = getsource(func)
f_ast = parse(source).body[0]
body = f_ast.body
exec_lines(source, body, loc_vars, glob_vars, ret)
defines[func] = 1
return func
<|reserved_special_token_1|>
<|reserved_special_token_0|>
defines = {}
defines.update(torch_defs)
def check_type(item, target):
assert item == target
def exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],
ret: Any):
def get_value(v):
if isinstance(v, ast.BinOp):
a = get_value(v.left)
b = get_value(v.right)
return a
elif isinstance(v, ast.Name):
return loc.get(v.id)
elif isinstance(v, ast.Call):
args = [get_value(a) for a in v.args]
func = loc.get(v.func.id, None) or glob.get(v.func.id, None)
return func(*args)
elif isinstance(v, ast.List):
return [get_value(e) for e in v.elts]
elif isinstance(v, ast.Constant):
return v.value
seg = get_source_segment(source, v)
return eval(seg, glob, loc)
for line in body:
if isinstance(line, ast.Return):
value = get_value(line.value)
check_type(value, ret)
elif isinstance(line, ast.If):
loc1, loc2 = copy(loc), copy(loc)
exec_lines(source, line.body, loc1, glob, ret)
exec_lines(source, line.orelse, loc2, glob, ret)
elif isinstance(line, ast.Assign):
value = get_value(line.value)
t = line.targets
else:
exec(get_source_segment(source, line), glob, loc)
def check(func):
args = getargs(func.__code__)
hints = get_type_hints(func)
cv = getclosurevars(func)
loc_vars = {n: Any for n in args.args}
ret = hints.pop('return') if 'return' in hints else None
loc_vars.update(hints)
glob_vars = {}
for k, v in cv.globals.items():
if v is np:
glob_vars[k] = NumPy()
else:
glob_vars[k] = defines.get(v, None) or v
source = getsource(func)
f_ast = parse(source).body[0]
body = f_ast.body
exec_lines(source, body, loc_vars, glob_vars, ret)
defines[func] = 1
return func
<|reserved_special_token_1|>
import numpy as np
import sympy as sp
from copy import copy
from typing import Any, get_type_hints, Dict
from inspect import getclosurevars, getsource, getargs
import ast
from ast import parse, get_source_segment
from .numpy import NumPy
from .torch import torch_defs
defines = {}
defines.update(torch_defs)
def check_type(item, target):
assert item == target
def exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],
ret: Any):
def get_value(v):
if isinstance(v, ast.BinOp):
a = get_value(v.left)
b = get_value(v.right)
return a
elif isinstance(v, ast.Name):
return loc.get(v.id)
elif isinstance(v, ast.Call):
args = [get_value(a) for a in v.args]
func = loc.get(v.func.id, None) or glob.get(v.func.id, None)
return func(*args)
elif isinstance(v, ast.List):
return [get_value(e) for e in v.elts]
elif isinstance(v, ast.Constant):
return v.value
seg = get_source_segment(source, v)
return eval(seg, glob, loc)
for line in body:
if isinstance(line, ast.Return):
value = get_value(line.value)
check_type(value, ret)
elif isinstance(line, ast.If):
loc1, loc2 = copy(loc), copy(loc)
exec_lines(source, line.body, loc1, glob, ret)
exec_lines(source, line.orelse, loc2, glob, ret)
elif isinstance(line, ast.Assign):
value = get_value(line.value)
t = line.targets
else:
exec(get_source_segment(source, line), glob, loc)
def check(func):
args = getargs(func.__code__)
hints = get_type_hints(func)
cv = getclosurevars(func)
loc_vars = {n: Any for n in args.args}
ret = hints.pop('return') if 'return' in hints else None
loc_vars.update(hints)
glob_vars = {}
for k, v in cv.globals.items():
if v is np:
glob_vars[k] = NumPy()
else:
glob_vars[k] = defines.get(v, None) or v
source = getsource(func)
f_ast = parse(source).body[0]
body = f_ast.body
exec_lines(source, body, loc_vars, glob_vars, ret)
defines[func] = 1
return func
|
flexible
|
{
"blob_id": "430b5ca7212983743cadc36a2ada987bb721174a",
"index": 3537,
"step-1": "<mask token>\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n",
"step-2": "<mask token>\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n",
"step-3": "<mask token>\ndefines = {}\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n",
"step-4": "import numpy as np\nimport sympy as sp\nfrom copy import copy\nfrom typing import Any, get_type_hints, Dict\nfrom inspect import getclosurevars, getsource, getargs\nimport ast\nfrom ast import parse, get_source_segment\nfrom .numpy import NumPy\nfrom .torch import torch_defs\ndefines = {}\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
#!/usr/bin/env python
# coding: utf-8
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
df = pd.read_csv('orb.csv')
d = pd.pivot_table(df,index='col1',columns='col2',values='result')
d.fillna(0,inplace=True)
|
normal
|
{
"blob_id": "ce65a672cae26bdb8ec8cb04eabfe1877f9cd7d4",
"index": 9558,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nd.fillna(0, inplace=True)\n",
"step-3": "<mask token>\ndf = pd.read_csv('orb.csv')\nd = pd.pivot_table(df, index='col1', columns='col2', values='result')\nd.fillna(0, inplace=True)\n",
"step-4": "from sklearn.metrics import confusion_matrix\nimport numpy as np\nimport pandas as pd\ndf = pd.read_csv('orb.csv')\nd = pd.pivot_table(df, index='col1', columns='col2', values='result')\nd.fillna(0, inplace=True)\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nimport pandas as pd\n\n\ndf = pd.read_csv('orb.csv')\nd = pd.pivot_table(df,index='col1',columns='col2',values='result')\nd.fillna(0,inplace=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.