| code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5) |
|---|---|---|---|
# created by Angus Clark 9/2/17, updated 27/2/17
# TODO: implement the traceroute function into this
# Perhaps get rid of the unnecessary intermediate temp file
import socket
import os
import json
import my_traceroute
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '130.56.253.43'
#print host
port = 5201 # Change port (must enable security settings of server)
s.bind((host,port))
s.listen(5)
MAX_HOPS = 30 # max hops for traceroute
while True:
c, addr = s.accept() #accept incoming Connection
f = open('temp.json','wb') # open blank binary to dump incoming data
#print addr[0]
l = c.recv(1024)
while(l):
# Dump data into temp file and get next chunk of data
f.write(l)
l = c.recv(1024)
f.close()
c.close()
tempfile = open('temp.json','rb')
info = json.load(tempfile)
info["UserInfo"]["ip"] = addr[0] # store ip address of sender
last_addr = '0.0.0.0' # placeholder for first iteration
for hop in range(1,MAX_HOPS):
result = my_traceroute.traceroute(hop, info["UserInfo"]["ip"])
#print result
if result == -1:
break
if result[1] == last_addr:
break
info["TRACEROUTE"][str(result[0])] = {}
info["TRACEROUTE"][str(result[0])].update({'node':result[1], 'rtt':result[2]})
last_addr = result[1]
id = info["UserInfo"]["user id"]
timestamp = info["UserInfo"]["timestamp"]
os.system('mkdir /home/ubuntu/data/'+id)
path = "/home/ubuntu/data/" + id + "/"
filename = timestamp + '.json'
savefile = open(path + filename, 'w+')
savefile.write(json.dumps(info))
savefile.close()
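# ---------------------------------------------------------------------------
# Note: the my_traceroute module imported above is not part of this listing.
# The loop only assumes that traceroute(ttl, dest) probes a single hop and
# returns (ttl, responder_ip, rtt_seconds), or -1 on timeout. A minimal,
# hypothetical sketch of such a function follows (names and defaults are
# assumptions; the raw ICMP socket requires root privileges).
import time

def traceroute(ttl, dest, port=33434, timeout=2.0):
    icmp = socket.getprotobyname('icmp')
    recv = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)   # ICMP replies
    send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)       # UDP probe
    recv.settimeout(timeout)
    send.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, ttl)        # limit hop count
    try:
        start = time.time()
        send.sendto(b'', (dest, port))        # empty probe with the given TTL
        _, addr = recv.recvfrom(512)          # time-exceeded / port-unreachable reply
        return (ttl, addr[0], time.time() - start)
    except socket.timeout:
        return -1                             # no reply within the timeout
    finally:
        send.close()
        recv.close()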
|
normal
|
{
"blob_id": "792f62c72f1667f651567314b062d862abbc9aa5",
"index": 6692,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.bind((host, port))\ns.listen(5)\n<mask token>\nwhile True:\n c, addr = s.accept()\n f = open('temp.json', 'wb')\n l = c.recv(1024)\n while l:\n f.write(l)\n l = c.recv(1024)\n f.close()\n c.close()\n tempfile = open('temp.json', 'rb')\n info = json.load(tempfile)\n info['UserInfo']['ip'] = addr[0]\n last_addr = '0.0.0.0'\n for hop in range(1, MAX_HOPS):\n result = my_traceroute.traceroute(hop, info['UserInfo']['ip'])\n if result == -1:\n break\n if result[1] == last_addr:\n break\n info['TRACEROUTE'][str(result[0])] = {}\n info['TRACEROUTE'][str(result[0])].update({'node': result[1], 'rtt':\n result[2]})\n last_addr = result[1]\n id = info['UserInfo']['user id']\n timestamp = info['UserInfo']['timestamp']\n os.system('mkdir /home/ubuntu/data/' + id)\n path = '/home/ubuntu/data/' + id + '/'\n filename = timestamp + '.json'\n savefile = open(path + filename, 'w+')\n savefile.write(json.dumps(info))\n savefile.close()\n",
"step-3": "<mask token>\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = '130.56.253.43'\nport = 5201\ns.bind((host, port))\ns.listen(5)\nMAX_HOPS = 30\nwhile True:\n c, addr = s.accept()\n f = open('temp.json', 'wb')\n l = c.recv(1024)\n while l:\n f.write(l)\n l = c.recv(1024)\n f.close()\n c.close()\n tempfile = open('temp.json', 'rb')\n info = json.load(tempfile)\n info['UserInfo']['ip'] = addr[0]\n last_addr = '0.0.0.0'\n for hop in range(1, MAX_HOPS):\n result = my_traceroute.traceroute(hop, info['UserInfo']['ip'])\n if result == -1:\n break\n if result[1] == last_addr:\n break\n info['TRACEROUTE'][str(result[0])] = {}\n info['TRACEROUTE'][str(result[0])].update({'node': result[1], 'rtt':\n result[2]})\n last_addr = result[1]\n id = info['UserInfo']['user id']\n timestamp = info['UserInfo']['timestamp']\n os.system('mkdir /home/ubuntu/data/' + id)\n path = '/home/ubuntu/data/' + id + '/'\n filename = timestamp + '.json'\n savefile = open(path + filename, 'w+')\n savefile.write(json.dumps(info))\n savefile.close()\n",
"step-4": "import socket\nimport os\nimport json\nimport my_traceroute\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = '130.56.253.43'\nport = 5201\ns.bind((host, port))\ns.listen(5)\nMAX_HOPS = 30\nwhile True:\n c, addr = s.accept()\n f = open('temp.json', 'wb')\n l = c.recv(1024)\n while l:\n f.write(l)\n l = c.recv(1024)\n f.close()\n c.close()\n tempfile = open('temp.json', 'rb')\n info = json.load(tempfile)\n info['UserInfo']['ip'] = addr[0]\n last_addr = '0.0.0.0'\n for hop in range(1, MAX_HOPS):\n result = my_traceroute.traceroute(hop, info['UserInfo']['ip'])\n if result == -1:\n break\n if result[1] == last_addr:\n break\n info['TRACEROUTE'][str(result[0])] = {}\n info['TRACEROUTE'][str(result[0])].update({'node': result[1], 'rtt':\n result[2]})\n last_addr = result[1]\n id = info['UserInfo']['user id']\n timestamp = info['UserInfo']['timestamp']\n os.system('mkdir /home/ubuntu/data/' + id)\n path = '/home/ubuntu/data/' + id + '/'\n filename = timestamp + '.json'\n savefile = open(path + filename, 'w+')\n savefile.write(json.dumps(info))\n savefile.close()\n",
"step-5": "# created by Angus Clark 9/2/17 updated 27/2/17\n# ToDo impliment traceroute function into this \n# Perhaps get rid of unnecessary itemediate temp file\n\nimport socket\nimport os\nimport json\nimport my_traceroute\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \nhost = '130.56.253.43'\n#print host\nport = 5201 # Change port (must enable security settigns of server)\ns.bind((host,port))\ns.listen(5)\nMAX_HOPS = 30 # max hops for traceroute\n\nwhile True:\n c, addr = s.accept() #accept incoming Connection\n f = open('temp.json','wb') # open blank binary to dump incoming data\n #print addr[0]\n l = c.recv(1024)\n while(l):\n # Dump data into temp file and get next chunk of data\n f.write(l)\n l = c.recv(1024)\n f.close()\n c.close()\n \n tempfile = open('temp.json','rb')\n info = json.load(tempfile)\n info[\"UserInfo\"][\"ip\"] = addr[0] # store ip address of sender\n \n last_addr = '0.0.0.0' # placeholder for first iteration\n for hop in range(1,MAX_HOPS):\n result = my_traceroute.traceroute(hop, info[\"UserInfo\"][\"ip\"])\n #print result\n if result == -1:\n break\n if result[1] == last_addr:\n break\n info[\"TRACEROUTE\"][str(result[0])] = {}\n info[\"TRACEROUTE\"][str(result[0])].update({'node':result[1], 'rtt':result[2]})\n last_addr = result[1]\n \n \n id = info[\"UserInfo\"][\"user id\"]\n timestamp = info[\"UserInfo\"][\"timestamp\"]\n \n os.system('mkdir /home/ubuntu/data/'+id)\n path = \"/home/ubuntu/data/\" + id + \"/\"\n filename = timestamp + '.json'\n \n savefile = open(path + filename, 'w+')\n savefile.write(json.dumps(info))\n savefile.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Vista de Autorizaciones (Clientes/Especialistas/Vendedores)."""
from django.shortcuts import render
from dashboard.json2table import convert
from django.utils.translation import ugettext_lazy as _
from api.connection import api
from login.utils.tools import role_admin_check
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import user_passes_test
from dashboard.tools import capitalize as cap, ToolsBackend as Tools
from dashboard.forms import AuthorizationClientFilter
class Autorization:
logo_content_header = "fa fa-key"
vars_page = {}
def generate_header(self, custom_title=None):
if custom_title:
title = "{} - ".format(_("authorizations")).title() + custom_title
else:
title = self.title_content_header
header = {'icon': self.logo_content_header, 'title': title}
return {**header, **self.vars_page}
class AutorizationClient(Autorization):
"""
Manejo de autorizaciones de clientes,
se listan los clientes, en orden de pendiente,
aprobado y rechazado, segun fecha
Para posterior aprovacion o rechazo
"""
@method_decorator(user_passes_test(role_admin_check()))
def list(self, request):
"""
Listado de clientes por autorizar,
se incluyen tambien clientes aprovados y rechazados
"""
obj_api = api()
# actual_page = get_actual_page(request)
token = request.session['token']
title_page = _('User - User Affiliation').title()
filters = {}
form_filters = AuthorizationClientFilter(request.GET)
        if form_filters.is_valid():  # apply any filters that were provided
filters = form_filters.cleaned_data
tools = Tools()
filters['from_date'] = tools.date_format_to_db(date=filters['from_date'])
filters['until_date'] = tools.date_format_to_db(date=filters['until_date'])
filters = form_filters.cleaned_data
if request.method == 'GET':
if 'approve' in request.GET and request.GET['approve']:
pk = request.GET['approve']
data = {"status":1}
obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data)
if 'rejected' in request.GET and request.GET['rejected']:
pk = request.GET['rejected']
data = {"status":2}
obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data)
        # Fetch the data for the listing
data = obj_api.get(slug='authorizations/clients/', arg=filters, token=token)
header_table = [("", "code_seller"), ("", "name"),(
"", "document_type_name"), ( "", "document"),(
"", ""), ("", ""), (
"", "document"), (
"", "approve"), ("", "rejected"), (
"", "date_join")]
        # Multiple header rows, one list per header level
multi_header = [
[
(_("seller code"), {'rowspan': '2'}),
(_('user'), {'rowspan': '1', 'colspan': '3'}),
(_('product'), {'rowspan': '1', 'colspan': '2'}),
(_('user code'), {'rowspan': '2', 'colspan': '1'}),
(_('validation'), {'rowspan': '1', 'colspan': '2'}),
(_('date'), {'rowspan': '2', 'colspan': '1'}),
],
[
(_('name or Social reason'), {}),
(_('type document'), {}),
(_('document number'), {}),
(_('description'), {}),
(_('Query Numbers'), {}),
(_('approve'), {}),
(_('deneis'), {}),
],
]
approve_column = {'type': 'submit', 'data': {'name':'approve','key':'id',
'cls':'btn btn-success','text':cap(_('approve'))}}
rejected_column = {'type': 'submit', 'data': {'name':'rejected','key':'id',
'cls':'btn btn-danger','text':cap(_('rejected'))}}
custom_column = {
"date_join": {'type': 'date', 'data': ('date_join',)},
"approve": {'type': 'if_eval', 'data': ('r["status"]=="0"',),
'next': approve_column},
"rejected": {
'type': 'if_eval',
'data': ('r["status"]=="0"',),
'next': rejected_column
},
}
table = convert(data, header=header_table, multi_header=multi_header, custom_column=custom_column)
        # View title and class variables
vars_page = self.generate_header(custom_title=title_page)
return render(request, 'admin/authorization/clients.html',
{'table': table, 'vars_page': vars_page, 'form_filters':form_filters})
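# ---------------------------------------------------------------------------
# AutorizationClient is not a standard Django class-based view: list() is an
# ordinary instance method guarded by user_passes_test. A hypothetical way to
# expose it from a URLconf is sketched below; the URL pattern, name and module
# layout are assumptions, since the project's actual urls.py is not shown.
#
# from django.conf.urls import url
#
# urlpatterns = [
#     url(r'^authorizations/clients/$', AutorizationClient().list,
#         name='authorization-clients'),
# ]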
|
normal
|
{
"blob_id": "b78ad3a55eb27fd91f89c22db07fadca297640ab",
"index": 2892,
"step-1": "<mask token>\n\n\nclass Autorization:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n obj_api = api()\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n form_filters = AuthorizationClientFilter(request.GET)\n if form_filters.is_valid():\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters[\n 'from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters[\n 'until_date'])\n filters = form_filters.cleaned_data\n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {'status': 1}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {'status': 2}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n data = obj_api.get(slug='authorizations/clients/', arg=filters,\n token=token)\n header_table = [('', 'code_seller'), ('', 'name'), ('',\n 'document_type_name'), ('', 'document'), ('', ''), ('', ''), (\n '', 'document'), ('', 'approve'), ('', 'rejected'), ('',\n 'date_join')]\n multi_header = [[(_('seller code'), {'rowspan': '2'}), (_('user'),\n {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan':\n '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2',\n 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan':\n '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'})], [(_(\n 'name or Social reason'), {}), (_('type document'), {}), (_(\n 'document number'), {}), (_('description'), {}), (_(\n 'Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {})]]\n approve_column = {'type': 'submit', 'data': {'name': 'approve',\n 'key': 'id', 'cls': 'btn btn-success', 'text': cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name': 'rejected',\n 'key': 'id', 'cls': 'btn btn-danger', 'text': cap(_('rejected'))}}\n custom_column = {'date_join': {'type': 'date', 'data': ('date_join'\n ,)}, 'approve': {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"'\n ,), 'next': approve_column}, 'rejected': {'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',), 'next': rejected_column}}\n table = convert(data, header=header_table, multi_header=\n multi_header, custom_column=custom_column)\n vars_page = self.generate_header(custom_title=title_page)\n return render(request, 'admin/authorization/clients.html', {'table':\n table, 'vars_page': vars_page, 'form_filters': form_filters})\n",
"step-2": "<mask token>\n\n\nclass Autorization:\n <mask token>\n <mask token>\n\n def generate_header(self, custom_title=None):\n if custom_title:\n title = '{} - '.format(_('authorizations')).title() + custom_title\n else:\n title = self.title_content_header\n header = {'icon': self.logo_content_header, 'title': title}\n return {**header, **self.vars_page}\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n obj_api = api()\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n form_filters = AuthorizationClientFilter(request.GET)\n if form_filters.is_valid():\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters[\n 'from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters[\n 'until_date'])\n filters = form_filters.cleaned_data\n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {'status': 1}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {'status': 2}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n data = obj_api.get(slug='authorizations/clients/', arg=filters,\n token=token)\n header_table = [('', 'code_seller'), ('', 'name'), ('',\n 'document_type_name'), ('', 'document'), ('', ''), ('', ''), (\n '', 'document'), ('', 'approve'), ('', 'rejected'), ('',\n 'date_join')]\n multi_header = [[(_('seller code'), {'rowspan': '2'}), (_('user'),\n {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan':\n '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2',\n 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan':\n '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'})], [(_(\n 'name or Social reason'), {}), (_('type document'), {}), (_(\n 'document number'), {}), (_('description'), {}), (_(\n 'Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {})]]\n approve_column = {'type': 'submit', 'data': {'name': 'approve',\n 'key': 'id', 'cls': 'btn btn-success', 'text': cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name': 'rejected',\n 'key': 'id', 'cls': 'btn btn-danger', 'text': cap(_('rejected'))}}\n custom_column = {'date_join': {'type': 'date', 'data': ('date_join'\n ,)}, 'approve': {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"'\n ,), 'next': approve_column}, 'rejected': {'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',), 'next': rejected_column}}\n table = convert(data, header=header_table, multi_header=\n multi_header, custom_column=custom_column)\n vars_page = self.generate_header(custom_title=title_page)\n return render(request, 'admin/authorization/clients.html', {'table':\n table, 'vars_page': vars_page, 'form_filters': form_filters})\n",
"step-3": "<mask token>\n\n\nclass Autorization:\n logo_content_header = 'fa fa-key'\n vars_page = {}\n\n def generate_header(self, custom_title=None):\n if custom_title:\n title = '{} - '.format(_('authorizations')).title() + custom_title\n else:\n title = self.title_content_header\n header = {'icon': self.logo_content_header, 'title': title}\n return {**header, **self.vars_page}\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n obj_api = api()\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n form_filters = AuthorizationClientFilter(request.GET)\n if form_filters.is_valid():\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters[\n 'from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters[\n 'until_date'])\n filters = form_filters.cleaned_data\n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {'status': 1}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {'status': 2}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n data = obj_api.get(slug='authorizations/clients/', arg=filters,\n token=token)\n header_table = [('', 'code_seller'), ('', 'name'), ('',\n 'document_type_name'), ('', 'document'), ('', ''), ('', ''), (\n '', 'document'), ('', 'approve'), ('', 'rejected'), ('',\n 'date_join')]\n multi_header = [[(_('seller code'), {'rowspan': '2'}), (_('user'),\n {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan':\n '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2',\n 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan':\n '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'})], [(_(\n 'name or Social reason'), {}), (_('type document'), {}), (_(\n 'document number'), {}), (_('description'), {}), (_(\n 'Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {})]]\n approve_column = {'type': 'submit', 'data': {'name': 'approve',\n 'key': 'id', 'cls': 'btn btn-success', 'text': cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name': 'rejected',\n 'key': 'id', 'cls': 'btn btn-danger', 'text': cap(_('rejected'))}}\n custom_column = {'date_join': {'type': 'date', 'data': ('date_join'\n ,)}, 'approve': {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"'\n ,), 'next': approve_column}, 'rejected': {'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',), 'next': rejected_column}}\n table = convert(data, header=header_table, multi_header=\n multi_header, custom_column=custom_column)\n vars_page = self.generate_header(custom_title=title_page)\n return render(request, 'admin/authorization/clients.html', {'table':\n table, 'vars_page': vars_page, 'form_filters': form_filters})\n",
"step-4": "<mask token>\nfrom django.shortcuts import render\nfrom dashboard.json2table import convert\nfrom django.utils.translation import ugettext_lazy as _\nfrom api.connection import api\nfrom login.utils.tools import role_admin_check\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import user_passes_test\nfrom dashboard.tools import capitalize as cap, ToolsBackend as Tools\nfrom dashboard.forms import AuthorizationClientFilter\n\n\nclass Autorization:\n logo_content_header = 'fa fa-key'\n vars_page = {}\n\n def generate_header(self, custom_title=None):\n if custom_title:\n title = '{} - '.format(_('authorizations')).title() + custom_title\n else:\n title = self.title_content_header\n header = {'icon': self.logo_content_header, 'title': title}\n return {**header, **self.vars_page}\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n obj_api = api()\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n form_filters = AuthorizationClientFilter(request.GET)\n if form_filters.is_valid():\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters[\n 'from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters[\n 'until_date'])\n filters = form_filters.cleaned_data\n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {'status': 1}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {'status': 2}\n obj_api.put(slug='authorizations/clients/' + pk, token=\n token, arg=data)\n data = obj_api.get(slug='authorizations/clients/', arg=filters,\n token=token)\n header_table = [('', 'code_seller'), ('', 'name'), ('',\n 'document_type_name'), ('', 'document'), ('', ''), ('', ''), (\n '', 'document'), ('', 'approve'), ('', 'rejected'), ('',\n 'date_join')]\n multi_header = [[(_('seller code'), {'rowspan': '2'}), (_('user'),\n {'rowspan': '1', 'colspan': '3'}), (_('product'), {'rowspan':\n '1', 'colspan': '2'}), (_('user code'), {'rowspan': '2',\n 'colspan': '1'}), (_('validation'), {'rowspan': '1', 'colspan':\n '2'}), (_('date'), {'rowspan': '2', 'colspan': '1'})], [(_(\n 'name or Social reason'), {}), (_('type document'), {}), (_(\n 'document number'), {}), (_('description'), {}), (_(\n 'Query Numbers'), {}), (_('approve'), {}), (_('deneis'), {})]]\n approve_column = {'type': 'submit', 'data': {'name': 'approve',\n 'key': 'id', 'cls': 'btn btn-success', 'text': cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name': 'rejected',\n 'key': 'id', 'cls': 'btn btn-danger', 'text': cap(_('rejected'))}}\n custom_column = {'date_join': {'type': 'date', 'data': ('date_join'\n ,)}, 'approve': {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"'\n ,), 'next': approve_column}, 'rejected': {'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',), 'next': rejected_column}}\n table = convert(data, header=header_table, multi_header=\n multi_header, 
custom_column=custom_column)\n vars_page = self.generate_header(custom_title=title_page)\n return render(request, 'admin/authorization/clients.html', {'table':\n table, 'vars_page': vars_page, 'form_filters': form_filters})\n",
"step-5": "\"\"\"Vista de Autorizaciones (Clientes/Especialistas/Vendedores).\"\"\"\nfrom django.shortcuts import render\nfrom dashboard.json2table import convert\nfrom django.utils.translation import ugettext_lazy as _\nfrom api.connection import api\nfrom login.utils.tools import role_admin_check\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import user_passes_test\nfrom dashboard.tools import capitalize as cap, ToolsBackend as Tools\nfrom dashboard.forms import AuthorizationClientFilter\nclass Autorization:\n logo_content_header = \"fa fa-key\"\n vars_page = {}\n def generate_header(self, custom_title=None):\n if custom_title:\n title = \"{} - \".format(_(\"authorizations\")).title() + custom_title\n else:\n title = self.title_content_header\n\n header = {'icon': self.logo_content_header, 'title': title}\n return {**header, **self.vars_page}\n\n\nclass AutorizationClient(Autorization):\n \"\"\"\n Manejo de autorizaciones de clientes,\n se listan los clientes, en orden de pendiente,\n aprobado y rechazado, segun fecha\n Para posterior aprovacion o rechazo\n \"\"\"\n\n @method_decorator(user_passes_test(role_admin_check()))\n def list(self, request):\n \"\"\"\n Listado de clientes por autorizar,\n se incluyen tambien clientes aprovados y rechazados\n \"\"\"\n\n obj_api = api()\n # actual_page = get_actual_page(request)\n token = request.session['token']\n title_page = _('User - User Affiliation').title()\n filters = {}\n\n form_filters = AuthorizationClientFilter(request.GET)\n\n if form_filters.is_valid(): # Agregamos filtros de encontrarse alguno\n filters = form_filters.cleaned_data\n tools = Tools()\n filters['from_date'] = tools.date_format_to_db(date=filters['from_date'])\n filters['until_date'] = tools.date_format_to_db(date=filters['until_date'])\n filters = form_filters.cleaned_data\n \n if request.method == 'GET':\n if 'approve' in request.GET and request.GET['approve']:\n pk = request.GET['approve']\n data = {\"status\":1}\n obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data)\n\n if 'rejected' in request.GET and request.GET['rejected']:\n pk = request.GET['rejected']\n data = {\"status\":2}\n obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data)\n\n # Traer data para el listado\n data = obj_api.get(slug='authorizations/clients/', arg=filters, token=token)\n\n\n header_table = [(\"\", \"code_seller\"), (\"\", \"name\"),(\n \"\", \"document_type_name\"), ( \"\", \"document\"),(\n \"\", \"\"), (\"\", \"\"), (\n \"\", \"document\"), (\n \"\", \"approve\"), (\"\", \"rejected\"), (\n \"\", \"date_join\")]\n\n # Multiples header, una lista por cada nivel de la cabecera\n multi_header = [\n [\n (_(\"seller code\"), {'rowspan': '2'}),\n (_('user'), {'rowspan': '1', 'colspan': '3'}),\n (_('product'), {'rowspan': '1', 'colspan': '2'}),\n (_('user code'), {'rowspan': '2', 'colspan': '1'}),\n (_('validation'), {'rowspan': '1', 'colspan': '2'}),\n (_('date'), {'rowspan': '2', 'colspan': '1'}),\n ],\n [\n (_('name or Social reason'), {}),\n (_('type document'), {}),\n (_('document number'), {}),\n (_('description'), {}),\n (_('Query Numbers'), {}),\n (_('approve'), {}),\n (_('deneis'), {}),\n ],\n ]\n\n approve_column = {'type': 'submit', 'data': {'name':'approve','key':'id',\n 'cls':'btn btn-success','text':cap(_('approve'))}}\n rejected_column = {'type': 'submit', 'data': {'name':'rejected','key':'id',\n 'cls':'btn btn-danger','text':cap(_('rejected'))}}\n custom_column = {\n \"date_join\": {'type': 'date', 
'data': ('date_join',)},\n \"approve\": {'type': 'if_eval', 'data': ('r[\"status\"]==\"0\"',),\n 'next': approve_column},\n \"rejected\": {\n 'type': 'if_eval',\n 'data': ('r[\"status\"]==\"0\"',),\n 'next': rejected_column\n },\n }\n\n table = convert(data, header=header_table, multi_header=multi_header, custom_column=custom_column)\n\n # Titulo de la vista y variables de la Clase\n vars_page = self.generate_header(custom_title=title_page)\n\n return render(request, 'admin/authorization/clients.html',\n {'table': table, 'vars_page': vars_page, 'form_filters':form_filters})",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="数据结构"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("二叉树——递归套路")
r2=s2.getRootTopic()
r2.setTitle("二叉树——递归套路")
content={
'递归套路':[
'可解决面试中绝大多数二叉树问题,尤其是树型dp问题',
'本质是利用递归遍历二叉树的便利性'
],
'思路':[
'1.假设以x节点为为头,假设可以向X左树和X右树要任何信息',
'2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',
'3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息',
'4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',
'5.递归函数都返回S,每一棵子树都这么要求',
'6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'
],
'题目1':[
'给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树',
{'思路':[
'1.左子树是否平衡',
'2.右子树是否平衡',
'3.左树与右树高在2以内',
]},
{'实现':[
'Class Info(){',
' boolean isBalanced;',
' int height;',
'}',
'---------------------',
'Info process(Node head){',
        ' if(head==null){',
        ' return new Info(true,0);',
        ' }',
        ' Info leftInfo=process(head.left);',
        ' Info rightInfo=process(head.right);',
        ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',
        ' boolean isBalanced=true;',
        ' if(!leftInfo.isBalanced||!rightInfo.isBalanced||Math.abs(leftInfo.height-rightInfo.height)>=2){',
        ' isBalanced=false;',
' }',
' return new Info(isBalanced,height);',
'}'
]}
],
'题目2':[
'给定一棵二叉树的头节点head,任何两个节点之前都存在距离',
'返回整棵二叉树的最大距离',
{'思路':[
{'1.与头节点无关':[
'max(左侧的最大距离,右侧的最大距离)',
]},
{'2.与头节点有头':[
'左树高+右树高+1'
]}
]},
{'实现':[
'Class Info(){',
' int maxDistance;',
' int height;',
'}',
'---------------------',
'Info process(Node head){',
' if(head==null){',
' return new Info(0,0);',
' }',
' Info leftInfo=process(head.left);',
' Info rightInfo=process(head.right);',
' int height=Math.max(leftInfo.height,rightInfo.height)+1;',
' int maxDistance=Math.max(',
' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',
' leftInfo.height+rightInfo.height+1)',
' return new Info(maxDistance,height);',
'}'
]}
]
}
# build the xmind tree from the content dict
xmind.build(content,r2)
# save the xmind file
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
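# ---------------------------------------------------------------------------
# The mind-map content above sketches the "recursive info" pattern in
# Java-flavoured pseudocode. Below is a minimal Python sketch of the same idea
# for the balanced-tree check; the Node class is a hypothetical stand-in and is
# not part of the xmind script itself.
from collections import namedtuple

class Node(object):
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

# Steps 4/5 of the pattern: every subtree returns the same info set.
Info = namedtuple('Info', ['is_balanced', 'height'])

def process(head):
    if head is None:
        return Info(True, 0)
    left, right = process(head.left), process(head.right)
    height = max(left.height, right.height) + 1
    balanced = (left.is_balanced and right.is_balanced
                and abs(left.height - right.height) < 2)
    return Info(balanced, height)

def is_balanced(head):
    return process(head).is_balanced

# Example: is_balanced(Node(1, Node(2), Node(3))) -> True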
|
normal
|
{
"blob_id": "b713e38824db13f919484b071fb35afb29e26baa",
"index": 3803,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, parentdir)\n<mask token>\ns2.setTitle('二叉树——递归套路')\n<mask token>\nr2.setTitle('二叉树——递归套路')\n<mask token>\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-3": "<mask token>\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, parentdir)\n<mask token>\nxmind_name = '数据结构'\nw = xmind.load(os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\ns2 = w.createSheet()\ns2.setTitle('二叉树——递归套路')\nr2 = s2.getRootTopic()\nr2.setTitle('二叉树——递归套路')\ncontent = {'递归套路': ['可解决面试中绝大多数二叉树问题,尤其是树型dp问题', '本质是利用递归遍历二叉树的便利性'], '思路':\n ['1.假设以x节点为为头,假设可以向X左树和X右树要任何信息', '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息', '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求', '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'], '题目1': [\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树', {'思路': ['1.左子树是否平衡', '2.右子树是否平衡',\n '3.左树与右树高在2以内']}, {'实现': ['Class Info(){', ' boolean isBalanced;',\n ' int height;', '}', '---------------------',\n 'Info process(Node head){', ' if(node==null){', ' return node;',\n ' }', ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){'\n , ' isBalanced=false;', ' }',\n ' return new Info(isBalanced,height);', '}']}], '题目2': [\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离', '返回整棵二叉树的最大距离', {'思路': [{'1.与头节点无关': [\n 'max(左侧的最大距离,右侧的最大距离)']}, {'2.与头节点有头': ['左树高+右树高+1']}]}, {'实现': [\n 'Class Info(){', ' int maxDistance;', ' int height;', '}',\n '---------------------', 'Info process(Node head){',\n ' if(head==null){', ' return new Info(0,0);', ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);', '}']}]}\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-4": "import os, sys\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, parentdir)\nimport xmind\nfrom xmind.core.markerref import MarkerId\nxmind_name = '数据结构'\nw = xmind.load(os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\ns2 = w.createSheet()\ns2.setTitle('二叉树——递归套路')\nr2 = s2.getRootTopic()\nr2.setTitle('二叉树——递归套路')\ncontent = {'递归套路': ['可解决面试中绝大多数二叉树问题,尤其是树型dp问题', '本质是利用递归遍历二叉树的便利性'], '思路':\n ['1.假设以x节点为为头,假设可以向X左树和X右树要任何信息', '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息', '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求', '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'], '题目1': [\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树', {'思路': ['1.左子树是否平衡', '2.右子树是否平衡',\n '3.左树与右树高在2以内']}, {'实现': ['Class Info(){', ' boolean isBalanced;',\n ' int height;', '}', '---------------------',\n 'Info process(Node head){', ' if(node==null){', ' return node;',\n ' }', ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){'\n , ' isBalanced=false;', ' }',\n ' return new Info(isBalanced,height);', '}']}], '题目2': [\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离', '返回整棵二叉树的最大距离', {'思路': [{'1.与头节点无关': [\n 'max(左侧的最大距离,右侧的最大距离)']}, {'2.与头节点有头': ['左树高+右树高+1']}]}, {'实现': [\n 'Class Info(){', ' int maxDistance;', ' int height;', '}',\n '---------------------', 'Info process(Node head){',\n ' if(head==null){', ' return new Info(0,0);', ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);', '}']}]}\nxmind.build(content, r2)\nxmind.save(w, os.path.dirname(os.path.abspath(__file__)) + '\\\\' +\n xmind_name + '.xmind')\n",
"step-5": "import os,sys \nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) \nsys.path.insert(0,parentdir) \n\nimport xmind\nfrom xmind.core.markerref import MarkerId\nxmind_name=\"数据结构\"\nw = xmind.load(os.path.dirname(os.path.abspath(__file__))+\"\\\\\"+xmind_name+\".xmind\") \ns2=w.createSheet()\ns2.setTitle(\"二叉树——递归套路\")\nr2=s2.getRootTopic()\nr2.setTitle(\"二叉树——递归套路\")\n\n\ncontent={\n'递归套路':[\n '可解决面试中绝大多数二叉树问题,尤其是树型dp问题',\n '本质是利用递归遍历二叉树的便利性'\n],\n'思路':[\n '1.假设以x节点为为头,假设可以向X左树和X右树要任何信息',\n '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',\n '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息',\n '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',\n '5.递归函数都返回S,每一棵子树都这么要求',\n '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'\n],\n'题目1':[\n '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树',\n {'思路':[\n '1.左子树是否平衡',\n '2.右子树是否平衡',\n '3.左树与右树高在2以内',\n ]},\n {'实现':[\n 'Class Info(){',\n ' boolean isBalanced;',\n ' int height;',\n '}',\n '---------------------',\n 'Info process(Node head){',\n ' if(node==null){',\n ' return node;',\n ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)-1;',\n ' boolean isBalanced=true;',\n ' if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){',\n ' isBalanced=false;',\n ' }',\n ' return new Info(isBalanced,height);',\n '}'\n ]}\n],\n'题目2':[\n '给定一棵二叉树的头节点head,任何两个节点之前都存在距离',\n '返回整棵二叉树的最大距离',\n {'思路':[\n {'1.与头节点无关':[\n 'max(左侧的最大距离,右侧的最大距离)',\n ]},\n {'2.与头节点有头':[\n '左树高+右树高+1'\n ]}\n ]},\n {'实现':[\n 'Class Info(){',\n ' int maxDistance;',\n ' int height;',\n '}',\n '---------------------',\n 'Info process(Node head){',\n ' if(head==null){',\n ' return new Info(0,0);',\n ' }',\n ' Info leftInfo=process(head.left);',\n ' Info rightInfo=process(head.right);',\n ' int height=Math.max(leftInfo.height,rightInfo.height)+1;',\n ' int maxDistance=Math.max(',\n ' Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',\n ' leftInfo.height+rightInfo.height+1)',\n ' return new Info(maxDistance,height);',\n '}'\n ]}\n \n]\n\n}\n#构建xmind\nxmind.build(content,r2)\n#保存xmind\nxmind.save(w,os.path.dirname(os.path.abspath(__file__))+\"\\\\\"+xmind_name+\".xmind\") ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!C:\Python27\python
print('Content-Type:text/html\n\n')
print ("""
<html>
<head>
<link href="iconTech.png" rel="icon"/>
<meta name="viewport" content="width=device-width,intial-scale=1.0"/>
<link href="../css/bootstrap.min.css" rel="stylesheet" type="text/css"/>
<link href="../css/bootstrap-theme.min.css" rel="stylesheet" type="text/css"/>
<link rel="stylesheet" href="../css/font-awesome.min.css" type="text/css"/>
<script src="../js/jquery.js"></script>
<script src="../js/bootstrap.min.js"></script>
<style>
.outer
{
min-height:100px;
}
.top
{
min-height:50px;
background:gray;
}
.logo
{
height:50px;
width:240px;
margin:5px 5px;
background:white;
font-size:30px;
font-family:Algerian;
border:5px double green;
}
.menu
{
height:50px;
width:1000px;
background:gray;
z-index:10;
}
#menu
{
background:none;
border:none;
box-shadow:none;
padding:1% 0%;
margin:0px;
font-size:15px;
}
#menu ul li a
{
color:white;
text-shadow:none;
font-weight:bold;
font-size:12px;
}
#menu ul li:hover
{
background:transparent;
}
.head
{
height:100px;
background:url('../bimg/d1.jpg');
background-attachment:fixed;
background-size:100% 100%;
}
.head1
{
height:100px;
background-color:rgba(0,0,0,.4);
color:white;
font-size:20px;
padding:2% 0%;
}
.addcake
{
min-height:550px;
margin-left:25%;
background:rgba(0,0,0,.3);
margin-top:20px;
margin-bottom:20px;
}
.footer
{
min-height:50px;
padding:1% 0%;
text-align:center;
color:white;
font-size:20px;
background:black;
}
</style>
</head>
<body>
<div class="col-sm-12 outer">
<div class="row">
<div class="col-sm-12 top">
<div class="row">
<div class="col-sm-3 logo">Bake<span style="color:orange;">-o-</span>logy</div>
<div class="col-sm-9 menu"> <nav class="navbar navbar-default" id="menu">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
<span class="sr-only clpbtn">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1" >
<ul class="nav navbar-nav navbar-right">
<li><a href="index.py">Dashboard</a></li>
<li><a href="Addmenu.py">Add Menu</a></li>
<li><a href="Addservices.py">Add Services</a></li>
<li><a href="Addimages.py">Add Images</a></li>
<li><a href="OrderManagement.py">Order Management</a></li>
<li><a href="ContactManagement.py">Contact Management</a></li>
<li><a href="Changepassword.py">Change Password</a></li>
<li><a href="LogOut.py">LogOut</a></li>
</ul>
</div><!-- /.navbar-collapse -->
</div><!-- /.container-fluid -->
</nav>
</div></div></div>
<div class="col-sm-12 main">
<div class="row">
<div class="col-sm-12 head">
<div class="row">
<div class="col-sm-12 head1">
<div class="text-center"><span class="fa fa-cutlery "></span> Add Cake Menu </div>
</div>
</div></div>
</div></div>
<div class="col-sm-6 addcake">
<div class="h2 text-center">Add Cakes Menu</div>
<form action="../code/cakecode.py" enctype="multipart/form-data" method="post">
<div class="h4">Cake Name</div>
<input type="text" placeholder="Input Your Cake Name" name="cake" class="form-control">
<div class="h4">Cake Size</div>
<input type="text" placeholder="Input Your Cake size" name="size" class="form-control">
<div class="h4">Cake Weight</div>
<input type="text" placeholder="Input Your Cake Flavour" name="flavour" class="form-control">
<div class="h4">Price</div>
<input type="text" placeholder="Input Your Cake Weight" name="weight" class="form-control">
<div class="h4">Cake Flavour</div>
<input type="text" placeholder="Input Your Cake Price" name="price" class="form-control">
<div class="h4">Cake Image</div>
<input type="file" placeholder="Import Your Cake image" name="pic" class="form-control"><br/>
<input type="submit" class="form-control" value="Add">
</div>
<div class="col-sm-12 footer">
<div class="col-sm-6">©copyright:<a target="_blank" href="https://www.techpile.in">Techpile Technology.pvt.Ltd.</a>
</div>
<div class="col-sm-6">
Developed By:-Yash Rastogi</div>
</div>
</div>
</div>
</body>
</html>
""")
|
normal
|
{
"blob_id": "968cfcfe9d31adcd3a67a88a66e5ebe7b719be8d",
"index": 2841,
"step-1": "<mask token>\n",
"step-2": "print('Content-Type:text/html\\n\\n')\nprint(\n \"\"\"\n<html>\n<head>\n<link href=\"iconTech.png\" rel=\"icon\"/>\n<meta name=\"viewport\" content=\"width=device-width,intial-scale=1.0\"/>\n<link href=\"../css/bootstrap.min.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<link href=\"../css/bootstrap-theme.min.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<link rel=\"stylesheet\" href=\"../css/font-awesome.min.css\" type=\"text/css\"/>\n<script src=\"../js/jquery.js\"></script>\n<script src=\"../js/bootstrap.min.js\"></script>\n<style>\n.outer\n{\nmin-height:100px;\n}\n.top\n{\n min-height:50px;\n background:gray;\n}\n.logo\n{\n height:50px;\n width:240px;\n margin:5px 5px;\n background:white;\n font-size:30px;\n font-family:Algerian;\n border:5px double green;\n}\n.menu\n{\n height:50px;\n width:1000px;\n background:gray;\n z-index:10;\n}\n#menu\n{\n background:none;\n border:none;\n box-shadow:none;\n padding:1% 0%;\n margin:0px;\n font-size:15px;\n}\n#menu ul li a\n{\n color:white;\n text-shadow:none;\n font-weight:bold;\n font-size:12px;\n}\n#menu ul li:hover\n{\n background:transparent; \n}\n.head\n{\n height:100px;\n background:url('../bimg/d1.jpg');\n background-attachment:fixed;\n background-size:100% 100%;\n}\n.head1\n{\n height:100px;\n background-color:rgba(0,0,0,.4);\n color:white;\n font-size:20px;\n padding:2% 0%;\n}\n.addcake\n{\nmin-height:550px;\nmargin-left:25%;\nbackground:rgba(0,0,0,.3);\nmargin-top:20px;\nmargin-bottom:20px;\n}\n\n.footer\n{\n min-height:50px;\n padding:1% 0%;\n text-align:center;\n color:white;\n font-size:20px;\n background:black;\n}\n</style>\n</head>\n<body>\n<div class=\"col-sm-12 outer\">\n\t\t\t<div class=\"row\">\n\t\t\t\t<div class=\"col-sm-12 top\">\n\t\t\t\t<div class=\"row\">\n\t\t\t\t\t<div class=\"col-sm-3 logo\">Bake<span style=\"color:orange;\">-o-</span>logy</div>\n\t\t\t\t\t<div class=\"col-sm-9 menu\">\t\t\t\t\t<nav class=\"navbar navbar-default\" id=\"menu\">\n <div class=\"container-fluid\">\n <!-- Brand and toggle get grouped for better mobile display -->\n <div class=\"navbar-header\">\n <button type=\"button\" class=\"navbar-toggle collapsed\" data-toggle=\"collapse\" data-target=\"#bs-example-navbar-collapse-1\" aria-expanded=\"false\">\n <span class=\"sr-only clpbtn\">Toggle navigation</span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n </button>\n </div>\n<!-- Collect the nav links, forms, and other content for toggling -->\n <div class=\"collapse navbar-collapse\" id=\"bs-example-navbar-collapse-1\" >\n <ul class=\"nav navbar-nav navbar-right\">\n <li><a href=\"index.py\">Dashboard</a></li>\n <li><a href=\"Addmenu.py\">Add Menu</a></li>\n\t\t<li><a href=\"Addservices.py\">Add Services</a></li>\n\t\t<li><a href=\"Addimages.py\">Add Images</a></li>\n\t\t<li><a href=\"OrderManagement.py\">Order Management</a></li>\n\t\t<li><a href=\"ContactManagement.py\">Contact Management</a></li>\n\t\t<li><a href=\"Changepassword.py\">Change Password</a></li>\n\t\t<li><a href=\"LogOut.py\">LogOut</a></li>\n\t\t</li>\n \n\t\t </ul>\n\t\t\t \n </div><!-- /.navbar-collapse -->\n </div><!-- /.container-fluid -->\n</nav>\n \t\t\t\t\t\n\t\t </div></div></div>\n\t\t\t\t<div class=\"col-sm-12 main\">\n\t\t\t\t<div class=\"row\">\n\t\t\t\t<div class=\"col-sm-12 head\">\n\t\t\t\t<div class=\"row\">\n\t\t\t\t<div class=\"col-sm-12 head1\">\n\t\t\t\t<div class=\"text-center\"><span class=\"fa fa-cutlery \"></span> Add Cake Menu 
</div>\n\t\t\t\t</div>\n\t\t\t\t</div></div>\n\t\t\t\t</div></div>\n\t\t\t\t<div class=\"col-sm-6 addcake\">\n\t\t\t\t<div class=\"h2 text-center\">Add Cakes Menu</div>\n\t\t\t\t<form action=\"../code/cakecode.py\" enctype=\"multipart/form-data\" method=\"post\">\n\t\t\t\t<div class=\"h4\">Cake Name</div>\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Name\" name=\"cake\" class=\"form-control\">\n\t\t\t\t<div class=\"h4\">Cake Size</div>\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake size\" name=\"size\" class=\"form-control\">\n <div class=\"h4\">Cake Weight</div>\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Flavour\" name=\"flavour\" class=\"form-control\">\n <div class=\"h4\">Price</div>\n\t\t\t\t <input type=\"text\" placeholder=\"Input Your Cake Weight\" name=\"weight\" class=\"form-control\">\n <div class=\"h4\">Cake Flavour</div>\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Price\" name=\"price\" class=\"form-control\">\t\n <div class=\"h4\">Cake Image</div>\n\t\t\t\t<input type=\"file\" placeholder=\"Import Your Cake image\" name=\"pic\" class=\"form-control\"><br/>\n <input type=\"submit\" class=\"form-control\" value=\"Add\">\n\t\t\t\t</div>\n\t\t\t\t<div class=\"col-sm-12 footer\">\n\t\t\t\t<div class=\"col-sm-6\">©copyright:<a target=\"_blank\" href=\"https://www.techpile.in\">Techpile Technology.pvt.Ltd.</a>\n\t\t\t\t</div>\n\t\t\t\t\n\t\t\t\t<div class=\"col-sm-6\">\t\n Developed By:-Yash Rastogi</div>\n\t\t\t\t</div>\n\t\t\t\t\n\t\t\t</div>\n\t\t\t</div>\n\n\n</body>\n</html>\n\n\"\"\"\n )\n",
"step-3": "#!C:\\Python27\\python\r\nprint('Content-Type:text/html\\n\\n')\r\nprint (\"\"\"\r\n<html>\r\n<head>\r\n<link href=\"iconTech.png\" rel=\"icon\"/>\r\n<meta name=\"viewport\" content=\"width=device-width,intial-scale=1.0\"/>\r\n<link href=\"../css/bootstrap.min.css\" rel=\"stylesheet\" type=\"text/css\"/>\r\n<link href=\"../css/bootstrap-theme.min.css\" rel=\"stylesheet\" type=\"text/css\"/>\r\n<link rel=\"stylesheet\" href=\"../css/font-awesome.min.css\" type=\"text/css\"/>\r\n<script src=\"../js/jquery.js\"></script>\r\n<script src=\"../js/bootstrap.min.js\"></script>\r\n<style>\r\n.outer\r\n{\r\nmin-height:100px;\r\n}\r\n.top\r\n{\r\n min-height:50px;\r\n background:gray;\r\n}\r\n.logo\r\n{\r\n height:50px;\r\n width:240px;\r\n margin:5px 5px;\r\n background:white;\r\n font-size:30px;\r\n font-family:Algerian;\r\n border:5px double green;\r\n}\r\n.menu\r\n{\r\n height:50px;\r\n width:1000px;\r\n background:gray;\r\n z-index:10;\r\n}\r\n#menu\r\n{\r\n background:none;\r\n border:none;\r\n box-shadow:none;\r\n padding:1% 0%;\r\n margin:0px;\r\n font-size:15px;\r\n}\r\n#menu ul li a\r\n{\r\n color:white;\r\n text-shadow:none;\r\n font-weight:bold;\r\n font-size:12px;\r\n}\r\n#menu ul li:hover\r\n{\r\n background:transparent; \r\n}\r\n.head\r\n{\r\n height:100px;\r\n background:url('../bimg/d1.jpg');\r\n background-attachment:fixed;\r\n background-size:100% 100%;\r\n}\r\n.head1\r\n{\r\n height:100px;\r\n background-color:rgba(0,0,0,.4);\r\n color:white;\r\n font-size:20px;\r\n padding:2% 0%;\r\n}\r\n.addcake\r\n{\r\nmin-height:550px;\r\nmargin-left:25%;\r\nbackground:rgba(0,0,0,.3);\r\nmargin-top:20px;\r\nmargin-bottom:20px;\r\n}\r\n\r\n.footer\r\n{\r\n min-height:50px;\r\n padding:1% 0%;\r\n text-align:center;\r\n color:white;\r\n font-size:20px;\r\n background:black;\r\n}\r\n</style>\r\n</head>\r\n<body>\r\n<div class=\"col-sm-12 outer\">\r\n\t\t\t<div class=\"row\">\r\n\t\t\t\t<div class=\"col-sm-12 top\">\r\n\t\t\t\t<div class=\"row\">\r\n\t\t\t\t\t<div class=\"col-sm-3 logo\">Bake<span style=\"color:orange;\">-o-</span>logy</div>\r\n\t\t\t\t\t<div class=\"col-sm-9 menu\">\t\t\t\t\t<nav class=\"navbar navbar-default\" id=\"menu\">\r\n <div class=\"container-fluid\">\r\n <!-- Brand and toggle get grouped for better mobile display -->\r\n <div class=\"navbar-header\">\r\n <button type=\"button\" class=\"navbar-toggle collapsed\" data-toggle=\"collapse\" data-target=\"#bs-example-navbar-collapse-1\" aria-expanded=\"false\">\r\n <span class=\"sr-only clpbtn\">Toggle navigation</span>\r\n <span class=\"icon-bar\"></span>\r\n <span class=\"icon-bar\"></span>\r\n <span class=\"icon-bar\"></span>\r\n </button>\r\n </div>\r\n<!-- Collect the nav links, forms, and other content for toggling -->\r\n <div class=\"collapse navbar-collapse\" id=\"bs-example-navbar-collapse-1\" >\r\n <ul class=\"nav navbar-nav navbar-right\">\r\n <li><a href=\"index.py\">Dashboard</a></li>\r\n <li><a href=\"Addmenu.py\">Add Menu</a></li>\r\n\t\t<li><a href=\"Addservices.py\">Add Services</a></li>\r\n\t\t<li><a href=\"Addimages.py\">Add Images</a></li>\r\n\t\t<li><a href=\"OrderManagement.py\">Order Management</a></li>\r\n\t\t<li><a href=\"ContactManagement.py\">Contact Management</a></li>\r\n\t\t<li><a href=\"Changepassword.py\">Change Password</a></li>\r\n\t\t<li><a href=\"LogOut.py\">LogOut</a></li>\r\n\t\t</li>\r\n \r\n\t\t </ul>\r\n\t\t\t \r\n </div><!-- /.navbar-collapse -->\r\n </div><!-- /.container-fluid -->\r\n</nav>\r\n \t\t\t\t\t\r\n\t\t </div></div></div>\r\n\t\t\t\t<div class=\"col-sm-12 
main\">\r\n\t\t\t\t<div class=\"row\">\r\n\t\t\t\t<div class=\"col-sm-12 head\">\r\n\t\t\t\t<div class=\"row\">\r\n\t\t\t\t<div class=\"col-sm-12 head1\">\r\n\t\t\t\t<div class=\"text-center\"><span class=\"fa fa-cutlery \"></span> Add Cake Menu </div>\r\n\t\t\t\t</div>\r\n\t\t\t\t</div></div>\r\n\t\t\t\t</div></div>\r\n\t\t\t\t<div class=\"col-sm-6 addcake\">\r\n\t\t\t\t<div class=\"h2 text-center\">Add Cakes Menu</div>\r\n\t\t\t\t<form action=\"../code/cakecode.py\" enctype=\"multipart/form-data\" method=\"post\">\r\n\t\t\t\t<div class=\"h4\">Cake Name</div>\r\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Name\" name=\"cake\" class=\"form-control\">\r\n\t\t\t\t<div class=\"h4\">Cake Size</div>\r\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake size\" name=\"size\" class=\"form-control\">\r\n <div class=\"h4\">Cake Weight</div>\r\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Flavour\" name=\"flavour\" class=\"form-control\">\r\n <div class=\"h4\">Price</div>\r\n\t\t\t\t <input type=\"text\" placeholder=\"Input Your Cake Weight\" name=\"weight\" class=\"form-control\">\r\n <div class=\"h4\">Cake Flavour</div>\r\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Price\" name=\"price\" class=\"form-control\">\t\r\n <div class=\"h4\">Cake Image</div>\r\n\t\t\t\t<input type=\"file\" placeholder=\"Import Your Cake image\" name=\"pic\" class=\"form-control\"><br/>\r\n <input type=\"submit\" class=\"form-control\" value=\"Add\">\r\n\t\t\t\t</div>\r\n\t\t\t\t<div class=\"col-sm-12 footer\">\r\n\t\t\t\t<div class=\"col-sm-6\">©copyright:<a target=\"_blank\" href=\"https://www.techpile.in\">Techpile Technology.pvt.Ltd.</a>\r\n\t\t\t\t</div>\r\n\t\t\t\t\r\n\t\t\t\t<div class=\"col-sm-6\">\t\r\n Developed By:-Yash Rastogi</div>\r\n\t\t\t\t</div>\r\n\t\t\t\t\r\n\t\t\t</div>\r\n\t\t\t</div>\r\n\r\n\r\n</body>\r\n</html>\r\n\r\n\"\"\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""Tests for Node objects."""
import numpy as np
import unittest
import optimus.core as core
import optimus.nodes as nodes
import optimus.util as util
def __relu__(x):
"Numpy Rectified Linear Unit."
return 0.5 * (np.abs(x) + x)
class NodeTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_Node(self):
pass
def test_Constant(self):
n = nodes.Constant(name='test', shape=None)
n.data.value = 1.0
n.transform()
fx = util.compile(inputs=[], outputs=[n.output])
np.testing.assert_equal(np.array(fx()[0]), 1.0)
def test_Add(self):
x1 = core.Input(name='x1', shape=(2, 2))
x2 = core.Input(name='x2', shape=(2, 2))
n = nodes.Add(name='accumulate', num_inputs=2)
n.input_0.connect(x1)
with self.assertRaises(nodes.UnconnectedNodeError):
n.transform()
n.input_1.connect(x2)
self.assertIsNone(n.output.shape)
n.transform()
self.assertEqual(n.output.shape, (2, 2))
fx = util.compile(inputs=[x1, x2],
outputs=[n.output])
a = np.array([[3, -1], [3, 7]])
b = np.array([[1, 2], [3, 4]])
z = fx(a, b)[0]
np.testing.assert_equal(z, np.array([[4, 1], [6, 11]]))
@unittest.skip("Not fully implemented yet.")
def test_Bincount(self):
x1 = core.Input(name='x1', shape=(None,))
n = nodes.Bincount(name='counter', max_int=3)
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=[x1], outputs=[n.counts])
a = np.array([3, 0, 3, 1])
np.testing.assert_equal(n.counts.value, np.array([0, 0, 0, 0]))
np.testing.assert_equal(fx(a)[0], np.array([1, 1, 0, 2]))
np.testing.assert_equal(fx(a)[0], np.array([2, 2, 0, 4]))
def test_Concatenate(self):
x1 = core.Input(name='x1', shape=(2, 2))
x2 = core.Input(name='x2', shape=(2, 2))
a = np.array([[3, -1], [3, 7]])
b = np.array([[1, 2], [3, 4]])
for axis in range(2):
n = nodes.Concatenate(name='concatenate', num_inputs=2, axis=axis)
n.input_0.connect(x1)
with self.assertRaises(nodes.UnconnectedNodeError):
n.transform()
n.input_1.connect(x2)
n.transform()
fx = util.compile(inputs=[x1, x2],
outputs=[n.output])
z = fx(a, b)[0]
np.testing.assert_equal(z, np.concatenate([a, b], axis=axis))
def test_Stack(self):
x1 = core.Input(name='x1', shape=(2, 3))
x2 = core.Input(name='x2', shape=(2, 3))
a = np.arange(6).reshape(2, 3)
b = np.arange(6).reshape(2, 3) + 6
for axes in None, (1, 2, 0), (2, 1, 0):
n = nodes.Stack(name='stack', num_inputs=2, axes=axes)
n.input_1.connect(x2)
n.input_0.connect(x1)
n.transform()
fx = util.compile(inputs=[x1, x2],
outputs=[n.output])
z = fx(a, b)[0]
expected = np.array([a, b])
if axes:
expected = np.transpose(expected, axes)
np.testing.assert_equal(z, expected)
def test_Dimshuffle(self):
x1 = core.Input(name='x1', shape=(2, 3))
a = np.zeros([2, 3])
axes = [('x', 0, 1), (0, 1, 'x'), (1, 'x', 0)]
shapes = [(1, 2, 3), (2, 3, 1), (3, 1, 2)]
for ax, shp in zip(axes, shapes):
n = nodes.Dimshuffle('dimshuffle', ax)
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0].shape, shp)
def test_Slice(self):
x1 = core.Input(name='x1', shape=(2, 3))
a = np.arange(6).reshape(2, 3)
slices = [(None, 1), (0, None), (1, 0)]
ans = [a[:, 1], a[0, :], a[1, 0]]
for slc, ans in zip(slices, ans):
n = nodes.Slice('slice', slc)
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], ans)
def test_Log(self):
x1 = core.Input(name='x1', shape=(2, 2))
log = nodes.Log('log')
log.input.connect(x1)
log.transform()
fx = util.compile(inputs=log.inputs.values(),
outputs=log.outputs.values())
a = np.array([[3, 1], [4, 7]], dtype=np.float32)
z = fx(a)[0]
np.testing.assert_almost_equal(z, np.log(a))
def test_Multiply(self):
x1 = core.Input(name='x1', shape=(2, 2))
a = np.array([[3, -1], [3, 7]])
for w, shp in zip([-1, a], [None, a.shape]):
n = nodes.Multiply(name='gain', weight_shape=shp)
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], np.zeros_like(a))
n.weight.value = w
np.testing.assert_equal(fx(a)[0], w*a)
n = nodes.Multiply(name='gain', weight_shape=(1, 2), broadcast=[0])
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], np.zeros_like(a))
n.weight.value = a[0].reshape(1, -1)
np.testing.assert_equal(fx(a)[0], a*a[0].reshape(1, -1))
def test_Max(self):
x1 = core.Input(name='x1', shape=(2, 2))
a = np.array([[3, -1], [4, 7]])
res = 7, np.array([4, 7]), np.array([3, 7])
for idx, axis in enumerate([None, 0, 1]):
n = nodes.Max('max', axis=axis)
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], res[idx])
def test_Min(self):
x1 = core.Input(name='x1', shape=(2, 2))
a = np.array([[3, -1], [4, 7]])
res = -1, np.array([3, -1]), np.array([-1, 4])
for idx, axis in enumerate([None, 0, 1]):
n = nodes.Min('min', axis=axis)
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], res[idx])
def test_Sum(self):
x1 = core.Input(name='x1', shape=(2, 2))
a = np.array([[3, -1], [4, 7]])
res = 13, np.array([7, 6]), np.array([2, 11])
for idx, axis in enumerate([None, 0, 1]):
n = nodes.Sum('sum', axis=axis)
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], res[idx])
def test_Mean(self):
x1 = core.Input(name='x1', shape=(2, 2))
a = np.array([[3, -1], [4, 7]])
res = 13 / 4.0, np.array([7, 6]) / 2.0, np.array([2, 11]) / 2.0
for idx, axis in enumerate([None, 0, 1]):
n = nodes.Mean('mean', axis=axis)
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], res[idx])
def test_NormalizeDim(self):
x1 = core.Input(name='x1', shape=(1, 2, 3))
a = np.array([[[3, 1, -1], [4, 0, 7]]], dtype=np.float32)
expected = [np.sign(a),
a / np.sqrt(np.array([25, 1, 50])).reshape(1, 1, 3),
a / np.sqrt(np.array([11, 65])).reshape(1, 2, 1)]
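        # axis=0 has length 1, so L2 normalization reduces to the sign of each
        # entry; the other axes divide by the per-column norms sqrt([25, 1, 50])
        # and per-row norms sqrt([11, 65]), matching the hand-computed values.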
for axis, ans in enumerate(expected):
n = nodes.NormalizeDim('l2norm', axis=axis, mode='l2')
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=n.inputs.values(),
outputs=n.outputs.values())
np.testing.assert_almost_equal(fx(a)[0], ans)
def test_SelectIndex(self):
x1 = core.Input(name='x1', shape=(None, 2))
idx = core.Input(name='idx', shape=(None,), dtype='int32')
a = np.array([[3, -1], [4, 7]])
i = np.array([1, 0])
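        # SelectIndex gathers one element per row: a[0, 1] == -1 and
        # a[1, 0] == 4, so the expected output is [-1, 4].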
n = nodes.SelectIndex('select')
n.input.connect(x1)
n.index.connect(idx)
n.transform()
fx = util.compile(inputs=[x1, idx],
outputs=n.outputs.values())
np.testing.assert_equal(fx(a, i)[0], np.array([-1, 4]))
def test_SquaredEuclidean(self):
a1 = np.array([[3, -1], [4, 7]])
b1 = np.array([[1, -1], [4, 7]])
a2 = np.array([3, -1])
b2 = np.array([1, -1])
z1 = np.power(a1 - b1, 2.0).sum(axis=1)
z2 = np.power(a2 - b2, 2.0).sum()
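        # z1 is the per-row squared distance for the 2-D case; z2 collapses to
        # a scalar for the 1-D case.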
for a, b, z in zip([a1, a2], [b1, b2], [z1, z2]):
x1 = core.Input(name='x1', shape=a.shape)
x2 = core.Input(name='x2', shape=b.shape)
n = nodes.SquaredEuclidean('sqeuclid')
n.input_a.connect(x1)
n.input_b.connect(x2)
n.transform()
fx = util.compile(inputs=[x1, x2],
outputs=n.outputs.values())
np.testing.assert_equal(fx(a, b)[0], z)
def test_Product(self):
a1 = np.array([[3, -1], [4, 7]])
b1 = np.array([[1, -1], [4, 7]])
a2 = np.array([3, -1])
b2 = np.array([1, -1])
for a, b in zip([a1, a2], [b1, b2]):
x1 = core.Input(name='x1', shape=a.shape)
x2 = core.Input(name='x2', shape=b.shape)
n = nodes.Product('product')
n.input_a.connect(x1)
with self.assertRaises(nodes.UnconnectedNodeError):
n.transform()
n.input_b.connect(x2)
self.assertTrue(n.is_ready())
n.transform()
fx = util.compile(inputs=[x1, x2],
outputs=n.outputs.values())
np.testing.assert_equal(fx(a, b)[0], a*b)
def test_Affine_linear(self):
x1 = core.Input(name='x1', shape=(None, 2))
a = np.array([[3, -1], [4, 7]])
w = np.array([[1, -1], [2, -2], [3, -3]]).T
b = np.ones(3)
n = nodes.Affine(
name='affine',
input_shape=(None, 2),
output_shape=(None, 3),
act_type='linear')
n.weights.value = w
n.bias.value = b
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=[x1], outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], np.dot(a, w) + b)
def test_Affine_relu(self):
x1 = core.Input(name='x1', shape=(None, 2))
a = np.array([[3, -1], [4, 7]])
w = np.array([[1, -1], [2, -2], [3, -3]]).T
b = np.ones(3)
n = nodes.Affine(
name='affine',
input_shape=(None, 2),
output_shape=(None, 3),
act_type='relu')
n.weights.value = w
n.bias.value = b
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=[x1], outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], __relu__(np.dot(a, w) + b))
def test_Affine_dropout(self):
x1 = core.Input(name='x1', shape=(None, 2))
dropout = core.Input(name='dropout', shape=None)
a = np.array([[3, -1], [4, 7]])
w = np.array([[1, -1], [2, -2], [3, -3]]).T
b = np.ones(3)
n = nodes.Affine(
name='affine',
input_shape=(None, 2),
output_shape=(None, 3),
act_type='linear')
n.weights.value = w
n.bias.value = b
n.enable_dropout()
n.input.connect(x1)
with self.assertRaises(nodes.UnconnectedNodeError):
n.transform()
n.dropout.connect(dropout)
n.transform()
fx = util.compile(inputs=[x1, dropout], outputs=n.outputs.values())
np.testing.assert_equal(fx(a, 0.0)[0], np.dot(a, w) + b)
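        # With a dropout rate of 0.9, at least one activation is expected to
        # have been zeroed out.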
self.assertGreaterEqual(np.equal(fx(a, 0.9)[0], 0.0).sum(), 1)
def test_Affine_share_params(self):
x = core.Input(name='x1', shape=(None, 2))
a = np.array([[3, -1], [4, 7]])
w = np.array([[1, -1], [2, -2], [3, -3]]).T
b = np.ones(3)
n1 = nodes.Affine(
name='affine',
input_shape=(None, 2),
output_shape=(None, 3),
act_type='linear')
n2 = nodes.Affine(
name='affine_copy',
input_shape=(None, 2),
output_shape=(None, 3),
act_type='linear')
n2.share_params(n1)
n1.weights.value = w
n1.bias.value = b
np.testing.assert_equal(n1.weights.value, n2.weights.value)
np.testing.assert_equal(n1.bias.value, n2.bias.value)
n2.input.connect(x)
n2.transform()
fx = util.compile(inputs=[x], outputs=n2.outputs.values())
np.testing.assert_equal(fx(a)[0], np.dot(a, w) + b)
n1.weights.value *= 2
np.testing.assert_equal(fx(a)[0], np.dot(a, 2*w) + b)
def test_Conv3D_linear(self):
x1 = core.Input(name='x1', shape=(None, 1, 2, 3))
a = np.array([[3, -1], [4, 7], [2, -6]]).reshape(2, 3)
w = np.array([[[1], [-2]],
[[-3], [4]],
[[5], [-6]]]).reshape(3, 2, 1)
b = np.arange(3)
# Note that convolutions flip the kernels
z = np.array([[(a*wi[::-1]).sum(axis=0) + bi
for wi, bi in zip(w, b)]])
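        # A true convolution reverses the kernel (unlike cross-correlation),
        # hence wi[::-1] when building the expected output by hand.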
n = nodes.Conv3D(
name='conv3d',
input_shape=(None, 1, 2, 3),
weight_shape=(3, 1, 2, 1),
act_type='linear')
n.weights.value = w.reshape(3, 1, 2, 1)
n.bias.value = b
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=[x1], outputs=n.outputs.values())
np.testing.assert_equal(fx(a.reshape(1, 1, 2, 3))[0],
z.reshape(1, 3, 1, 3))
def test_Conv3D_relu(self):
x1 = core.Input(name='x1', shape=(None, 1, 2, 3))
a = np.array([[3, -1], [4, 7], [2, -6]]).reshape(2, 3)
w = np.array([[[1], [-2]],
[[-3], [4]],
[[5], [-6]]]).reshape(3, 2, 1)
b = np.arange(3)
# Note that convolutions flip the kernels
z = np.array([[(a*wi[::-1]).sum(axis=0) + bi
for wi, bi in zip(w, b)]])
        # Reshape for convenience
a = a.reshape(1, 1, 2, 3)
z = z.reshape(1, 3, 1, 3)
n = nodes.Conv3D(
name='conv3d',
input_shape=(None, 1, 2, 3),
weight_shape=(3, 1, 2, 1),
act_type='relu')
n.weights.value = w.reshape(3, 1, 2, 1)
n.bias.value = b
n.input.connect(x1)
n.transform()
fx = util.compile(inputs=[x1], outputs=n.outputs.values())
np.testing.assert_equal(fx(a)[0], __relu__(z))
def test_Conv3D_dropout(self):
x1 = core.Input(name='x1', shape=(None, 1, 2, 3))
dropout = core.Input(name='dropout', shape=None)
a = np.array([[3, -1], [4, 7], [2, -6]]).reshape(2, 3)
w = np.array([[[1], [-2]],
[[-3], [4]],
[[5], [-6]]]).reshape(3, 2, 1)
b = np.arange(3)
# Note that convolutions flip the kernels
z = np.array([[(a*wi[::-1]).sum(axis=0) + bi
for wi, bi in zip(w, b)]])
        # Reshape for convenience
a = a.reshape(1, 1, 2, 3)
z = z.reshape(1, 3, 1, 3)
n = nodes.Conv3D(
name='conv3d',
input_shape=(None, 1, 2, 3),
weight_shape=(3, 1, 2, 1),
act_type='linear')
n.enable_dropout()
n.weights.value = w.reshape(3, 1, 2, 1)
n.bias.value = b
n.input.connect(x1)
with self.assertRaises(nodes.UnconnectedNodeError):
n.transform()
n.dropout.connect(dropout)
n.transform()
fx = util.compile(inputs=[x1, dropout], outputs=n.outputs.values())
np.testing.assert_equal(fx(a, 0.0)[0], z)
self.assertGreaterEqual(np.equal(fx(a, 0.9)[0], 0.0).sum(), 1)
def test_RadialBasis(self):
x = core.Input(name='x', shape=(None, 2))
a = np.array([[3, -1], [4, 7]])
w = np.array([[1, -1], [2, -2], [3, -3]]).T
n = nodes.RadialBasis(
name='radial',
input_shape=x.shape,
output_shape=(None, 3))
n.weights.value = w.reshape(2, 3)
n.input.connect(x)
n.transform()
fx = util.compile(inputs=[x], outputs=n.outputs.values())
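        # Expected output: the squared Euclidean distance from each input row
        # to each of the three weight vectors, computed via broadcasting.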
z = np.power(a.reshape(2, 2, 1) - w.reshape(1, 2, 3),
2.0).sum(axis=1)
np.testing.assert_equal(fx(a)[0], z)
def test_SliceGT(self):
x = core.Input(name='x', shape=(None,))
n = nodes.SliceGT(name='slice-greater', value=0)
n.input.connect(x)
n.transform()
fx = util.compile(inputs=[x], outputs=n.outputs.values())
a = np.array([1, -2, 0])
np.testing.assert_equal(fx(a)[0], np.array([1]))
if __name__ == "__main__":
unittest.main()
"step-ids": [
19,
20,
23,
25,
34
]
}
|
[
19,
20,
23,
25,
34
] |
"""
Write a program that prompts for the user’s favorite number.
Use json.dump() to store this number in a file. Write a separate program that reads in this value and
prints the message, “I know your favorite number! It’s _____.”
"""
import json
file_name = 'supporting_files/favourite_number.json'
favourite_number = input('Enter your favourite number')
with open(file_name, 'a') as file_object:
json.dump(favourite_number, file_object)
print(f'{favourite_number} is saved in {file_name}')
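
# The exercise text above also calls for a second program that reads the value back.
# That reader is not part of this record; the following is a minimal sketch of it,
# assuming the same file path and that the file holds a single json.dump'ed value
# (note the writer above opens the file in append mode, so repeated runs would break this).
import json

file_name = 'supporting_files/favourite_number.json'
with open(file_name) as file_object:
    favourite_number = json.load(file_object)
print(f"I know your favorite number! It's {favourite_number}.")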
|
normal
|
{
"blob_id": "7a359d4b31bd1fd35cd1a9a1de4cbf4635e23def",
"index": 7932,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(file_name, 'a') as file_object:\n json.dump(favourite_number, file_object)\nprint(f'{favourite_number} is saved in {file_name}')\n",
"step-3": "<mask token>\nfile_name = 'supporting_files/favourite_number.json'\nfavourite_number = input('Enter you favourite number')\nwith open(file_name, 'a') as file_object:\n json.dump(favourite_number, file_object)\nprint(f'{favourite_number} is saved in {file_name}')\n",
"step-4": "<mask token>\nimport json\nfile_name = 'supporting_files/favourite_number.json'\nfavourite_number = input('Enter you favourite number')\nwith open(file_name, 'a') as file_object:\n json.dump(favourite_number, file_object)\nprint(f'{favourite_number} is saved in {file_name}')\n",
"step-5": "\"\"\"\nWrite a program that prompts for the user’s favorite number.\nUse json.dump() to store this number in a file. Write a separate program that reads in this value and\nprints the message, “I know your favorite number! It’s _____.”\n\"\"\"\n\nimport json\n\nfile_name = 'supporting_files/favourite_number.json'\nfavourite_number = input('Enter you favourite number')\n\nwith open(file_name, 'a') as file_object:\n json.dump(favourite_number, file_object)\nprint(f'{favourite_number} is saved in {file_name}')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Prestamo(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PrestamoInLine(admin.TabularInline):
model = Prestamo
extra = 1
class LibroAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
class UsuarioAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Prestamo(models.Model):
Fecha_Prestamo = models.DateTimeField(default=timezone.now)
Fecha_Devolucion = models.DateField()
Fecha_Devolucion_Real = models.DateField()
Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)
Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model = Prestamo
extra = 1
class LibroAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
class UsuarioAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Libros(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Usuario(models.Model):
DPI = models.CharField(max_length=20)
NombreCompleto = models.CharField(max_length=100)
def __str__(self):
return self.DPI
class Prestamo(models.Model):
Fecha_Prestamo = models.DateTimeField(default=timezone.now)
Fecha_Devolucion = models.DateField()
Fecha_Devolucion_Real = models.DateField()
Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)
Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model = Prestamo
extra = 1
class LibroAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
class UsuarioAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Libros(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.Titulo
class Usuario(models.Model):
DPI = models.CharField(max_length=20)
NombreCompleto = models.CharField(max_length=100)
def __str__(self):
return self.DPI
class Prestamo(models.Model):
Fecha_Prestamo = models.DateTimeField(default=timezone.now)
Fecha_Devolucion = models.DateField()
Fecha_Devolucion_Real = models.DateField()
Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)
Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model = Prestamo
extra = 1
class LibroAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
class UsuarioAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
<|reserved_special_token_1|>
from django.db import models
from django.contrib import admin
from django.utils import timezone
class Libros(models.Model):
ISBN = models.CharField(max_length=13,primary_key=True)
Titulo = models.CharField(max_length=15)
# Portada = models.ImageField(upload_to='imagen/')
Autor = models.CharField(max_length=100)
Editorial = models.CharField(max_length=100)
Pais=models.CharField(max_length=100)
anno= models.IntegerField()
def __str__(self):
return self.Titulo
class Usuario(models.Model):
DPI = models.CharField(max_length=20)
NombreCompleto= models.CharField(max_length=100)
def __str__(self):
return self.DPI
class Prestamo (models.Model):
Fecha_Prestamo=models.DateTimeField(default=timezone.now)
Fecha_Devolucion=models.DateField()
Fecha_Devolucion_Real=models.DateField()
Libro=models.ForeignKey(Libros,on_delete=models.CASCADE)
Usuario=models.ForeignKey(Usuario,on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model=Prestamo
extra=1
class LibroAdmin(admin.ModelAdmin):
inlines = (PrestamoInLine,)
class UsuarioAdmin(admin.ModelAdmin):
inlines = (PrestamoInLine,)
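
# Usage sketch (not part of the original snippet): the ModelAdmin classes above only take
# effect once they are registered with the admin site, normally from the app's admin.py:
#
#   admin.site.register(Libros, LibroAdmin)
#   admin.site.register(Usuario, UsuarioAdmin)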
|
flexible
|
{
"blob_id": "86fdea2ae8e253aa4639bb3114de70c693536760",
"index": 1046,
"step-1": "<mask token>\n\n\nclass Prestamo(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"step-2": "<mask token>\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"step-3": "<mask token>\n\n\nclass Libros(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"step-4": "<mask token>\n\n\nclass Libros(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.Titulo\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"step-5": "from django.db import models\nfrom django.contrib import admin\nfrom django.utils import timezone\n\nclass Libros(models.Model):\n ISBN = models.CharField(max_length=13,primary_key=True)\n Titulo = models.CharField(max_length=15)\n # Portada = models.ImageField(upload_to='imagen/')\n Autor = models.CharField(max_length=100)\n Editorial = models.CharField(max_length=100)\n Pais=models.CharField(max_length=100)\n anno= models.IntegerField()\n\n def __str__(self):\n return self.Titulo\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto= models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\n\nclass Prestamo (models.Model):\n Fecha_Prestamo=models.DateTimeField(default=timezone.now)\n Fecha_Devolucion=models.DateField()\n Fecha_Devolucion_Real=models.DateField()\n Libro=models.ForeignKey(Libros,on_delete=models.CASCADE)\n Usuario=models.ForeignKey(Usuario,on_delete=models.CASCADE)\n\nclass PrestamoInLine(admin.TabularInline):\n model=Prestamo\n extra=1\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = (PrestamoInLine,)\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = (PrestamoInLine,)\n",
"step-ids": [
7,
8,
12,
13,
16
]
}
|
[
7,
8,
12,
13,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Generator:
<|reserved_special_token_0|>
@staticmethod
def generate(level):
"""
        Generate an arithmetic problem of the difficulty given by level
        0: elementary school; 1: middle school; 2: high school
        """
        """
        Generate the operand sequence and the binary-operator sequence
"""
length = randint(0 if level else 1, 4)
op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]
numArr = [randint(1, 100) for i in range(length + 1)]
"""
        Generate the positions of the binary operators
"""
remain = 1
position = []
for i in range(length):
position.append(randint(0, remain))
remain += 1 - position[i]
if remain > 1:
position[-1] += remain - 1
"""
        Generate the unary-operator sequence
"""
op1Arr = []
if level:
if level == 1:
op1Arr.append(Generator.opset[randint(4, 5)])
elif level == 2:
op1Arr.append(Generator.opset[randint(6, 8)])
for i in range(randint(0, level)):
op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else
8)])
shuffle(op1Arr)
"""
        Build the postfix expression
"""
expression = numArr
offset = 2
index = 0
for i in range(length):
for j in range(position[i]):
expression.insert(i + j + offset, op2Arr[index])
index += 1
offset += position[i]
for op in op1Arr:
expression.insert(randint(1, len(expression)), op)
def getPriority(item):
"""
            Return the priority of an operator or operand
            operand: 0
            unary operator: 1
            '*', '/': 2
            '+', '-': 3
"""
if isinstance(item, int):
return 0
elif item == '+' or item == '-':
return 3
elif item == '*' or item == '/':
return 2
else:
return 1
"""
        Convert to an infix expression
        stack stores (expression, priority) pairs
"""
stack = []
for e in expression:
priority = getPriority(e)
if priority == 0:
"""
                An operand: push it onto the stack directly
"""
stack.append((e, 0))
elif priority == 3:
"""
                An add/subtract operation, lowest priority: concatenate and push onto the stack
"""
item2 = stack.pop()[0]
item1 = stack.pop()[0]
stack.append(('%s%s%s' % (item1, e, item2), 3))
elif priority == 2:
"""
                A multiply/divide operation: parenthesize operands that contain add/subtract
"""
item2, prio2 = stack.pop()
if prio2 > 2:
item2 = '(%s)' % item2
item1, prio1 = stack.pop()
if prio1 > 2:
item1 = '(%s)' % item1
stack.append(('%s%s%s' % (item1, e, item2), 2))
elif priority == 1:
"""
                A unary operation: parenthesize anything that is not a bare operand
"""
item, prio = stack.pop()
if prio:
item = '(%s)' % item
if e == '²':
stack.append(('%s%s' % (item, '²'), 1))
else:
stack.append(('%s%s' % (e, item), 1))
return stack[0][0]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Generator:
opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']
@staticmethod
def generate(level):
"""
        Generate an arithmetic problem of the difficulty given by level
        0: elementary school; 1: middle school; 2: high school
        """
        """
        Generate the operand sequence and the binary-operator sequence
"""
length = randint(0 if level else 1, 4)
op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]
numArr = [randint(1, 100) for i in range(length + 1)]
"""
        Generate the positions of the binary operators
"""
remain = 1
position = []
for i in range(length):
position.append(randint(0, remain))
remain += 1 - position[i]
if remain > 1:
position[-1] += remain - 1
"""
        Generate the unary-operator sequence
"""
op1Arr = []
if level:
if level == 1:
op1Arr.append(Generator.opset[randint(4, 5)])
elif level == 2:
op1Arr.append(Generator.opset[randint(6, 8)])
for i in range(randint(0, level)):
op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else
8)])
shuffle(op1Arr)
"""
        Build the postfix expression
"""
expression = numArr
offset = 2
index = 0
for i in range(length):
for j in range(position[i]):
expression.insert(i + j + offset, op2Arr[index])
index += 1
offset += position[i]
for op in op1Arr:
expression.insert(randint(1, len(expression)), op)
def getPriority(item):
"""
            Return the priority of an operator or operand
            operand: 0
            unary operator: 1
            '*', '/': 2
            '+', '-': 3
"""
if isinstance(item, int):
return 0
elif item == '+' or item == '-':
return 3
elif item == '*' or item == '/':
return 2
else:
return 1
"""
        Convert to an infix expression
        stack stores (expression, priority) pairs
"""
stack = []
for e in expression:
priority = getPriority(e)
if priority == 0:
"""
                An operand: push it onto the stack directly
"""
stack.append((e, 0))
elif priority == 3:
"""
                An add/subtract operation, lowest priority: concatenate and push onto the stack
"""
item2 = stack.pop()[0]
item1 = stack.pop()[0]
stack.append(('%s%s%s' % (item1, e, item2), 3))
elif priority == 2:
"""
                A multiply/divide operation: parenthesize operands that contain add/subtract
"""
item2, prio2 = stack.pop()
if prio2 > 2:
item2 = '(%s)' % item2
item1, prio1 = stack.pop()
if prio1 > 2:
item1 = '(%s)' % item1
stack.append(('%s%s%s' % (item1, e, item2), 2))
elif priority == 1:
"""
                A unary operation: parenthesize anything that is not a bare operand
"""
item, prio = stack.pop()
if prio:
item = '(%s)' % item
if e == '²':
stack.append(('%s%s' % (item, '²'), 1))
else:
stack.append(('%s%s' % (e, item), 1))
return stack[0][0]
<|reserved_special_token_1|>
from random import randint, shuffle
class Generator:
opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']
@staticmethod
def generate(level):
"""
        Generate an arithmetic problem of the difficulty given by level
        0: elementary school; 1: middle school; 2: high school
        """
        """
        Generate the operand sequence and the binary-operator sequence
"""
length = randint(0 if level else 1, 4)
op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]
numArr = [randint(1, 100) for i in range(length + 1)]
"""
        Generate the positions of the binary operators
"""
remain = 1
position = []
for i in range(length):
position.append(randint(0, remain))
remain += 1 - position[i]
if remain > 1:
position[-1] += remain - 1
"""
        Generate the unary-operator sequence
"""
op1Arr = []
if level:
if level == 1:
op1Arr.append(Generator.opset[randint(4, 5)])
elif level == 2:
op1Arr.append(Generator.opset[randint(6, 8)])
for i in range(randint(0, level)):
op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else
8)])
shuffle(op1Arr)
"""
        Build the postfix expression
"""
expression = numArr
offset = 2
index = 0
for i in range(length):
for j in range(position[i]):
expression.insert(i + j + offset, op2Arr[index])
index += 1
offset += position[i]
for op in op1Arr:
expression.insert(randint(1, len(expression)), op)
def getPriority(item):
"""
            Return the priority of an operator or operand
            operand: 0
            unary operator: 1
            '*', '/': 2
            '+', '-': 3
"""
if isinstance(item, int):
return 0
elif item == '+' or item == '-':
return 3
elif item == '*' or item == '/':
return 2
else:
return 1
"""
        Convert to an infix expression
        stack stores (expression, priority) pairs
"""
stack = []
for e in expression:
priority = getPriority(e)
if priority == 0:
"""
                An operand: push it onto the stack directly
"""
stack.append((e, 0))
elif priority == 3:
"""
                An add/subtract operation, lowest priority: concatenate and push onto the stack
"""
item2 = stack.pop()[0]
item1 = stack.pop()[0]
stack.append(('%s%s%s' % (item1, e, item2), 3))
elif priority == 2:
"""
                A multiply/divide operation: parenthesize operands that contain add/subtract
"""
item2, prio2 = stack.pop()
if prio2 > 2:
item2 = '(%s)' % item2
item1, prio1 = stack.pop()
if prio1 > 2:
item1 = '(%s)' % item1
stack.append(('%s%s%s' % (item1, e, item2), 2))
elif priority == 1:
"""
                A unary operation: parenthesize anything that is not a bare operand
"""
item, prio = stack.pop()
if prio:
item = '(%s)' % item
if e == '²':
stack.append(('%s%s' % (item, '²'), 1))
else:
stack.append(('%s%s' % (e, item), 1))
return stack[0][0]
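
# Usage sketch (not part of the original class): generate() returns the expression as a
# plain string, so a tiny driver is enough to exercise all three difficulty levels.
# Hand-tracing the infix builder: for the postfix sequence [3, 4, '+', '√'] the stack goes
# (3, 0), (4, 0) -> ('3+4', 3) -> '√(3+4)', i.e. unary operators parenthesize any
# argument that is not a bare operand.
if __name__ == '__main__':
    for level in (0, 1, 2):
        print(level, Generator.generate(level))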
|
flexible
|
{
"blob_id": "6e3bb17696953256af6d8194128427acebf1daac",
"index": 524,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Generator:\n <mask token>\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-3": "<mask token>\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-4": "from random import randint, shuffle\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
# Pose estimation and object detection: OpenCV DNN, ImageAI, YOLO, mpi, caffemodel, tensorflow
# Authors:
# Tutorial by: https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/
# Model file links collection (replace .sh script): Twenkid
# http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel
#https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt
# ImageAI: https://github.com/OlafenwaMoses/ImageAI
# # YOLOv3:
# yolo.h5
# https://github-releases.githubusercontent.com/125932201/1b8496e8-86fc-11e8-895f-fefe61ebb499?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210813%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210813T002422Z&X-Amz-Expires=300&X-Amz-Signature=02e6839be131d27b142baf50449d021339cbb334eed67a114ff9b960b8beb987&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo.h5&response-content-type=application%2Foctet-stream
# yolo-tiny.h5
# https://github-releases.githubusercontent.com/125932201/7cf559e6-86fa-11e8-81e8-1e959be261a8?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210812%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210812T232641Z&X-Amz-Expires=300&X-Amz-Signature=a5b91876c83b83a6aafba333c63c5f4a880bea9a937b30e52e92bbb0ac784018&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo-tiny.h5&response-content-type=application%2Foctet-stream
# Todor Arnaudov - Twenkid: debugging and merging; the LearnOpenCV Python code had a few misses, 13.8.2021
# It seems the pose model expects only one person, so the image must be segmented first! pose1.jpg
# Detect people with YOLO or ImageAI etc., then run the pose DNN on each crop
# Specify the paths for the 2 model files
# I tried with yolo-tiny, but the accuracy of the bounding boxes didn't seem acceptable.
# tf 1.15 for older versions of ImageAI - but tf 1.x doesn't support Py 3.8
# ImageAI: older versions require tf 1.x
# tf 2.4 - required by ImageAI 2.1.6 -- no GPU support on Win 7: tf needs CUDA 11.0 (Win10), Win7 tops out at CUDA 10.x. CPU: works
# Set the paths to models, images etc.
# My experiment results: disappointingly bad pose estimation on the images I tested - sometimes good, sometimes terrible.
import cv2
import tensorflow.compat.v1 as tf
from imageai.Detection import ObjectDetection
import os
boxes = []
def yolo():
#name = "k.jpg"
root = "Z:\\"
name = "23367640.png" #t.jpg" #"p1.jpg" #"2w.jpg" #"grigor.jpg" #"2w.jpg" #"pose1.webp" #1.jpg"
execution_path = os.getcwd()
yolo_path = "Z:\\yolo.h5"
#yolo_path = "Z:\\yolo-tiny.h5"
localdir = False
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
#detector.setModelTypeAsTinyYOLOv3()
if localdir:
detector.setModelPath(os.path.join(execution_path , yolo_path))
else:
detector.setModelPath(yolo_path)
#dir(detector)
detector.loadModel()
#loaded_model = tf.keras.models.load_model("./src/mood-saved-models/"model + ".h5")
#loaded_model = tf.keras.models.load_model(detector.)
#path = "E:\capture_023_29092020_150305.jpg" #IMG_20200528_044908.jpg"
#pathOut = "E:\YOLO_capture_023_29092020_150305.jpg"
#path = "pose1.webp" #E:\\capture_046_29092020_150628.jpg"
pathOut = "yolo_out_2.jpg"
path = root + name
pathOut = root + name + "yolo_out" + ".jpg"
detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , path), output_image_path=os.path.join(execution_path , pathOut), minimum_percentage_probability=10) #30)
for eachObject in detections:
print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"] )
print("--------------------------------")
return detections, path
det,path = yolo()
yoloImage = cv2.imread(path) #crop regions from it
for i in det:
print(i)
protoFile = "Z:\\pose\\mpi\\pose_deploy_linevec_faster_4_stages.prototxt"
#protoFile = "pose_deploy_linevec_faster_4_stages.prototxt"
#weightsFile = "Z:\\pose\\mpi\\pose_iter_440000.caffemodel"
weightsFile = "Z:\\pose\\mpi\\pose_iter_160000.caffemodel"
#weightsFile = "pose_iter_160000.caffemodel"
#weightsFile = "pose_iter_440000.caffemodel"
# Read the network into Memory
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
"""
{'name': 'person', 'percentage_probability': 99.86668229103088, 'box_points': [1
8, 38, 153, 397]}
{'name': 'person', 'percentage_probability': 53.89136075973511, 'box_points': [3
86, 93, 428, 171]}
{'name': 'person', 'percentage_probability': 11.339860409498215, 'box_points': [
585, 99, 641, 180]}
{'name': 'person', 'percentage_probability': 10.276197642087936, 'box_points': [
126, 178, 164, 290]}
{'name': 'person', 'percentage_probability': 99.94878768920898, 'box_points': [2
93, 80, 394, 410]}
{'name': 'person', 'percentage_probability': 99.95986223220825, 'box_points': [4
78, 88, 589, 410]}
{'name': 'person', 'percentage_probability': 67.95878410339355, 'box_points': [1
, 212, 39, 300]}
{'name': 'person', 'percentage_probability': 63.609880208969116, 'box_points': [
153, 193, 192, 306]}
{'name': 'person', 'percentage_probability': 23.985233902931213, 'box_points': [
226, 198, 265, 308]}
{'name': 'sports ball', 'percentage_probability': 20.820775628089905, 'box_point
s': [229, 50, 269, 94]}
{'name': 'person', 'percentage_probability': 40.28712213039398, 'box_points': [4
23, 110, 457, 160]}
H, W, Ch 407 211 3
"""
yolo_thr = 70 #in percents, not 0.7
collected = []
bWiden = False
for d in det:
if (d['name'] == 'person') and d['percentage_probability'] > yolo_thr:
x1,y1,x2,y2 = d['box_points']
if bWiden:
x1-=20
x2+=20
y1-=30
y2+=30
cropped = yoloImage[y1:y2, x1:x2]
cv2.imshow(d['name']+str(x1), cropped)
collected.append(cropped) #or copy first?
cv2.waitKey()
#x1,y1, ...
# for i in collected: cv2.imshow("COLLECTED?", i); cv2.waitKey() #OK
# Read image
#frame = cv2.imread("Z:\\23367640.png") #1.jpg")
#src = "Z:\\2w.jpg" #z:\\pose1.webp" #nacep1.jpg"
#src = "z:\\pose1.webp"
srcs = ["z:\\pose1.webp","Z:\\2w.jpg", "Z:\\grigor.jpg"]
id = 2
#src = srcs[2]
src = path #from first yolo, in order to compare
frame = cv2.imread(src)
cv2.imshow("FRAME"+src, frame)
#frameWidth, frameHeight, _ = frame.shape
frameHeight, frameWidth, ch = frame.shape
print("H, W, Ch", frameHeight, frameWidth, ch)
# Specify the input image dimensions
inWidth = 368 #184 #368
inHeight = 368 #184 #368
# Prepare the frame to be fed to the network
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
#cv2.imshow("G", inpBlob) #unsupported
#cv2.waitKey(0)
# Set the prepared object as the input blob of the network
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print("========")
H = output.shape[2]
W = output.shape[3]
# Empty list to store the detected keypoints
points = []
threshold = 0.3
maxKeypoints = 44
Keypoints = output.shape[1]
print("Keypoints from output?", Keypoints)
Keypoints = 15 #MPI ... returns only 15
labels = ["Head", "Neck", "Right Shoulder", "Right Elbow", "Right Wrist", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Chest", "Background"]
#for i in range(len()):
for i in range(Keypoints): #?
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
if prob > threshold :
cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
cv2.putText(frame, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
# Add the point to the list if the probability is greater than the threshold
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else :
points.append(None)
print(points)
cv2.imshow("Output-Keypoints",frame)
def Detect(image): #inWidth, Height ... - global, set as params later
frameHeight, frameWidth, ch = image.shape
# Prepare the image to be fed to the network
inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
#cv2.imshow("G", inpBlob) #unsupported
#cv2.waitKey(0)
# Set the prepared object as the input blob of the network
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print("========")
H = output.shape[2]
W = output.shape[3]
# Empty list to store the detected keypoints
points = []
threshold = 0.1
maxKeypoints = 44
Keypoints = output.shape[1]
print("Keypoints from output?", Keypoints)
Keypoints = 15 #MPI ... returns only 15
labels = ["Head", "Neck", "Right Shoulder", "Right Elbow", "Right Wrist", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Chest", "Background"]
#for i in range(len()):
for i in range(Keypoints): #?
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
if prob > threshold :
cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
cv2.putText(image, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
# Add the point to the list if the probability is greater than the threshold
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else :
points.append(None)
print(points)
cv2.imshow("Output-Keypoints",image)
cv2.waitKey()
for i in collected: Detect(i)
cv2.waitKey(0)
cv2.destroyAllWindows()
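
# Optional follow-up (not in the original script): the LearnOpenCV tutorial also draws the
# limbs by connecting keypoint pairs. A sketch for the 15-point MPI layout used above -
# the pair list is an assumption based on the usual MPI mapping, not taken from this code:
#
#   POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7],
#                 [1, 14], [14, 8], [8, 9], [9, 10], [14, 11], [11, 12], [12, 13]]
#   for a, b in POSE_PAIRS:
#       if points[a] and points[b]:
#           cv2.line(frame, points[a], points[b], (0, 255, 0), 2)
#   cv2.imshow("Output-Skeleton", frame)
#   cv2.waitKey(0)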
|
normal
|
{
"blob_id": "c80ae9d2eb07fd716a80a5e2d7b5237925fda02c",
"index": 5861,
"step-1": "<mask token>\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\n<mask token>\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\n<mask token>\nfor i in det:\n print(i)\n<mask token>\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\n<mask token>\ncv2.imshow('FRAME' + src, frame)\n<mask token>\nprint('H, W, Ch', frameHeight, frameWidth, ch)\n<mask token>\nnet.setInput(inpBlob)\nprint(inpBlob)\n<mask token>\nprint(output)\nprint('========')\n<mask token>\nprint('Keypoints from output?', Keypoints)\n<mask token>\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nboxes = []\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\ndet, path = yolo()\nyoloImage = cv2.imread(path)\nfor i in det:\n print(i)\nprotoFile = 'Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt'\nweightsFile = 'Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel'\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n<mask token>\nyolo_thr = 70\ncollected = []\nbWiden = False\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\nsrcs = ['z:\\\\pose1.webp', 'Z:\\\\2w.jpg', 'Z:\\\\grigor.jpg']\nid = 2\nsrc = path\nframe = cv2.imread(src)\ncv2.imshow('FRAME' + src, frame)\nframeHeight, frameWidth, ch = frame.shape\nprint('H, W, Ch', frameHeight, frameWidth, ch)\ninWidth = 368\ninHeight = 368\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, \n 0, 0), swapRB=False, crop=False)\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\nprint(output)\nprint('========')\nH = output.shape[2]\nW = output.shape[3]\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint('Keypoints from output?', Keypoints)\nKeypoints = 15\nlabels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',\n 'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',\n 'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle', 'Chest', 'Background'\n ]\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left 
Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport tensorflow.compat.v1 as tf\nfrom imageai.Detection import ObjectDetection\nimport os\nboxes = []\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\ndet, path = yolo()\nyoloImage = cv2.imread(path)\nfor i in det:\n print(i)\nprotoFile = 'Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt'\nweightsFile = 'Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel'\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n<mask token>\nyolo_thr = 70\ncollected = []\nbWiden = False\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\nsrcs = ['z:\\\\pose1.webp', 'Z:\\\\2w.jpg', 'Z:\\\\grigor.jpg']\nid = 2\nsrc = path\nframe = cv2.imread(src)\ncv2.imshow('FRAME' + src, frame)\nframeHeight, frameWidth, ch = frame.shape\nprint('H, W, Ch', frameHeight, frameWidth, ch)\ninWidth = 368\ninHeight = 368\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, \n 0, 0), swapRB=False, crop=False)\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\nprint(output)\nprint('========')\nH = output.shape[2]\nW = output.shape[3]\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint('Keypoints from output?', Keypoints)\nKeypoints = 15\nlabels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',\n 'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',\n 'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle', 'Chest', 'Background'\n ]\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n 
labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "# Pose estimation and object detection: OpenCV DNN, ImageAI, YOLO, mpi, caffemodel, tensorflow\n# Authors:\n# Tutorial by: https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/\n# Model file links collection (replace .sh script): Twenkid\n# http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel\n#https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt\n# ImageAI: https://github.com/OlafenwaMoses/ImageAI\n# # YOLOv3:\n# yolo.h5\n# https://github-releases.githubusercontent.com/125932201/1b8496e8-86fc-11e8-895f-fefe61ebb499?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210813%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210813T002422Z&X-Amz-Expires=300&X-Amz-Signature=02e6839be131d27b142baf50449d021339cbb334eed67a114ff9b960b8beb987&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo.h5&response-content-type=application%2Foctet-stream\n# yolo-tiny.h5\n# https://github-releases.githubusercontent.com/125932201/7cf559e6-86fa-11e8-81e8-1e959be261a8?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210812%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210812T232641Z&X-Amz-Expires=300&X-Amz-Signature=a5b91876c83b83a6aafba333c63c5f4a880bea9a937b30e52e92bbb0ac784018&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo-tiny.h5&response-content-type=application%2Foctet-stream\n# Todor Arnaudov - Twenkid: debug and merging, LearnOpenCV python code had a few misses, 13.8.2021\n# It seems the pose model expects only one person so the image must be segmented first! pose1.jpg\n# Detect with YOLO or ImageAI etc. then use DNN\n# Specify the paths for the 2 files\n# I tried with yolo-tiny, but the accuracy of the bounding boxes didn't seem acceptable.\n#tf 1.15 for older versions of ImageAI - but tf doesn't support Py 3.8\n#ImageAI: older versions require tf 1.x\n#tf 2.4 - required by ImageAI 2.1.6 -- no GPU supported on Win 7, tf requires CUDA 11.0 (Win10). Win7: CUDA 10.x. CPU: works\n# Set the paths to models, images etc.\n# My experiments results: disappointingly bad pose estimation on the images I tested. Sometimes good, sometimes terrible. 
\n\nimport cv2\nimport tensorflow.compat.v1 as tf\nfrom imageai.Detection import ObjectDetection\nimport os\nboxes = []\n\ndef yolo():\n #name = \"k.jpg\"\n root = \"Z:\\\\\"\n name = \"23367640.png\" #t.jpg\" #\"p1.jpg\" #\"2w.jpg\" #\"grigor.jpg\" #\"2w.jpg\" #\"pose1.webp\" #1.jpg\"\n execution_path = os.getcwd()\n yolo_path = \"Z:\\\\yolo.h5\"\n #yolo_path = \"Z:\\\\yolo-tiny.h5\"\n localdir = False\n\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n #detector.setModelTypeAsTinyYOLOv3()\n \n if localdir:\n detector.setModelPath(os.path.join(execution_path , yolo_path))\n else: \n detector.setModelPath(yolo_path)\n\n #dir(detector)\n detector.loadModel()\n #loaded_model = tf.keras.models.load_model(\"./src/mood-saved-models/\"model + \".h5\")\n #loaded_model = tf.keras.models.load_model(detector.)\n\n #path = \"E:\\capture_023_29092020_150305.jpg\" #IMG_20200528_044908.jpg\"\n #pathOut = \"E:\\YOLO_capture_023_29092020_150305.jpg\"\n\n #path = \"pose1.webp\" #E:\\\\capture_046_29092020_150628.jpg\"\n pathOut = \"yolo_out_2.jpg\"\n\n\n \n path = root + name\n pathOut = root + name + \"yolo_out\" + \".jpg\"\n\n detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , path), output_image_path=os.path.join(execution_path , pathOut), minimum_percentage_probability=10) #30)\n\n for eachObject in detections:\n print(eachObject[\"name\"] , \" : \", eachObject[\"percentage_probability\"], \" : \", eachObject[\"box_points\"] )\n print(\"--------------------------------\")\n return detections, path\n\ndet,path = yolo()\nyoloImage = cv2.imread(path) #crop regions from it \nfor i in det:\n print(i)\n \n\nprotoFile = \"Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt\"\n#protoFile = \"pose_deploy_linevec_faster_4_stages.prototxt\"\n#weightsFile = \"Z:\\\\pose\\\\mpi\\\\pose_iter_440000.caffemodel\"\nweightsFile = \"Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel\"\n#weightsFile = \"pose_iter_160000.caffemodel\"\n#weightsFile = \"pose_iter_440000.caffemodel\"\n\n# Read the network into Memory\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n\n\"\"\"\n{'name': 'person', 'percentage_probability': 99.86668229103088, 'box_points': [1\n8, 38, 153, 397]}\n{'name': 'person', 'percentage_probability': 53.89136075973511, 'box_points': [3\n86, 93, 428, 171]}\n{'name': 'person', 'percentage_probability': 11.339860409498215, 'box_points': [\n585, 99, 641, 180]}\n{'name': 'person', 'percentage_probability': 10.276197642087936, 'box_points': [\n126, 178, 164, 290]}\n{'name': 'person', 'percentage_probability': 99.94878768920898, 'box_points': [2\n93, 80, 394, 410]}\n{'name': 'person', 'percentage_probability': 99.95986223220825, 'box_points': [4\n78, 88, 589, 410]}\n{'name': 'person', 'percentage_probability': 67.95878410339355, 'box_points': [1\n, 212, 39, 300]}\n{'name': 'person', 'percentage_probability': 63.609880208969116, 'box_points': [\n153, 193, 192, 306]}\n{'name': 'person', 'percentage_probability': 23.985233902931213, 'box_points': [\n226, 198, 265, 308]}\n{'name': 'sports ball', 'percentage_probability': 20.820775628089905, 'box_point\ns': [229, 50, 269, 94]}\n{'name': 'person', 'percentage_probability': 40.28712213039398, 'box_points': [4\n23, 110, 457, 160]}\nH, W, Ch 407 211 3\n\"\"\"\nyolo_thr = 70 #in percents, not 0.7\ncollected = []\nbWiden = False\nfor d in det:\n if (d['name'] == 'person') and d['percentage_probability'] > yolo_thr:\n x1,y1,x2,y2 = d['box_points']\n if bWiden:\n x1-=20\n x2+=20\n y1-=30\n y2+=30\n cropped 
= yoloImage[y1:y2, x1:x2] \n cv2.imshow(d['name']+str(x1), cropped)\n collected.append(cropped) #or copy first?\n cv2.waitKey()\n #x1,y1, ...\n\n# for i in collected: cv2.imshow(\"COLLECTED?\", i); cv2.waitKey() #OK\n \n# Read image\n#frame = cv2.imread(\"Z:\\\\23367640.png\") #1.jpg\")\n#src = \"Z:\\\\2w.jpg\" #z:\\\\pose1.webp\" #nacep1.jpg\"\n#src = \"z:\\\\pose1.webp\" \nsrcs = [\"z:\\\\pose1.webp\",\"Z:\\\\2w.jpg\", \"Z:\\\\grigor.jpg\"]\nid = 2\n#src = srcs[2] \nsrc = path #from first yolo, in order to compare\n\nframe = cv2.imread(src)\ncv2.imshow(\"FRAME\"+src, frame)\n#frameWidth, frameHeight, _ = frame.shape\nframeHeight, frameWidth, ch = frame.shape\nprint(\"H, W, Ch\", frameHeight, frameWidth, ch)\n \n# Specify the input image dimensions\ninWidth = 368 #184 #368\ninHeight = 368 #184 #368\n\n# Prepare the frame to be fed to the network\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)\n\n#cv2.imshow(\"G\", inpBlob) #unsupported\n#cv2.waitKey(0)\n\n# Set the prepared object as the input blob of the network\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\n\nprint(output)\n\nprint(\"========\")\n\nH = output.shape[2]\nW = output.shape[3]\n# Empty list to store the detected keypoints\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint(\"Keypoints from output?\", Keypoints)\nKeypoints = 15 #MPI ... returns only 15\n\nlabels = [\"Head\", \"Neck\", \"Right Shoulder\", \"Right Elbow\", \"Right Wrist\", \"Left Shoulder\", \"Left Elbow\", \"Left Wrist\", \"Right Hip\", \"Right Knee\", \"Right Ankle\", \"Left Hip\", \"Left Knee\", \"Left Ankle\", \"Chest\", \"Background\"]\n\n#for i in range(len()):\nfor i in range(Keypoints): #?\n # confidence map of corresponding body's part.\n probMap = output[0, i, :, :]\n\n # Find global maxima of the probMap.\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n\n # Scale the point to fit on the original image\n x = (frameWidth * point[0]) / W\n y = (frameHeight * point[1]) / H\n\n if prob > threshold :\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.putText(frame, \"{}\".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n # Add the point to the list if the probability is greater than the threshold\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else :\n points.append(None)\n\nprint(points)\n\ncv2.imshow(\"Output-Keypoints\",frame)\n\ndef Detect(image): #inWidth, Height ... - global, set as params later \n frameHeight, frameWidth, ch = image.shape\n # Prepare the image to be fed to the network\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)\n\n #cv2.imshow(\"G\", inpBlob) #unsupported\n #cv2.waitKey(0)\n\n # Set the prepared object as the input blob of the network\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n\n print(output)\n\n print(\"========\")\n\n H = output.shape[2]\n W = output.shape[3]\n # Empty list to store the detected keypoints\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print(\"Keypoints from output?\", Keypoints)\n Keypoints = 15 #MPI ... 
returns only 15\n\n labels = [\"Head\", \"Neck\", \"Right Shoulder\", \"Right Elbow\", \"Right Wrist\", \"Left Shoulder\", \"Left Elbow\", \"Left Wrist\", \"Right Hip\", \"Right Knee\", \"Right Ankle\", \"Left Hip\", \"Left Knee\", \"Left Ankle\", \"Chest\", \"Background\"]\n\n #for i in range(len()):\n for i in range(Keypoints): #?\n # confidence map of corresponding body's part.\n probMap = output[0, i, :, :]\n\n # Find global maxima of the probMap.\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n\n # Scale the point to fit on the original image\n x = (frameWidth * point[0]) / W\n y = (frameHeight * point[1]) / H\n\n if prob > threshold :\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.putText(image, \"{}\".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n # Add the point to the list if the probability is greater than the threshold\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else :\n points.append(None)\n\n print(points)\n cv2.imshow(\"Output-Keypoints\",image)\n cv2.waitKey()\n\nfor i in collected: Detect(i)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
''' mock_proto.py '''
from heron.common.src.python import constants
import heron.proto.execution_state_pb2 as protoEState
import heron.proto.physical_plan_pb2 as protoPPlan
import heron.proto.tmaster_pb2 as protoTmaster
import heron.proto.topology_pb2 as protoTopology
# pylint: disable=no-self-use, missing-docstring
class MockProto(object):
''' Mocking Proto'''
topology_name = "mock_topology_name"
topology_id = "mock_topology_id"
cluster = "mock_topology_cluster"
environ = "mock_topology_environ"
def create_mock_spout(self,
spout_name,
output_streams,
spout_parallelism):
spout = protoTopology.Spout()
spout.comp.name = spout_name
kv = spout.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(spout_parallelism)
for stream in output_streams:
spout.outputs.add().stream.CopyFrom(stream)
return spout
def create_mock_bolt(self,
bolt_name,
input_streams,
output_streams,
bolt_parallelism):
bolt = protoTopology.Bolt()
bolt.comp.name = bolt_name
kv = bolt.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(bolt_parallelism)
for stream in input_streams:
bolt.inputs.add().stream.CopyFrom(stream)
for stream in output_streams:
bolt.outputs.add().stream.CopyFrom(stream)
return bolt
def create_mock_simple_topology(
self,
spout_parallelism=1,
bolt_parallelism=1):
"""
Simple topology contains one spout and one bolt.
"""
topology = protoTopology.Topology()
topology.id = MockProto.topology_id
topology.name = MockProto.topology_name
# Stream1
stream1 = protoTopology.StreamId()
stream1.id = "mock_stream1"
stream1.component_name = "mock_spout"
# Spout1
spout = self.create_mock_spout("mock_spout", [stream1], spout_parallelism)
topology.spouts.extend([spout])
# Bolt1
bolt = self.create_mock_bolt("mock_bolt", [stream1], [], bolt_parallelism)
topology.bolts.extend([bolt])
return topology
def create_mock_medium_topology(
self,
spout_parallelism=1,
bolt1_parallelism=1,
bolt2_parallelism=1,
bolt3_parallelism=1):
"""
Medium topology is a three stage topology
with one spout, two mid stage bolts, and one
last stage bolt.
S -str1-> B1 -str3-> B3
S -str2-> B2 -str4-> B3
"""
topology = protoTopology.Topology()
topology.id = "mock_topology_id"
topology.name = "mock_topology_name"
# Streams
stream1 = protoTopology.StreamId()
stream1.id = "mock_stream1"
stream1.component_name = "mock_spout1"
stream2 = protoTopology.StreamId()
stream2.id = "mock_stream2"
stream2.component_name = "mock_spout1"
stream3 = protoTopology.StreamId()
stream3.id = "mock_stream3"
stream3.component_name = "mock_bolt1"
stream4 = protoTopology.StreamId()
stream4.id = "mock_stream4"
stream4.component_name = "mock_bolt2"
# Spouts
spout1 = self.create_mock_spout("mock_spout1",
[stream1, stream2],
spout_parallelism)
topology.spouts.extend([spout1])
# Bolts
bolt1 = self.create_mock_bolt("mock_bolt1",
[stream1],
[stream3],
bolt1_parallelism)
bolt2 = self.create_mock_bolt("mock_bolt2",
[stream2],
[stream4],
bolt2_parallelism)
bolt3 = self.create_mock_bolt("mock_bolt3",
[stream3, stream4],
[],
bolt3_parallelism)
topology.bolts.extend([bolt1, bolt2, bolt3])
return topology
def create_mock_simple_physical_plan(
self,
spout_parallelism=1,
bolt_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_simple_topology(
spout_parallelism,
bolt_parallelism))
return pplan
def create_mock_medium_physical_plan(
self,
spout_parallelism=1,
bolt1_parallelism=1,
bolt2_parallelism=1,
bolt3_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_medium_topology(
spout_parallelism,
bolt1_parallelism,
bolt2_parallelism,
bolt3_parallelism))
return pplan
def create_mock_execution_state(self):
estate = protoEState.ExecutionState()
estate.topology_name = MockProto.topology_name
estate.topology_id = MockProto.topology_id
estate.cluster = MockProto.cluster
estate.environ = MockProto.environ
return estate
def create_mock_tmaster(self):
tmaster = protoTmaster.TMasterLocation()
return tmaster
def add_topology_config(self, topology, key, value):
kv = topology.topology_config.kvs.add()
kv.key = key
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(value)
|
normal
|
{
"blob_id": "002ef36bd132f1ac258b3f8baf8098accbd8a8f2",
"index": 6839,
"step-1": "<mask token>\n\n\nclass MockProto(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return 
pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MockProto(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return 
pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n",
"step-3": "<mask token>\n\n\nclass MockProto(object):\n \"\"\" Mocking Proto\"\"\"\n topology_name = 'mock_topology_name'\n topology_id = 'mock_topology_id'\n cluster = 'mock_topology_cluster'\n environ = 'mock_topology_environ'\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n 
pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n",
"step-4": "<mask token>\nfrom heron.common.src.python import constants\nimport heron.proto.execution_state_pb2 as protoEState\nimport heron.proto.physical_plan_pb2 as protoPPlan\nimport heron.proto.tmaster_pb2 as protoTmaster\nimport heron.proto.topology_pb2 as protoTopology\n\n\nclass MockProto(object):\n \"\"\" Mocking Proto\"\"\"\n topology_name = 'mock_topology_name'\n topology_id = 'mock_topology_id'\n cluster = 'mock_topology_cluster'\n environ = 'mock_topology_environ'\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, 
bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n",
"step-5": "''' mock_proto.py '''\nfrom heron.common.src.python import constants\nimport heron.proto.execution_state_pb2 as protoEState\nimport heron.proto.physical_plan_pb2 as protoPPlan\nimport heron.proto.tmaster_pb2 as protoTmaster\nimport heron.proto.topology_pb2 as protoTopology\n\n# pylint: disable=no-self-use, missing-docstring\nclass MockProto(object):\n ''' Mocking Proto'''\n topology_name = \"mock_topology_name\"\n topology_id = \"mock_topology_id\"\n cluster = \"mock_topology_cluster\"\n environ = \"mock_topology_environ\"\n\n def create_mock_spout(self,\n spout_name,\n output_streams,\n spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self,\n bolt_name,\n input_streams,\n output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(\n self,\n spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n\n # Stream1\n stream1 = protoTopology.StreamId()\n stream1.id = \"mock_stream1\"\n stream1.component_name = \"mock_spout\"\n\n # Spout1\n spout = self.create_mock_spout(\"mock_spout\", [stream1], spout_parallelism)\n topology.spouts.extend([spout])\n\n # Bolt1\n bolt = self.create_mock_bolt(\"mock_bolt\", [stream1], [], bolt_parallelism)\n topology.bolts.extend([bolt])\n\n return topology\n\n def create_mock_medium_topology(\n self,\n spout_parallelism=1,\n bolt1_parallelism=1,\n bolt2_parallelism=1,\n bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = \"mock_topology_id\"\n topology.name = \"mock_topology_name\"\n\n # Streams\n stream1 = protoTopology.StreamId()\n stream1.id = \"mock_stream1\"\n stream1.component_name = \"mock_spout1\"\n\n stream2 = protoTopology.StreamId()\n stream2.id = \"mock_stream2\"\n stream2.component_name = \"mock_spout1\"\n\n stream3 = protoTopology.StreamId()\n stream3.id = \"mock_stream3\"\n stream3.component_name = \"mock_bolt1\"\n\n stream4 = protoTopology.StreamId()\n stream4.id = \"mock_stream4\"\n stream4.component_name = \"mock_bolt2\"\n\n # Spouts\n spout1 = self.create_mock_spout(\"mock_spout1\",\n [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n\n # Bolts\n bolt1 = self.create_mock_bolt(\"mock_bolt1\",\n [stream1],\n [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt(\"mock_bolt2\",\n [stream2],\n [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt(\"mock_bolt3\",\n [stream3, stream4],\n [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n\n\n return 
topology\n\n def create_mock_simple_physical_plan(\n self,\n spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism,\n bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(\n self,\n spout_parallelism=1,\n bolt1_parallelism=1,\n bolt2_parallelism=1,\n bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism,\n bolt1_parallelism,\n bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('--rs', type=str, nargs='+')
<|reserved_special_token_0|>
for f in args.rs:
df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')
post_df = df if post_df is None else post_df.union(df)
<|reserved_special_token_0|>
ret.write.orc('RS.orc', mode='overwrite')
ret.write.json('RS.json', mode='overwrite')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser = argparse.ArgumentParser()
parser.add_argument('--rs', type=str, nargs='+')
args = parser.parse_args()
ss = SparkSession.builder.getOrCreate()
post_df = None
for f in args.rs:
df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')
post_df = df if post_df is None else post_df.union(df)
subreddit_ids = pickle.load(open('subreddit_ids', 'rb'))
ret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)
ret.write.orc('RS.orc', mode='overwrite')
ret.write.json('RS.json', mode='overwrite')
<|reserved_special_token_1|>
import argparse
import pickle
import pandas as pd
from pyspark.sql.session import SparkSession
parser = argparse.ArgumentParser()
parser.add_argument('--rs', type=str, nargs='+')
args = parser.parse_args()
ss = SparkSession.builder.getOrCreate()
post_df = None
for f in args.rs:
df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')
post_df = df if post_df is None else post_df.union(df)
subreddit_ids = pickle.load(open('subreddit_ids', 'rb'))
ret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)
ret.write.orc('RS.orc', mode='overwrite')
ret.write.json('RS.json', mode='overwrite')
|
flexible
|
{
"blob_id": "e6b3def6ed6f2523d88912832a876caf2742b786",
"index": 7572,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--rs', type=str, nargs='+')\n<mask token>\nfor f in args.rs:\n df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')\n post_df = df if post_df is None else post_df.union(df)\n<mask token>\nret.write.orc('RS.orc', mode='overwrite')\nret.write.json('RS.json', mode='overwrite')\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser()\nparser.add_argument('--rs', type=str, nargs='+')\nargs = parser.parse_args()\nss = SparkSession.builder.getOrCreate()\npost_df = None\nfor f in args.rs:\n df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')\n post_df = df if post_df is None else post_df.union(df)\nsubreddit_ids = pickle.load(open('subreddit_ids', 'rb'))\nret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)\nret.write.orc('RS.orc', mode='overwrite')\nret.write.json('RS.json', mode='overwrite')\n",
"step-4": "import argparse\nimport pickle\nimport pandas as pd\nfrom pyspark.sql.session import SparkSession\nparser = argparse.ArgumentParser()\nparser.add_argument('--rs', type=str, nargs='+')\nargs = parser.parse_args()\nss = SparkSession.builder.getOrCreate()\npost_df = None\nfor f in args.rs:\n df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')\n post_df = df if post_df is None else post_df.union(df)\nsubreddit_ids = pickle.load(open('subreddit_ids', 'rb'))\nret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)\nret.write.orc('RS.orc', mode='overwrite')\nret.write.json('RS.json', mode='overwrite')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import subprocess
import re
from whoosh.index import create_in
from whoosh.fields import *
import os
import codecs
from whoosh.qparser import QueryParser
import whoosh.index as index
import json
from autosub.autosub import autosub
from azure.storage.blob import AppendBlobService
class fileNameError(Exception):
	"""Raised when a path or filename does not match the expected pattern."""
	pass

vedio_formats = ['mp4','avi','wmv','mov'] # 1
audio_formats = ['wav','flac','mp3','aiff'] # 2
def file_upload(file_pwd, append_blob_service):
regex = r"(.+)\/(.+)"
if re.search(regex, file_pwd):
match = re.search(regex, file_pwd)
file_dir = match.group(1) + '/'
file_name_and_type = match.group(2).lower()
else:
raise fileNameError('fileNameError')
regex = r"(.+)\.(.+)"
if re.search(regex, file_name_and_type):
match = re.search(regex, file_name_and_type)
file_name = match.group(1)
file_type = match.group(2).lower()
else:
raise fileNameError('fileNameError')
transcript = autosub(file_pwd, format="json")
print "Generated data structure: \n"
print(file_name_and_type)
whoosh_indexing(file_name_and_type,file_pwd,transcript, append_blob_service)
return transcript
# def autosubing(file_pwd,transcripts_timed_pwd,file_type):
# if not os.path.isfile(transcripts_timed_pwd):
# if file_format(file_type) == 1:
# # command = "python ./autosub/autosub.py -F json -V %s" %(file_pwd)
# # command = "python ./autosub/autosub.py %s -F json" %(file_pwd)
# autosub(file_pwd, format="json")
# elif file_format(file_type) == 2:
# # command = "python ./autosub/autosub.py %s -F json" %(file_pwd)
# autosub(file_pwd, format="json")
# else:
# autosub(file_pwd, format="json")
# print "Autosubed"
# else:
# print 'file has already been autosubed'
def whoosh_indexing(file_name,file_pwd,transcript, append_blob_service):
transcripts_timed = json.loads(transcript)
transcripts_content = ''
for i in transcripts_timed:
transcripts_content = transcripts_content + ' ' + i['content']
# Whoosh the search engine
schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)
if not os.path.exists("temp_index"):
os.mkdir("temp_index")
#ix = index.create_in("temp_index", schema)
ix = index.open_dir("temp_index")
writer = ix.writer()
writer.update_document(title=file_name.decode('utf-8'), path=file_pwd.decode('utf-8'), content=transcripts_content.decode('utf-8'))
writer.commit()
# for filename in os.listdir('temp_index'):
# root, ext = os.path.splitext(filename)
# if root.startswith('MAIN_') and ext == '.seg':
# file = filename
# print(os.path.join('temp_index', file))
# append_blob_service.create_blob('search-file', file)
# append_blob_service.append_blob_from_path(
# 'search-file',
# file,
# os.path.join('temp_index', file)
# )
print("Written")
# throw formatError
def file_format(file_type):
if file_type in vedio_formats:
return 1;
elif file_type in audio_formats:
return 2
else:
return 3
|
normal
|
{
"blob_id": "7b5a16fdc536eb4ae3fdc08f827663613560187a",
"index": 8642,
"step-1": "import subprocess\nfrom whoosh.index import create_in\nfrom whoosh.fields import *\nimport os\nimport codecs\nfrom whoosh.qparser import QueryParser\nimport whoosh.index as index\nimport json\nfrom autosub.autosub import autosub\nfrom azure.storage.blob import AppendBlobService\n\nvedio_formats = ['mp4','avi','wmv','mov'] # 1\naudio_formats = ['wav','flac','mp3','aiff'] # 2\n\ndef file_upload(file_pwd, append_blob_service):\n\tregex = r\"(.+)\\/(.+)\"\n\tif re.search(regex, file_pwd):\n\t\tmatch = re.search(regex, file_pwd)\n\t\tfile_dir = match.group(1) + '/'\n\t\tfile_name_and_type = match.group(2).lower() \n\telse:\n\t\traise fileNameError('fileNameError')\n\tregex = r\"(.+)\\.(.+)\"\n\tif re.search(regex, file_name_and_type):\n\t match = re.search(regex, file_name_and_type)\n\t file_name = match.group(1)\n\t file_type = match.group(2).lower()\n\telse: \n\t\traise fileNameError('fileNameError')\n\ttranscript = autosub(file_pwd, format=\"json\")\n\tprint \"Generated data structure: \\n\"\n\tprint(file_name_and_type)\n\twhoosh_indexing(file_name_and_type,file_pwd,transcript, append_blob_service)\n\treturn transcript\n\n# def autosubing(file_pwd,transcripts_timed_pwd,file_type):\n# \tif not os.path.isfile(transcripts_timed_pwd):\n# \t\tif file_format(file_type) == 1:\t\n# \t\t\t# command = \"python ./autosub/autosub.py -F json -V %s\" %(file_pwd)\n# \t\t\t# command = \"python ./autosub/autosub.py %s -F json\" %(file_pwd)\n# \t\t\tautosub(file_pwd, format=\"json\")\n# \t\telif file_format(file_type) == 2:\n# \t\t\t# command = \"python ./autosub/autosub.py %s -F json\" %(file_pwd)\n# \t\t\tautosub(file_pwd, format=\"json\")\n# \t\telse:\n# \t\t\tautosub(file_pwd, format=\"json\")\n# \t\tprint \"Autosubed\"\n# \telse: \n# \t\tprint 'file has already been autosubed'\n\ndef whoosh_indexing(file_name,file_pwd,transcript, append_blob_service):\n\ttranscripts_timed = json.loads(transcript)\n\ttranscripts_content = ''\n\tfor i in transcripts_timed:\n\t\ttranscripts_content = transcripts_content + ' ' + i['content']\n\t# Whoosh the search engine\n\tschema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)\n\tif not os.path.exists(\"temp_index\"):\n\t os.mkdir(\"temp_index\")\n\t#ix = index.create_in(\"temp_index\", schema)\n\tix = index.open_dir(\"temp_index\")\n\twriter = ix.writer()\n\twriter.update_document(title=file_name.decode('utf-8'), path=file_pwd.decode('utf-8'), content=transcripts_content.decode('utf-8'))\n\twriter.commit()\n\n\t# for filename in os.listdir('temp_index'):\n\t# root, ext = os.path.splitext(filename)\n\t# if root.startswith('MAIN_') and ext == '.seg':\n\t# file = filename\n\n\t# print(os.path.join('temp_index', file))\n\t# append_blob_service.create_blob('search-file', file)\n\t# append_blob_service.append_blob_from_path(\n\t# \t'search-file',\n\t# \tfile,\n\t# \tos.path.join('temp_index', file)\n\t# )\n\tprint(\"Written\")\n\n# throw formatError\ndef file_format(file_type):\n\tif file_type in vedio_formats:\n\t\treturn 1;\n\telif file_type in audio_formats:\n\t\treturn 2\n\telse: \n\t\treturn 3\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
try:
fh = open('testfile','w')
try:
fh.write('This is my test file for this exception')
finally:
print "Going to close file"
fh.close()
except IOError:
    print " Error: can\'t find file or read data"
|
normal
|
{
"blob_id": "a538c6d8c9f99bc37def5817a54c831393c051f3",
"index": 7395,
"step-1": "#!/usr/bin/python\n\n\ntry:\n fh = open('testfile','w')\n try:\n fh.write('This is my test file for this exception')\n finally:\n print \"Going to close file\"\n fh.close()\n\nexcept IOError:\n print\" Error: can\\'t find file or read data\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 23 10:16:40 2014
@author: Yusuke
"""
import math
result = []
for i in range(6 * 9**5):
sum_num = 0
for j_digit in str(i):
sum_num += int(j_digit) ** 5
if sum_num == i:
print i
result.append(i)
print math.fsum(result)
|
normal
|
{
"blob_id": "08ccc58fe139db3f4712aa551b80f6ea57e0ad76",
"index": 1888,
"step-1": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 23 10:16:40 2014\n\n@author: Yusuke\n\"\"\"\nimport math\n\nresult = []\nfor i in range(6 * 9**5):\n sum_num = 0\n for j_digit in str(i):\n sum_num += int(j_digit) ** 5\n \n if sum_num == i:\n print i\n result.append(i)\n \nprint math.fsum(result)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
import re
pdfs_file = './pdf_names_2017.txt'
sessions_file = './session_names_2017.txt'
with open(pdfs_file) as f:
pdf_names = f.read().splitlines()
with open(sessions_file) as f:
session_names = f.read().splitlines()
#for i in xrange(0,len(pdf_names)):
# print str(i+1).zfill(3) + '_-_' + pdf_names[i][:-4] + '_-_' + session_names[i] + pdf_names[i][-4:]
card_pre = """
<section class="section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp">
<header class="section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white">
<i class="material-icons">record_voice_over</i>
</header>
<div class="mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone">
<div class="mdl-card__supporting-text">
"""
card_content = """
<h4>Incidental_Findings_-_Introduction_and_Overview</h4>
Monday_0700_LBerland
"""
card_post_1 = """
</div>
<div class="mdl-card__actions">
<a href="pdf/"""
card_post_2 = """" target="_blank" class="mdl-button">Handout</a>
</div>
</div>
</section>
"""
"""
<section class="section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp">
<header class="section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white">
<i class="material-icons">record_voice_over</i>
</header>
<div class="mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone">
<div class="mdl-card__supporting-text">
<h4>Incidental_Findings_-_Introduction_and_Overview</h4>
Monday_0700_LBerland
</div>
<div class="mdl-card__actions">
<a href="#" class="mdl-button">Handout</a>
</div>
</div>
</section>
"""
for i in xrange(0,len(pdf_names)):
print card_pre + "<h4>" + session_names[i] + "</h4>" + pdf_names[i][:-4].replace("_"," ") + card_post_1 + pdf_names[i] + card_post_2
|
normal
|
{
"blob_id": "e686d8617360c5a3ce35bd4d2bdeb2376b33f53a",
"index": 9726,
"step-1": "#!/usr/bin/env python\n\nimport re\n\n\npdfs_file = './pdf_names_2017.txt'\nsessions_file = './session_names_2017.txt'\n\nwith open(pdfs_file) as f:\n pdf_names = f.read().splitlines()\n\nwith open(sessions_file) as f:\n session_names = f.read().splitlines()\n\n#for i in xrange(0,len(pdf_names)):\n# print str(i+1).zfill(3) + '_-_' + pdf_names[i][:-4] + '_-_' + session_names[i] + pdf_names[i][-4:]\n\n\ncard_pre = \"\"\"\n<section class=\"section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp\">\n <header class=\"section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white\">\n <i class=\"material-icons\">record_voice_over</i>\n </header>\n <div class=\"mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone\">\n <div class=\"mdl-card__supporting-text\">\n\"\"\"\n\ncard_content = \"\"\" \n<h4>Incidental_Findings_-_Introduction_and_Overview</h4>\n Monday_0700_LBerland\n\"\"\"\n\ncard_post_1 = \"\"\"\n </div>\n <div class=\"mdl-card__actions\">\n <a href=\"pdf/\"\"\"\n\n\ncard_post_2 = \"\"\"\" target=\"_blank\" class=\"mdl-button\">Handout</a>\n </div>\n </div>\n</section>\n\"\"\"\n\n\"\"\"\n<section class=\"section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp\">\n <header class=\"section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white\">\n <i class=\"material-icons\">record_voice_over</i>\n </header>\n <div class=\"mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone\">\n <div class=\"mdl-card__supporting-text\">\n <h4>Incidental_Findings_-_Introduction_and_Overview</h4>\n Monday_0700_LBerland\n </div>\n <div class=\"mdl-card__actions\">\n <a href=\"#\" class=\"mdl-button\">Handout</a>\n </div>\n </div>\n</section>\n\"\"\"\n\nfor i in xrange(0,len(pdf_names)):\n print card_pre + \"<h4>\" + session_names[i] + \"</h4>\" + pdf_names[i][:-4].replace(\"_\",\" \") + card_post_1 + pdf_names[i] + card_post_2\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def _func(filename, label):
image_string = tf.io.read_file(filename)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS
.img_size - 8]) / 255.0
if random() > 0.5:
decode_image = tf.image.flip_left_right(decode_image)
label = label - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot, label
def val_func(name, label):
image_string = tf.io.read_file(name)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS
.img_size - 8]) / 255.0
label = int(label) - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot
def run_model(model, images):
logits, probs = model(images, training=True)
return logits, probs
@tf.function
def train_step(model, images, levels, imp):
with tf.GradientTape() as tape:
logits, probs = run_model(model, images)
total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +
(tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)
total_loss = tf.reduce_mean(total_loss)
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return total_loss
<|reserved_special_token_0|>
@tf.function
def test_MAE(model, images, labels):
logits, probs = model(images, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict, 1)
grd_age = tf.argmax(labels, 1) + 1
grd_age = tf.cast(grd_age, tf.float32)
AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))
return AE
def make_levels(labels):
levels = []
for i in range(FLAGS.batch_size):
l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels
[i].numpy())
l = tf.cast(l, tf.float32)
levels.append(l)
return tf.convert_to_tensor(levels, tf.float32)
def main(argv=None):
train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -
8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,
weight_path=FLAGS.weights, weights='imagenet')
regularizer = tf.keras.regularizers.l2(5e-06)
initializer = tf.keras.initializers.glorot_normal()
for layer in train_model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
x = train_model.output
avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)
logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(
avgpool)
logits = Linear(FLAGS.num_classes - 1)(logits)
probs = tf.nn.sigmoid(logits)
train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,
probs])
train_model.summary()
if FLAGS.pre_checkpoint is True:
ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer
)
ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.
pre_checkpoint_path, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
print(ckpt_manager.latest_checkpoint)
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
if FLAGS.train == True:
data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,
usecols=0)
data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]
data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,
usecols=1)
imp = task_importance_weights(data_label - 16)
imp = imp[0:FLAGS.num_classes - 1]
val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',
skiprows=0, usecols=[0, 1, 2, 3])
print(len(val_data_name))
WM_img, WM_age = [], []
WF_img, WF_age = [], []
BM_img, BM_age = [], []
BF_img, BF_age = [], []
for i in range(len(val_data_name)):
if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':
WM_img.append(FLAGS.val_img_path + val_data_name[i][0])
WM_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':
WF_img.append(FLAGS.val_img_path + val_data_name[i][0])
WF_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':
BM_img.append(FLAGS.val_img_path + val_data_name[i][0])
BM_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':
BF_img.append(FLAGS.val_img_path + val_data_name[i][0])
BF_age.append(val_data_name[i][1])
print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))
WM_img, WM_age = np.array(WM_img), np.array(WM_age)
WF_img, WF_age = np.array(WF_img), np.array(WF_age)
BM_img, BM_age = np.array(BM_img), np.array(BM_age)
BF_img, BF_age = np.array(BF_img), np.array(BF_age)
all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age
], [BF_img, BF_age]]
batch_idx = len(data_label) // FLAGS.batch_size
loss_f = open(FLAGS.output_loss_txt, 'w')
count = 0
for epoch in range(FLAGS.epochs):
A = list(zip(data_name, data_label))
shuffle(A)
data_name, data_label = zip(*A)
data_name = np.array(data_name)
data_label = np.array(data_label)
data_generator = tf.data.Dataset.from_tensor_slices((data_name,
data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(FLAGS.batch_size)
data_generator = data_generator.prefetch(tf.data.experimental.
AUTOTUNE)
it = iter(data_generator)
for step in range(batch_idx):
batch_images, batch_labels, age = next(it)
levels = make_levels(age)
total_loss = train_step(train_model, batch_images, levels, imp)
if count % 10 == 0:
print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1,
step + 1, batch_idx, total_loss))
if count % 100 == 0:
test_list = ['WM', 'WF', 'BM', 'BF']
for j in range(len(all_val_list)):
val_img, val_lab = all_val_list[j]
val_data_generator = (tf.data.Dataset.
from_tensor_slices((val_img, val_lab)))
val_data_generator = val_data_generator.map(val_func)
val_data_generator = val_data_generator.batch(1)
val_data_generator = val_data_generator.prefetch(tf
.data.experimental.AUTOTUNE)
val_idx = len(val_img) // 1
val_it = iter(val_data_generator)
AE = 0
for i in range(val_idx):
img, lab = next(val_it)
pre_age = test_MAE(train_model, img, lab)
AE += pre_age
print('MAE = {} ({})'.format(AE / len(val_img),
test_list[j]))
loss_f.write('Epochs: {}, step = {}'.format(epoch,
count))
loss_f.write(' --> ')
loss_f.write(test_list[j])
loss_f.write(': ')
loss_f.write(str(AE / len(val_img)))
loss_f.write(', ')
loss_f.write('\n')
loss_f.flush()
count += 1
else:
data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,
usecols=0)
data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]
data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=
0, usecols=1)
data_generator = tf.data.Dataset.from_tensor_slices((data_name,
data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(1)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
MAE = 0
it = iter(data_generator)
for i in range(FLAGS.n_test):
image, labels, opp_labels = next(it)
_, probs = train_model(image, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict)
age = tf.cast(age, tf.float32)
MAE += tf.reduce_sum(tf.math.abs(grd_age - age))
if i % 1000 == 0:
print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))
print('Total MAE = {}'.format(MAE / FLAGS.n_test))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _func(filename, label):
image_string = tf.io.read_file(filename)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS
.img_size - 8]) / 255.0
if random() > 0.5:
decode_image = tf.image.flip_left_right(decode_image)
label = label - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot, label
def val_func(name, label):
image_string = tf.io.read_file(name)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS
.img_size - 8]) / 255.0
label = int(label) - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot
def run_model(model, images):
logits, probs = model(images, training=True)
return logits, probs
@tf.function
def train_step(model, images, levels, imp):
with tf.GradientTape() as tape:
logits, probs = run_model(model, images)
total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +
(tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)
total_loss = tf.reduce_mean(total_loss)
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return total_loss
def task_importance_weights(data):
label = np.array(data).astype(np.float32)
num_examples = label.size
y = np.unique(label)
m = np.zeros(label.shape)
for i, t in enumerate(np.arange(np.min(y), np.max(y))):
m_k = np.max([label[label > t].size, num_examples - label[label > t
].size])
m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)
m[i] = tf.sqrt(m_k)
max_ = np.max(m)
imp = tf.cast(m / max_, tf.float32)
return imp
@tf.function
def test_MAE(model, images, labels):
logits, probs = model(images, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict, 1)
grd_age = tf.argmax(labels, 1) + 1
grd_age = tf.cast(grd_age, tf.float32)
AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))
return AE
def make_levels(labels):
levels = []
for i in range(FLAGS.batch_size):
l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels
[i].numpy())
l = tf.cast(l, tf.float32)
levels.append(l)
return tf.convert_to_tensor(levels, tf.float32)
def main(argv=None):
train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -
8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,
weight_path=FLAGS.weights, weights='imagenet')
regularizer = tf.keras.regularizers.l2(5e-06)
initializer = tf.keras.initializers.glorot_normal()
for layer in train_model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
x = train_model.output
avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)
logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(
avgpool)
logits = Linear(FLAGS.num_classes - 1)(logits)
probs = tf.nn.sigmoid(logits)
train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,
probs])
train_model.summary()
if FLAGS.pre_checkpoint is True:
ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer
)
ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.
pre_checkpoint_path, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
print(ckpt_manager.latest_checkpoint)
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
if FLAGS.train == True:
data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,
usecols=0)
data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]
data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,
usecols=1)
imp = task_importance_weights(data_label - 16)
imp = imp[0:FLAGS.num_classes - 1]
val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',
skiprows=0, usecols=[0, 1, 2, 3])
print(len(val_data_name))
WM_img, WM_age = [], []
WF_img, WF_age = [], []
BM_img, BM_age = [], []
BF_img, BF_age = [], []
for i in range(len(val_data_name)):
if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':
WM_img.append(FLAGS.val_img_path + val_data_name[i][0])
WM_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':
WF_img.append(FLAGS.val_img_path + val_data_name[i][0])
WF_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':
BM_img.append(FLAGS.val_img_path + val_data_name[i][0])
BM_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':
BF_img.append(FLAGS.val_img_path + val_data_name[i][0])
BF_age.append(val_data_name[i][1])
print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))
WM_img, WM_age = np.array(WM_img), np.array(WM_age)
WF_img, WF_age = np.array(WF_img), np.array(WF_age)
BM_img, BM_age = np.array(BM_img), np.array(BM_age)
BF_img, BF_age = np.array(BF_img), np.array(BF_age)
all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age
], [BF_img, BF_age]]
batch_idx = len(data_label) // FLAGS.batch_size
loss_f = open(FLAGS.output_loss_txt, 'w')
count = 0
for epoch in range(FLAGS.epochs):
A = list(zip(data_name, data_label))
shuffle(A)
data_name, data_label = zip(*A)
data_name = np.array(data_name)
data_label = np.array(data_label)
data_generator = tf.data.Dataset.from_tensor_slices((data_name,
data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(FLAGS.batch_size)
data_generator = data_generator.prefetch(tf.data.experimental.
AUTOTUNE)
it = iter(data_generator)
for step in range(batch_idx):
batch_images, batch_labels, age = next(it)
levels = make_levels(age)
total_loss = train_step(train_model, batch_images, levels, imp)
if count % 10 == 0:
print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1,
step + 1, batch_idx, total_loss))
if count % 100 == 0:
test_list = ['WM', 'WF', 'BM', 'BF']
for j in range(len(all_val_list)):
val_img, val_lab = all_val_list[j]
val_data_generator = (tf.data.Dataset.
from_tensor_slices((val_img, val_lab)))
val_data_generator = val_data_generator.map(val_func)
val_data_generator = val_data_generator.batch(1)
val_data_generator = val_data_generator.prefetch(tf
.data.experimental.AUTOTUNE)
val_idx = len(val_img) // 1
val_it = iter(val_data_generator)
AE = 0
for i in range(val_idx):
img, lab = next(val_it)
pre_age = test_MAE(train_model, img, lab)
AE += pre_age
print('MAE = {} ({})'.format(AE / len(val_img),
test_list[j]))
loss_f.write('Epochs: {}, step = {}'.format(epoch,
count))
loss_f.write(' --> ')
loss_f.write(test_list[j])
loss_f.write(': ')
loss_f.write(str(AE / len(val_img)))
loss_f.write(', ')
loss_f.write('\n')
loss_f.flush()
count += 1
else:
data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,
usecols=0)
data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]
        data_label = np.loadtxt(FLAGS.test_txt, dtype=np.float32, skiprows=0, usecols=1)
data_generator = tf.data.Dataset.from_tensor_slices((data_name,
data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(1)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
MAE = 0
it = iter(data_generator)
for i in range(FLAGS.n_test):
image, labels, opp_labels = next(it)
_, probs = train_model(image, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict)
            grd_age = tf.cast(tf.argmax(labels, 1) + 1, tf.float32)
            MAE += tf.reduce_sum(tf.math.abs(grd_age - pre_age))
if i % 1000 == 0:
print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))
print('Total MAE = {}'.format(MAE / FLAGS.n_test))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
flags.DEFINE_string('img_path',
'/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')
flags.DEFINE_string('txt_path',
'/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt'
, 'Text (with label information) directory')
flags.DEFINE_string('val_img_path',
'/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/',
'Validate image path')
flags.DEFINE_string('val_txt_path',
'/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt'
, 'Validate text path')
flags.DEFINE_string('val_txt_path_2',
'D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt'
    , 'Validation text path')
flags.DEFINE_integer('img_size', 128, 'Image size')
flags.DEFINE_integer('ch', 3, 'Image channels')
flags.DEFINE_integer('batch_size', 256, 'Train Batch size')
flags.DEFINE_integer('val_batch_size', 128, 'Validation Batch size')
flags.DEFINE_integer('val_batch_size_2', 128, 'Validation2 batch size')
flags.DEFINE_integer('num_classes', 48, 'Number of classes')
flags.DEFINE_integer('epochs', 5000, 'Total epochs of training')
flags.DEFINE_float('lr', 5e-05, 'Learning rate')
flags.DEFINE_string('weights',
'/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5'
, '')
flags.DEFINE_bool('train', True, 'True or False')
flags.DEFINE_bool('pre_checkpoint', False, 'True or False')
flags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')
flags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')
flags.DEFINE_string('graphs', '', '')
flags.DEFINE_integer('n_test', 10000, 'Number of test images')
flags.DEFINE_string('test_txt', '', 'Test text(label) path')
flags.DEFINE_string('test_img', '', 'Test images path')
flags.DEFINE_string('output_loss_txt',
'/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt', ''
)
<|reserved_special_token_0|>
FLAGS(sys.argv)
<|reserved_special_token_0|>
def _func(filename, label):
image_string = tf.io.read_file(filename)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS
.img_size - 8]) / 255.0
if random() > 0.5:
decode_image = tf.image.flip_left_right(decode_image)
label = label - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot, label
def val_func(name, label):
image_string = tf.io.read_file(name)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS
.img_size - 8]) / 255.0
label = int(label) - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot
def run_model(model, images):
logits, probs = model(images, training=True)
return logits, probs
@tf.function
def train_step(model, images, levels, imp):
with tf.GradientTape() as tape:
logits, probs = run_model(model, images)
total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +
(tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)
total_loss = tf.reduce_mean(total_loss)
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return total_loss
def task_importance_weights(data):
label = np.array(data).astype(np.float32)
num_examples = label.size
y = np.unique(label)
m = np.zeros(label.shape)
for i, t in enumerate(np.arange(np.min(y), np.max(y))):
m_k = np.max([label[label > t].size, num_examples - label[label > t
].size])
m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)
m[i] = tf.sqrt(m_k)
max_ = np.max(m)
imp = tf.cast(m / max_, tf.float32)
return imp
@tf.function
def test_MAE(model, images, labels):
logits, probs = model(images, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict, 1)
grd_age = tf.argmax(labels, 1) + 1
grd_age = tf.cast(grd_age, tf.float32)
AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))
return AE
def make_levels(labels):
levels = []
for i in range(FLAGS.batch_size):
l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels
[i].numpy())
l = tf.cast(l, tf.float32)
levels.append(l)
return tf.convert_to_tensor(levels, tf.float32)
def main(argv=None):
train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -
8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,
weight_path=FLAGS.weights, weights='imagenet')
regularizer = tf.keras.regularizers.l2(5e-06)
initializer = tf.keras.initializers.glorot_normal()
for layer in train_model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
x = train_model.output
avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)
logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(
avgpool)
logits = Linear(FLAGS.num_classes - 1)(logits)
probs = tf.nn.sigmoid(logits)
train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,
probs])
train_model.summary()
if FLAGS.pre_checkpoint is True:
ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer
)
ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.
pre_checkpoint_path, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
print(ckpt_manager.latest_checkpoint)
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
if FLAGS.train == True:
data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,
usecols=0)
data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]
data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,
usecols=1)
imp = task_importance_weights(data_label - 16)
imp = imp[0:FLAGS.num_classes - 1]
val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',
skiprows=0, usecols=[0, 1, 2, 3])
print(len(val_data_name))
WM_img, WM_age = [], []
WF_img, WF_age = [], []
BM_img, BM_age = [], []
BF_img, BF_age = [], []
for i in range(len(val_data_name)):
if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':
WM_img.append(FLAGS.val_img_path + val_data_name[i][0])
WM_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':
WF_img.append(FLAGS.val_img_path + val_data_name[i][0])
WF_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':
BM_img.append(FLAGS.val_img_path + val_data_name[i][0])
BM_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':
BF_img.append(FLAGS.val_img_path + val_data_name[i][0])
BF_age.append(val_data_name[i][1])
print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))
WM_img, WM_age = np.array(WM_img), np.array(WM_age)
WF_img, WF_age = np.array(WF_img), np.array(WF_age)
BM_img, BM_age = np.array(BM_img), np.array(BM_age)
BF_img, BF_age = np.array(BF_img), np.array(BF_age)
all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age
], [BF_img, BF_age]]
batch_idx = len(data_label) // FLAGS.batch_size
loss_f = open(FLAGS.output_loss_txt, 'w')
count = 0
for epoch in range(FLAGS.epochs):
A = list(zip(data_name, data_label))
shuffle(A)
data_name, data_label = zip(*A)
data_name = np.array(data_name)
data_label = np.array(data_label)
data_generator = tf.data.Dataset.from_tensor_slices((data_name,
data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(FLAGS.batch_size)
data_generator = data_generator.prefetch(tf.data.experimental.
AUTOTUNE)
it = iter(data_generator)
for step in range(batch_idx):
batch_images, batch_labels, age = next(it)
levels = make_levels(age)
total_loss = train_step(train_model, batch_images, levels, imp)
if count % 10 == 0:
print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1,
step + 1, batch_idx, total_loss))
if count % 100 == 0:
test_list = ['WM', 'WF', 'BM', 'BF']
for j in range(len(all_val_list)):
val_img, val_lab = all_val_list[j]
val_data_generator = (tf.data.Dataset.
from_tensor_slices((val_img, val_lab)))
val_data_generator = val_data_generator.map(val_func)
val_data_generator = val_data_generator.batch(1)
val_data_generator = val_data_generator.prefetch(tf
.data.experimental.AUTOTUNE)
val_idx = len(val_img) // 1
val_it = iter(val_data_generator)
AE = 0
for i in range(val_idx):
img, lab = next(val_it)
pre_age = test_MAE(train_model, img, lab)
AE += pre_age
print('MAE = {} ({})'.format(AE / len(val_img),
test_list[j]))
loss_f.write('Epochs: {}, step = {}'.format(epoch,
count))
loss_f.write(' --> ')
loss_f.write(test_list[j])
loss_f.write(': ')
loss_f.write(str(AE / len(val_img)))
loss_f.write(', ')
loss_f.write('\n')
loss_f.flush()
count += 1
else:
data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,
usecols=0)
data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]
        data_label = np.loadtxt(FLAGS.test_txt, dtype=np.float32, skiprows=0, usecols=1)
data_generator = tf.data.Dataset.from_tensor_slices((data_name,
data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(1)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
MAE = 0
it = iter(data_generator)
for i in range(FLAGS.n_test):
image, labels, opp_labels = next(it)
_, probs = train_model(image, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict)
            grd_age = tf.cast(tf.argmax(labels, 1) + 1, tf.float32)
            MAE += tf.reduce_sum(tf.math.abs(grd_age - pre_age))
if i % 1000 == 0:
print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))
print('Total MAE = {}'.format(MAE / FLAGS.n_test))
if __name__ == '__main__':
app.run(main)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
flags.DEFINE_string('img_path',
'/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')
flags.DEFINE_string('txt_path',
'/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt'
, 'Text (with label information) directory')
flags.DEFINE_string('val_img_path',
'/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/',
'Validate image path')
flags.DEFINE_string('val_txt_path',
'/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt'
, 'Validate text path')
flags.DEFINE_string('val_txt_path_2',
'D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt'
    , 'Validation text path')
flags.DEFINE_integer('img_size', 128, 'Image size')
flags.DEFINE_integer('ch', 3, 'Image channels')
flags.DEFINE_integer('batch_size', 256, 'Train Batch size')
flags.DEFINE_integer('val_batch_size', 128, 'Validation Batch size')
flags.DEFINE_integer('val_batch_size_2', 128, 'Validation2 batch size')
flags.DEFINE_integer('num_classes', 48, 'Number of classes')
flags.DEFINE_integer('epochs', 5000, 'Total epochs of training')
flags.DEFINE_float('lr', 5e-05, 'Learning rate')
flags.DEFINE_string('weights',
'/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5'
, '')
flags.DEFINE_bool('train', True, 'True or False')
flags.DEFINE_bool('pre_checkpoint', False, 'True or False')
flags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')
flags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')
flags.DEFINE_string('graphs', '', '')
flags.DEFINE_integer('n_test', 10000, 'Number of test images')
flags.DEFINE_string('test_txt', '', 'Test text(label) path')
flags.DEFINE_string('test_img', '', 'Test images path')
flags.DEFINE_string('output_loss_txt',
'/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt', ''
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
optimizer = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.9, beta_2=0.99)
def _func(filename, label):
image_string = tf.io.read_file(filename)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS
.img_size - 8]) / 255.0
if random() > 0.5:
decode_image = tf.image.flip_left_right(decode_image)
label = label - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot, label
def val_func(name, label):
image_string = tf.io.read_file(name)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS
.img_size - 8]) / 255.0
label = int(label) - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot
def run_model(model, images):
logits, probs = model(images, training=True)
return logits, probs
@tf.function
def train_step(model, images, levels, imp):
with tf.GradientTape() as tape:
logits, probs = run_model(model, images)
total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +
(tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)
total_loss = tf.reduce_mean(total_loss)
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return total_loss
def task_importance_weights(data):
label = np.array(data).astype(np.float32)
num_examples = label.size
y = np.unique(label)
m = np.zeros(label.shape)
for i, t in enumerate(np.arange(np.min(y), np.max(y))):
m_k = np.max([label[label > t].size, num_examples - label[label > t
].size])
m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)
m[i] = tf.sqrt(m_k)
max_ = np.max(m)
imp = tf.cast(m / max_, tf.float32)
return imp
@tf.function
def test_MAE(model, images, labels):
logits, probs = model(images, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict, 1)
grd_age = tf.argmax(labels, 1) + 1
grd_age = tf.cast(grd_age, tf.float32)
AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))
return AE
def make_levels(labels):
levels = []
for i in range(FLAGS.batch_size):
l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels
[i].numpy())
l = tf.cast(l, tf.float32)
levels.append(l)
return tf.convert_to_tensor(levels, tf.float32)
def main(argv=None):
train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -
8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,
weight_path=FLAGS.weights, weights='imagenet')
regularizer = tf.keras.regularizers.l2(5e-06)
initializer = tf.keras.initializers.glorot_normal()
for layer in train_model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
x = train_model.output
avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)
logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(
avgpool)
logits = Linear(FLAGS.num_classes - 1)(logits)
probs = tf.nn.sigmoid(logits)
train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,
probs])
train_model.summary()
if FLAGS.pre_checkpoint is True:
ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer
)
ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.
pre_checkpoint_path, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
print(ckpt_manager.latest_checkpoint)
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
if FLAGS.train == True:
data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,
usecols=0)
data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]
data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,
usecols=1)
imp = task_importance_weights(data_label - 16)
imp = imp[0:FLAGS.num_classes - 1]
val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',
skiprows=0, usecols=[0, 1, 2, 3])
print(len(val_data_name))
WM_img, WM_age = [], []
WF_img, WF_age = [], []
BM_img, BM_age = [], []
BF_img, BF_age = [], []
for i in range(len(val_data_name)):
if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':
WM_img.append(FLAGS.val_img_path + val_data_name[i][0])
WM_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':
WF_img.append(FLAGS.val_img_path + val_data_name[i][0])
WF_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':
BM_img.append(FLAGS.val_img_path + val_data_name[i][0])
BM_age.append(val_data_name[i][1])
if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':
BF_img.append(FLAGS.val_img_path + val_data_name[i][0])
BF_age.append(val_data_name[i][1])
print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))
WM_img, WM_age = np.array(WM_img), np.array(WM_age)
WF_img, WF_age = np.array(WF_img), np.array(WF_age)
BM_img, BM_age = np.array(BM_img), np.array(BM_age)
BF_img, BF_age = np.array(BF_img), np.array(BF_age)
all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age
], [BF_img, BF_age]]
batch_idx = len(data_label) // FLAGS.batch_size
loss_f = open(FLAGS.output_loss_txt, 'w')
count = 0
for epoch in range(FLAGS.epochs):
A = list(zip(data_name, data_label))
shuffle(A)
data_name, data_label = zip(*A)
data_name = np.array(data_name)
data_label = np.array(data_label)
data_generator = tf.data.Dataset.from_tensor_slices((data_name,
data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(FLAGS.batch_size)
data_generator = data_generator.prefetch(tf.data.experimental.
AUTOTUNE)
it = iter(data_generator)
for step in range(batch_idx):
batch_images, batch_labels, age = next(it)
levels = make_levels(age)
total_loss = train_step(train_model, batch_images, levels, imp)
if count % 10 == 0:
print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1,
step + 1, batch_idx, total_loss))
if count % 100 == 0:
test_list = ['WM', 'WF', 'BM', 'BF']
for j in range(len(all_val_list)):
val_img, val_lab = all_val_list[j]
val_data_generator = (tf.data.Dataset.
from_tensor_slices((val_img, val_lab)))
val_data_generator = val_data_generator.map(val_func)
val_data_generator = val_data_generator.batch(1)
val_data_generator = val_data_generator.prefetch(tf
.data.experimental.AUTOTUNE)
val_idx = len(val_img) // 1
val_it = iter(val_data_generator)
AE = 0
for i in range(val_idx):
img, lab = next(val_it)
pre_age = test_MAE(train_model, img, lab)
AE += pre_age
print('MAE = {} ({})'.format(AE / len(val_img),
test_list[j]))
loss_f.write('Epochs: {}, step = {}'.format(epoch,
count))
loss_f.write(' --> ')
loss_f.write(test_list[j])
loss_f.write(': ')
loss_f.write(str(AE / len(val_img)))
loss_f.write(', ')
loss_f.write('\n')
loss_f.flush()
count += 1
else:
data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,
usecols=0)
data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]
        data_label = np.loadtxt(FLAGS.test_txt, dtype=np.float32, skiprows=0, usecols=1)
data_generator = tf.data.Dataset.from_tensor_slices((data_name,
data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(1)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
MAE = 0
it = iter(data_generator)
for i in range(FLAGS.n_test):
image, labels, opp_labels = next(it)
_, probs = train_model(image, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict)
            grd_age = tf.cast(tf.argmax(labels, 1) + 1, tf.float32)
            MAE += tf.reduce_sum(tf.math.abs(grd_age - pre_age))
if i % 1000 == 0:
print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))
print('Total MAE = {}'.format(MAE / FLAGS.n_test))
if __name__ == '__main__':
app.run(main)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# https://github.com/Raschka-research-group/coral-cnn/tree/master/model-code/resnet34
from absl import flags, app
from Rank_consistent_model_fix import *
from Rank_consistent_model import *
from random import shuffle, random
import tensorflow as tf
import numpy as np
# import cv2
import os
import sys
import datetime
flags.DEFINE_string('img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')
flags.DEFINE_string('txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt', 'Text (with label information) directory')
flags.DEFINE_string('val_img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Validate image path')
flags.DEFINE_string('val_txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt', 'Validate text path')
flags.DEFINE_string("val_txt_path_2", "D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt", "Validataion text path")
flags.DEFINE_integer('img_size', 128, 'Image size')
flags.DEFINE_integer('ch', 3, 'Image channels')
flags.DEFINE_integer('batch_size', 256, 'Train Batch size')
flags.DEFINE_integer("val_batch_size", 128, "Validation Batch size")
flags.DEFINE_integer("val_batch_size_2", 128, "Validation2 batch size")
flags.DEFINE_integer('num_classes', 48, 'Number of classes')
flags.DEFINE_integer('epochs', 5000, 'Total epochs of training')
flags.DEFINE_float("lr", 5e-5, "Learning rate")
flags.DEFINE_string('weights', "/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5", '')
flags.DEFINE_bool('train', True, 'True or False')
flags.DEFINE_bool('pre_checkpoint', False, 'True or False')
flags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')
flags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')
flags.DEFINE_string("graphs", "", "")
flags.DEFINE_integer('n_test', 10000, 'Number of test images')
flags.DEFINE_string('test_txt', '', 'Test text(label) path')
flags.DEFINE_string('test_img', '', 'Test images path')
flags.DEFINE_string("output_loss_txt", "/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt", "")
FLAGS = flags.FLAGS
FLAGS(sys.argv)
optimizer = tf.keras.optimizers.Adam(FLAGS.lr,beta_1=0.9, beta_2=0.99)
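# Example invocation. The script name and dataset paths below are placeholders,
# not taken from this repository; they only illustrate how the absl flags above
# are overridden on the command line:
#
#   python coral_age_train.py \
#       --img_path=/data/UTKFace/ --txt_path=/data/UTK/train.txt \
#       --val_img_path=/data/UTKFace/ --val_txt_path=/data/UTK/test.txt \
#       --batch_size=256 --num_classes=48 --lr=5e-5 --train=True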
def _func(filename, label):
image_string = tf.io.read_file(filename)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.
#decode_image = tf.image.random_crop(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8, 3])
if random() > 0.5:
decode_image = tf.image.flip_left_right(decode_image)
#decode_image = tf.image.per_image_standardization(decode_image)
label = label - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot, label
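# Note on the mapping above: the label column is assumed to contain raw ages
# starting at 16, so "label - 16" turns them into class indices
# 0 .. FLAGS.num_classes - 1 (ages 16..63 with the default num_classes = 48).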
def val_func(name, label):
image_string = tf.io.read_file(name)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.
#decode_image = tf.image.per_image_standardization(decode_image)
label = int(label) - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot
#@tf.function
def run_model(model, images):
logits, probs = model(images, training=True)
return logits, probs
@tf.function
def train_step(model, images, levels, imp):
with tf.GradientTape() as tape:
logits, probs = run_model(model, images)
#total_loss = (-tf.reduce_sum((tf.nn.log_softmax(logits, axis=2)[:,:,1]*levels + tf.nn.log_softmax(logits, axis=2)[:,:,0]*(1-levels))*imp, 1))
# total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + tf.math.log(1. - tf.nn.sigmoid(logits))*(1-levels))*imp, 1))
total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + (tf.math.log_sigmoid(logits) - logits)*(1-levels))*imp, 1))
#total_loss = -tf.reduce_sum((tf.math.log(tf.nn.softmax(logits, 2)[:, :, 1] + 1e-7) * levels \
# + tf.math.log(tf.nn.softmax(logits, 2)[:, :, 0] + 1e-7) * (1 - levels)) * imp, 1)
total_loss = tf.reduce_mean(total_loss)
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return total_loss
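# Note on the loss above: this is the CORAL objective, an importance-weighted sum of
# binary cross-entropies over the num_classes - 1 ordinal thresholds. The term
# (tf.math.log_sigmoid(logits) - logits) is a numerically stable rewrite of
# log(1 - sigmoid(logits)), using log(1 - sigmoid(x)) = log(sigmoid(-x)) = log(sigmoid(x)) - x.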
def task_importance_weights(data):
label = np.array(data).astype(np.float32)
num_examples = label.size
y = np.unique(label)
m = np.zeros(label.shape)
for i, t in enumerate(np.arange(np.min(y), np.max(y))):
m_k = np.max([label[label > t].size,
num_examples - label[label > t].size])
#print(m_k)
m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)
m[i] = tf.sqrt(m_k)
# m[i] = float(m_k)**(0.5)
max_ = np.max(m)
imp = tf.cast(m / max_, tf.float32)
#print(imp)
return imp
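# Note on task_importance_weights: following the CORAL paper, each threshold t is
# weighted by sqrt(max(#{y > t}, #{y <= t})), normalised by the largest weight.
# m is allocated with one slot per training example, but only the first
# len(np.arange(y.min(), y.max())) entries are filled; the caller slices the result
# down to num_classes - 1 entries before passing it to train_step.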
@tf.function
def test_MAE(model, images, labels):
logits, probs = model(images, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict, 1)
grd_age = tf.argmax(labels, 1) + 1
grd_age = tf.cast(grd_age, tf.float32)
AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))
return AE
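# Note on test_MAE: decoding follows the CORAL rule, i.e. the predicted rank is the
# number of binary tasks with probability above 0.5 (so pre_age lies in
# 0 .. num_classes - 1), while grd_age is argmax of the one-hot label plus one.
# A minimal sketch of the decoding step on dummy values (illustrative only):
#
#   probs = tf.constant([[0.9, 0.8, 0.6, 0.4, 0.2]])            # 1 sample, 5 thresholds
#   rank = tf.reduce_sum(tf.cast(probs > 0.5, tf.float32), 1)   # -> [3.]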
def make_levels(labels):
levels = []
for i in range(FLAGS.batch_size):
l = [1] * (labels[i].numpy()) + [0]*(FLAGS.num_classes - 1 - labels[i].numpy())
l = tf.cast(l, tf.float32)
levels.append(l)
return tf.convert_to_tensor(levels, tf.float32)
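# Example of the extended binary encoding built above (using num_classes = 5 for
# brevity): an age index of 3 becomes levels = [1, 1, 1, 0], meaning the sample
# exceeds thresholds 0, 1 and 2 but not threshold 3.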
def main(argv=None):
# train_model = resnet_type1(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, 3), NUM_CLASSES=FLAGS.num_classes)
train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, FLAGS.ch), include_top=False,
batch_size=FLAGS.batch_size, weight_path=FLAGS.weights, weights='imagenet')
regularizer = tf.keras.regularizers.l2(0.000005)
initializer = tf.keras.initializers.glorot_normal()
for layer in train_model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
# for attr_ in ["kernel_initializer"]:
# if hasattr(layer, attr_):
# setattr(layer, attr_, initializer)
x = train_model.output
avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)
# avgpool = tf.reshape(avgpool, [avgpool.shape[0], -1])
# fc = tf.keras.layers.Dense(1, use_bias=False)(avgpool)
# logits = Linear(NUM_CLASSES - 1)(fc)
logits = tf.keras.layers.Dense(FLAGS.num_classes-1, use_bias=False)(avgpool)
logits = Linear(FLAGS.num_classes - 1)(logits)
probs = tf.nn.sigmoid(logits)
train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits, probs])
train_model.summary()
#for m in train_model.layers:
# if isinstance(m, tf.keras.layers.Conv2D):
# a = m.output_mask
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, (2. / n)**.5)
# elif isinstance(m, tf.keras.layers.BatchNormalization):
# m.get_weights
# m.weight.data.fill_(1)
# m.bias.data.zero_()
if FLAGS.pre_checkpoint is True:
ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
print(ckpt_manager.latest_checkpoint)
ckpt.restore(ckpt_manager.latest_checkpoint)
print ('Latest checkpoint restored!!')
if FLAGS.train == True:
data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0, usecols=0)
data_name = [FLAGS.img_path + data_name_ for data_name_ in data_name]
data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0, usecols=1)
imp = task_importance_weights(data_label-16)
imp = imp[0:FLAGS.num_classes-1]
val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100', skiprows=0, usecols=[0, 1, 2, 3])
print(len(val_data_name))
WM_img, WM_age = [], []
WF_img, WF_age = [], []
BM_img, BM_age = [], []
BF_img, BF_age = [], []
for i in range(len(val_data_name)):
if val_data_name[i][2] == "M" and val_data_name[i][3] == "W":
WM_img.append(FLAGS.val_img_path + val_data_name[i][0])
WM_age.append(val_data_name[i][1])
if val_data_name[i][2] == "F" and val_data_name[i][3] == "W":
WF_img.append(FLAGS.val_img_path + val_data_name[i][0])
WF_age.append(val_data_name[i][1])
if val_data_name[i][2] == "M" and val_data_name[i][3] == "B":
BM_img.append(FLAGS.val_img_path + val_data_name[i][0])
BM_age.append(val_data_name[i][1])
if val_data_name[i][2] == "F" and val_data_name[i][3] == "B":
BF_img.append(FLAGS.val_img_path + val_data_name[i][0])
BF_age.append(val_data_name[i][1])
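        # The four lists above split the validation set by the gender ('M'/'F') and
        # race ('W'/'B') codes in columns 3 and 4 of the validation text file,
        # presumably White/Black x Male/Female subgroups.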
print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))
WM_img, WM_age = np.array(WM_img), np.array(WM_age)
WF_img, WF_age = np.array(WF_img), np.array(WF_age)
BM_img, BM_age = np.array(BM_img), np.array(BM_age)
BF_img, BF_age = np.array(BF_img), np.array(BF_age)
all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age], [BF_img, BF_age]]
batch_idx = len(data_label) // FLAGS.batch_size
#current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
#train_log_dir = FLAGS.graphs + current_time + '/train'
#val_log_dir = FLAGS.graphs + current_time + '/val'
#train_summary_writer = tf.summary.create_file_writer(train_log_dir)
#val_summary_writer = tf.summary.create_file_writer(val_log_dir)
loss_f = open(FLAGS.output_loss_txt, "w")
count = 0
for epoch in range(FLAGS.epochs):
A = list(zip(data_name, data_label))
shuffle(A)
data_name, data_label = zip(*A)
data_name = np.array(data_name)
data_label = np.array(data_label)
data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(FLAGS.batch_size)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
it = iter(data_generator)
#imp = task_importance_weights(data_label)
#imp = imp[0:FLAGS.num_classes-1]
for step in range(batch_idx):
batch_images, batch_labels, age = next(it)
levels = make_levels(age)
total_loss = train_step(train_model, batch_images, levels, imp)
#with val_summary_writer.as_default():
# tf.summary.scalar(u'total loss', loss, step=count)
if count % 10 == 0:
#MAE = test_MAE(train_model, batch_images, batch_labels, levels)
print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, step + 1, batch_idx, total_loss))
if count % 100 == 0:
test_list = ["WM", "WF", "BM", "BF"]
for j in range(len(all_val_list)):
val_img, val_lab = all_val_list[j]
val_data_generator = tf.data.Dataset.from_tensor_slices((val_img, val_lab))
val_data_generator = val_data_generator.map(val_func)
val_data_generator = val_data_generator.batch(1)
val_data_generator = val_data_generator.prefetch(tf.data.experimental.AUTOTUNE)
val_idx = len(val_img) // 1
val_it = iter(val_data_generator)
AE = 0
for i in range(val_idx):
img, lab = next(val_it)
pre_age = test_MAE(train_model, img, lab)
AE += pre_age
print("MAE = {} ({})".format(AE / len(val_img), test_list[j]))
loss_f.write("Epochs: {}, step = {}".format(epoch, count))
loss_f.write(" --> ")
loss_f.write(test_list[j])
loss_f.write(": ")
loss_f.write(str(AE / len(val_img)))
loss_f.write(", ")
loss_f.write("\n")
loss_f.flush()
# print("==========")
# print("[2]MAE = {}".format(MAE))
# print("==========")
# model_dir = FLAGS.save_checkpoint
# folder_name = int((count + 1)/val_idx)
# folder_name_str = "%s/%s" % (model_dir, folder_name)
# if not os.path.isdir(folder_name_str):
# print("Make {} folder to save checkpoint".format(folder_name))
# os.makedirs(folder_name_str)
# ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)
# checkpoint_dir = folder_name_str + "/" + "CORAL_{}_steps.ckpt".format(count)
# ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_dir, 5)
# ckpt_manager.save()
# with val_summary_writer.as_default():
# tf.summary.scalar(u'[2]MAE', MAE, step=count)
count += 1
else:
data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0, usecols=0)
data_name = [FLAGS.test_img + data_name_ for data_name_ in data_name]
        data_label = np.loadtxt(FLAGS.test_txt, dtype=np.float32, skiprows=0, usecols=1)
data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(1)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
MAE = 0
it = iter(data_generator)
for i in range(FLAGS.n_test):
image, labels, opp_labels = next(it)
_, probs = train_model(image, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict)
            grd_age = tf.cast(tf.argmax(labels, 1) + 1, tf.float32)
            MAE += tf.reduce_sum(tf.math.abs(grd_age - pre_age))
if i % 1000 == 0:
print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))
print('Total MAE = {}'.format(MAE / FLAGS.n_test))
if __name__ == '__main__':
app.run(main)
|
flexible
|
{
"blob_id": "9ffe350ff9a568111620ef7dafef83d341f6f01e",
"index": 9409,
"step-1": "<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\n@tf.function\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\n<mask token>\n\n\n@tf.function\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = 
np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = 
data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\n@tf.function\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\n@tf.function\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n 
ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 
1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\n<mask token>\n",
"step-3": "<mask token>\nflags.DEFINE_string('img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\nflags.DEFINE_string('txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt'\n , 'Text (with label information) directory')\nflags.DEFINE_string('val_img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/',\n 'Validate image path')\nflags.DEFINE_string('val_txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt'\n , 'Validate text path')\nflags.DEFINE_string('val_txt_path_2',\n 'D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt'\n , 'Validataion text path')\nflags.DEFINE_integer('img_size', 128, 'Image size')\nflags.DEFINE_integer('ch', 3, 'Image channels')\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\nflags.DEFINE_integer('val_batch_size', 128, 'Validation Batch size')\nflags.DEFINE_integer('val_batch_size_2', 128, 'Validation2 batch size')\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\nflags.DEFINE_float('lr', 5e-05, 'Learning rate')\nflags.DEFINE_string('weights',\n '/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5'\n , '')\nflags.DEFINE_bool('train', True, 'True or False')\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\nflags.DEFINE_string('graphs', '', '')\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\nflags.DEFINE_string('test_img', '', 'Test images path')\nflags.DEFINE_string('output_loss_txt',\n '/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt', ''\n )\n<mask token>\nFLAGS(sys.argv)\n<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\n@tf.function\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n for i, 
t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\n@tf.function\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n 
BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-4": "<mask token>\nflags.DEFINE_string('img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\nflags.DEFINE_string('txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt'\n , 'Text (with label information) directory')\nflags.DEFINE_string('val_img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/',\n 'Validate image path')\nflags.DEFINE_string('val_txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt'\n , 'Validate text path')\nflags.DEFINE_string('val_txt_path_2',\n 'D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt'\n , 'Validataion text path')\nflags.DEFINE_integer('img_size', 128, 'Image size')\nflags.DEFINE_integer('ch', 3, 'Image channels')\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\nflags.DEFINE_integer('val_batch_size', 128, 'Validation Batch size')\nflags.DEFINE_integer('val_batch_size_2', 128, 'Validation2 batch size')\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\nflags.DEFINE_float('lr', 5e-05, 'Learning rate')\nflags.DEFINE_string('weights',\n '/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5'\n , '')\nflags.DEFINE_bool('train', True, 'True or False')\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\nflags.DEFINE_string('graphs', '', '')\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\nflags.DEFINE_string('test_img', '', 'Test images path')\nflags.DEFINE_string('output_loss_txt',\n '/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt', ''\n )\nFLAGS = flags.FLAGS\nFLAGS(sys.argv)\noptimizer = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.9, beta_2=0.99)\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\n@tf.function\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = 
label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\n@tf.function\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), 
np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-5": "# -*- coding: utf-8 -*-\n# https://github.com/Raschka-research-group/coral-cnn/tree/master/model-code/resnet34\nfrom absl import flags, app\nfrom Rank_consistent_model_fix import *\nfrom Rank_consistent_model import *\nfrom random import shuffle, random\n\nimport tensorflow as tf\nimport numpy as np\n# import cv2\nimport os\nimport sys\nimport datetime\n\nflags.DEFINE_string('img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\n\nflags.DEFINE_string('txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt', 'Text (with label information) directory')\n\nflags.DEFINE_string('val_img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Validate image path')\n\nflags.DEFINE_string('val_txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt', 'Validate text path')\n\nflags.DEFINE_string(\"val_txt_path_2\", \"D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt\", \"Validataion text path\")\n\nflags.DEFINE_integer('img_size', 128, 'Image size')\n\nflags.DEFINE_integer('ch', 3, 'Image channels')\n\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\n\nflags.DEFINE_integer(\"val_batch_size\", 128, \"Validation Batch size\")\n\nflags.DEFINE_integer(\"val_batch_size_2\", 128, \"Validation2 batch size\")\n\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\n\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\n\nflags.DEFINE_float(\"lr\", 5e-5, \"Learning rate\")\n\nflags.DEFINE_string('weights', \"/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5\", '')\n\nflags.DEFINE_bool('train', True, 'True or False')\n\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\n\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\n\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\n\nflags.DEFINE_string(\"graphs\", \"\", \"\")\n\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\n\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\n\nflags.DEFINE_string('test_img', '', 'Test images path')\n\nflags.DEFINE_string(\"output_loss_txt\", \"/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt\", \"\")\n\nFLAGS = flags.FLAGS\nFLAGS(sys.argv)\n\noptimizer = tf.keras.optimizers.Adam(FLAGS.lr,beta_1=0.9, beta_2=0.99)\n\n\ndef _func(filename, label):\n\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.\n #decode_image = tf.image.random_crop(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8, 3])\n\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n\n #decode_image = tf.image.per_image_standardization(decode_image)\n\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n \n return decode_image, one_hot, label\n\ndef val_func(name, label):\n\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.\n #decode_image = tf.image.per_image_standardization(decode_image)\n\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes) \n \n return decode_image, one_hot\n\n#@tf.function\ndef run_model(model, images):\n 
logits, probs = model(images, training=True)\n return logits, probs\n\n@tf.function\ndef train_step(model, images, levels, imp):\n \n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n\n #total_loss = (-tf.reduce_sum((tf.nn.log_softmax(logits, axis=2)[:,:,1]*levels + tf.nn.log_softmax(logits, axis=2)[:,:,0]*(1-levels))*imp, 1))\n \n # total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + tf.math.log(1. - tf.nn.sigmoid(logits))*(1-levels))*imp, 1))\n total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + (tf.math.log_sigmoid(logits) - logits)*(1-levels))*imp, 1))\n #total_loss = -tf.reduce_sum((tf.math.log(tf.nn.softmax(logits, 2)[:, :, 1] + 1e-7) * levels \\\n # + tf.math.log(tf.nn.softmax(logits, 2)[:, :, 0] + 1e-7) * (1 - levels)) * imp, 1)\n\n total_loss = tf.reduce_mean(total_loss)\n\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n\n y = np.unique(label)\n \n m = np.zeros(label.shape)\n\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, \n num_examples - label[label > t].size])\n #print(m_k)\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n # m[i] = float(m_k)**(0.5)\n\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n #print(imp)\n return imp\n \n@tf.function\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * (labels[i].numpy()) + [0]*(FLAGS.num_classes - 1 - labels[i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n\n return tf.convert_to_tensor(levels, tf.float32)\n\ndef main(argv=None):\n\n # train_model = resnet_type1(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, 3), NUM_CLASSES=FLAGS.num_classes)\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, FLAGS.ch), include_top=False,\n batch_size=FLAGS.batch_size, weight_path=FLAGS.weights, weights='imagenet')\n\n regularizer = tf.keras.regularizers.l2(0.000005)\n initializer = tf.keras.initializers.glorot_normal()\n\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n # for attr_ in [\"kernel_initializer\"]:\n # if hasattr(layer, attr_):\n # setattr(layer, attr_, initializer)\n\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n # avgpool = tf.reshape(avgpool, [avgpool.shape[0], -1])\n # fc = tf.keras.layers.Dense(1, use_bias=False)(avgpool)\n\n # logits = Linear(NUM_CLASSES - 1)(fc)\n logits = tf.keras.layers.Dense(FLAGS.num_classes-1, use_bias=False)(avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits, probs])\n train_model.summary()\n\n #for m in train_model.layers:\n # if isinstance(m, tf.keras.layers.Conv2D):\n # a = m.output_mask\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, (2. 
/ n)**.5)\n # elif isinstance(m, tf.keras.layers.BatchNormalization):\n # m.get_weights\n # m.weight.data.fill_(1)\n # m.bias.data.zero_()\n\n if FLAGS.pre_checkpoint is True:\n\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)\n\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, max_to_keep=5)\n\n # if a checkpoint exists, restore the latest checkpoint.\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print ('Latest checkpoint restored!!')\n\n if FLAGS.train == True:\n \n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0, usecols=0)\n data_name = [FLAGS.img_path + data_name_ for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0, usecols=1)\n\n imp = task_importance_weights(data_label-16)\n imp = imp[0:FLAGS.num_classes-1]\n\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100', skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n\n if val_data_name[i][2] == \"M\" and val_data_name[i][3] == \"W\":\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"F\" and val_data_name[i][3] == \"W\":\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"M\" and val_data_name[i][3] == \"B\":\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"F\" and val_data_name[i][3] == \"B\":\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n \n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age], [BF_img, BF_age]]\n\n batch_idx = len(data_label) // FLAGS.batch_size\n\n #current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n #train_log_dir = FLAGS.graphs + current_time + '/train'\n #val_log_dir = FLAGS.graphs + current_time + '/val'\n #train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n #val_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n loss_f = open(FLAGS.output_loss_txt, \"w\")\n count = 0\n for epoch in range(FLAGS.epochs):\n\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n\n data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n it = iter(data_generator)\n\n #imp = task_importance_weights(data_label)\n #imp = imp[0:FLAGS.num_classes-1]\n for step in range(batch_idx):\n\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n\n #with val_summary_writer.as_default():\n # tf.summary.scalar(u'total loss', loss, step=count)\n\n if 
count % 10 == 0:\n #MAE = test_MAE(train_model, batch_images, batch_labels, levels)\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, step + 1, batch_idx, total_loss))\n\n if count % 100 == 0:\n test_list = [\"WM\", \"WF\", \"BM\", \"BF\"]\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n\n val_data_generator = tf.data.Dataset.from_tensor_slices((val_img, val_lab))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n\n print(\"MAE = {} ({})\".format(AE / len(val_img), test_list[j]))\n\n loss_f.write(\"Epochs: {}, step = {}\".format(epoch, count))\n loss_f.write(\" --> \")\n loss_f.write(test_list[j])\n loss_f.write(\": \")\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(\", \")\n\n loss_f.write(\"\\n\")\n loss_f.flush()\n\n\n\n # print(\"==========\")\n # print(\"[2]MAE = {}\".format(MAE))\n # print(\"==========\")\n # model_dir = FLAGS.save_checkpoint\n # folder_name = int((count + 1)/val_idx)\n # folder_name_str = \"%s/%s\" % (model_dir, folder_name)\n # if not os.path.isdir(folder_name_str):\n # print(\"Make {} folder to save checkpoint\".format(folder_name))\n # os.makedirs(folder_name_str)\n # ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)\n # checkpoint_dir = folder_name_str + \"/\" + \"CORAL_{}_steps.ckpt\".format(count)\n # ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_dir, 5)\n # ckpt_manager.save()\n\n # with val_summary_writer.as_default():\n # tf.summary.scalar(u'[2]MAE', MAE, step=count)\n\n count += 1\n\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0, usecols=0)\n data_name = [FLAGS.test_img + data_name_ for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=0, usecols=1)\n\n data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n\n image, labels, opp_labels = next(it)\n\n _, probs = train_model(image, training=False)\n\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
# -*- coding: utf-8 -*-
import pickle
import pathlib
from pathlib import Path
from typing import List, Tuple, Dict
import numpy as np
import torch
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.utils.data import Dataset, DataLoader
from torchtext.data import get_tokenizer
from matplotlib import pyplot as plt
"""### **Preprocesare**"""
def read_data(directory):
ids = []
texts = []
labels = []
for f in directory.glob('*.txt'):
id = f.name.replace('article', '').replace('.txt', '')
ids.append(id)
texts.append(f.read_text('utf8'))
labels.append(parse_label(f.as_posix().replace('.txt', '.labels.tsv')))
# labels can be empty
return ids, texts, labels
def parse_label(label_path):
labels = []
f = Path(label_path)
if not f.exists():
return labels
for line in open(label_path):
parts = line.strip().split('\t')
labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])
labels = sorted(labels)
if labels:
length = max([label[1] for label in labels])
visit = np.zeros(length)
res = []
for label in labels:
if sum(visit[label[0]:label[1]]):
label[3] = 1
else:
visit[label[0]:label[1]] = 1
res.append(label)
return res
else:
return labels
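# Illustrative sketch of the assumed .labels.tsv format, matching the parsing
# above: a line such as
#     article123<TAB>Loaded_Language<TAB>34<TAB>57
# becomes the entry [34, 57, 'Loaded_Language', 0, 0]; the fourth field is set
# to 1 when the span overlaps an earlier span in the same article.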
def clean_text(articles, ids):
texts = []
for article, id in zip(articles, ids):
sentences = article.split('\n')
end = -1
res = []
for sentence in sentences:
start = end + 1
end = start + len(sentence) # length of sequence
if sentence != "": # if not empty line
res.append([id, sentence, start, end])
texts.append(res)
return texts
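# Each article is thus converted into rows of [article_id, sentence, start, end],
# where start/end are character offsets of the sentence within the original
# article (the end + 1 increment above accounts for the stripped newline).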
def make_dataset(texts, lbls):
txt = []
lbl = []
for text, label in zip(texts, lbls):
for Text in text:
txt.append(Text[1])
k = 0
for l in label:
if Text[2] < l[0] < Text[3]:
lbl.append(1)
k = 1
break
elif Text[2] < l[1] < Text[3]:
lbl.append(1)
k = 1
break
if k == 0:
lbl.append(0)
return txt, lbl
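# Minimal illustration of the sentence-level binarization above (offsets are
# hypothetical): a sentence spanning characters 100-160 paired with a
# propaganda span starting at character 120 gets label 1, because the span
# boundary falls strictly inside the sentence; sentences containing no span
# boundary get label 0.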
directory = pathlib.Path('data/protechn_corpus_eval/train')
ids, texts, lbl = read_data(directory)
ids_train = ids
texts_train = texts
lbl_train = lbl
directory = pathlib.Path('data/protechn_corpus_eval/test')
ids_test, texts_test, lbl_test = read_data(directory)
directory = pathlib.Path('data/protechn_corpus_eval/dev')
ids_dev, texts_dev, lbl_dev = read_data(directory)
txt_train = clean_text(texts_train, ids_train)
txt_test = clean_text(texts_test, ids_test)
txt_dev = clean_text(texts_dev, ids_dev)
train_txt, train_lbl = make_dataset(txt_train, lbl_train)
test_txt, test_lbl = make_dataset(txt_test, lbl_test)
dev_txt, dev_lbl = make_dataset(txt_dev, lbl_dev)
pickle.dump([dev_txt,dev_lbl], open("savedata/dev.txt", "wb"))
pickle.dump([test_txt,test_lbl], open("savedata/test.txt", "wb"))
pickle.dump([train_txt,train_lbl], open("savedata/train.txt", "wb"))
train_txt, train_lbl = pickle.load(open("savedata/train.txt", "rb"))
test_txt, test_lbl = pickle.load(open("savedata/test.txt", "rb"))
dev_txt, dev_lbl = pickle.load(open("savedata/dev.txt", "rb"))
"""### **Dataset+ data_loader**"""
class Vocabulary:
"""
Helper class that maps words to unique indices and the other way around
"""
def __init__(self, tokens: List[str]):
# dictionary that maps words to indices
self.word_to_idx = {'<PAD>': 0}
for idx, tok in enumerate(tokens, 1):
self.word_to_idx[tok] = idx
# dictionary that maps indices to words
self.idx_to_word = {}
for tok, idx in self.word_to_idx.items():
self.idx_to_word[idx] = tok
def get_token_at_index(self, idx: int):
return self.idx_to_word[idx]
def get_index_of_token(self, token: str):
return self.word_to_idx[token]
def size(self):
return len(self.word_to_idx)
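# Hypothetical usage sketch of the Vocabulary helper (names/values illustrative):
#     v = Vocabulary(tokens=['the', 'war'])
#     v.get_index_of_token('the')   # -> 1  ('<PAD>' is reserved at index 0)
#     v.get_token_at_index(2)       # -> 'war'
#     v.size()                      # -> 3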
class PropagandaDataset(Dataset):
def __init__(self,
fold: str,
examples: List[str],
labels: List[int],
vocab: Vocabulary):
"""
:type vocab: object
:param fold: 'train'/'eval'/'test'
:param examples: List of sentences/paragraphs
:param labels: List of labels (1 if propaganda, 0 otherwise)
"""
self.fold = fold
self.examples = examples
self.labels = labels
self.vocab = vocab
def __getitem__(self, index: int) -> (torch.Tensor, torch.Tensor):
"""
This function converts an example to a Tensor containing the indices
:param index: position of example to be retrieved.
"""
# retrieve sentence and label (correct class index)
example, label = self.examples[index], self.labels[index]
# tokenize sentence into words and other symbols
tokenizer = get_tokenizer("spacy")
tokens = tokenizer(example)
# convert tokens to their corresponding indices, according to
# vocabulary
token_indices = []
for i in tokens:
token_indices.append(self.vocab.get_index_of_token(i))
        # label is a scalar class index (0/1), so wrap it as a 0-d long tensor
        return torch.LongTensor(token_indices), torch.tensor(label, dtype=torch.long)
def __len__(self):
"""
Return the size of this dataset. This is given by the number
of sentences.
"""
return len(self.examples)
def collate_sentences(batch: List[Tuple]):
"""
This function converts a list of batch_size examples to
a Tensor of size batch_size x max_len
batch: [(example_1_tensor, example_1_label),
...
(example_batch_size_tensor, example_batch_size_label)]
"""
# fill this list with all the labels in the batch
batch_labels = []
# we need to find the maximum length of a sentence in this batch
max_len = 0
for i in batch:
if len(i[0]) > max_len:
max_len = len(i[0])
batch_size = len(batch)
# print('batch size',batch_size)
# initialize a Tensor filled with zeros (aka index of <PAD>)
batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)
# fill each row idx in batch_sentences with the corresponding
# sequence tensor
    for idx in range(0, batch_size):
        # copy this example's token indices into its padded row
        batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]
        batch_labels.append(batch[idx][1])
    # stack the per-example scalar label tensors into a single batch_size vector
    batch_labels = torch.stack(batch_labels)
    return batch_sentences, batch_labels
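# Padding sketch for collate_sentences (hypothetical token indices): two
# examples with index sequences [5, 7, 9] and [4, 2] are collated into
#     batch_sentences = [[5, 7, 9],
#                        [4, 2, 0]]   # 0 is the <PAD> index
# alongside a length-2 vector of labels.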
def fill_vocab(txt: List[Tuple]):
tokenizer = get_tokenizer("spacy")
list_v = []
for i in txt:
tok = tokenizer(i)
for j in tok:
if list_v.count(j) == 0:
list_v.append(j)
vocab = Vocabulary(tokens=list_v)
return vocab
# build one shared vocabulary over all splits so that the same token always
# maps to the same index and every index is valid for the embedding layer
full_text = train_txt + dev_txt + test_txt
vocab = fill_vocab(full_text)
test_vocab = fill_vocab(test_txt)
train_vocab = fill_vocab(train_txt)
dev_vocab = fill_vocab(dev_txt)
pickle.dump(dev_vocab, open("savedata/dev_vocab.txt", "wb"))
pickle.dump(test_vocab, open("savedata/test_vocab.txt", "wb"))
pickle.dump(train_vocab, open("savedata/train_vocab.txt", "wb"))
pickle.dump(vocab, open("savedata/vocab.txt", "wb"))
dev_vocab = pickle.load(open("savedata/dev_vocab.txt","rb"))
test_vocab = pickle.load(open("savedata/test_vocab.txt","rb"))
train_vocab = pickle.load(open("savedata/train_vocab.txt","rb"))
vocab = pickle.load(open("savedata/vocab.txt", "rb"))
# all splits share the same vocabulary so token indices stay consistent with
# the embedding table built from `vocab` below
dataset_train = PropagandaDataset('train', train_txt, train_lbl, vocab)
train_loader = DataLoader(dataset_train, batch_size=16, collate_fn=collate_sentences)
dataset_test = PropagandaDataset('test', test_txt, test_lbl, vocab)
test_loader = DataLoader(dataset_test, batch_size=16, collate_fn=collate_sentences)
dataset_dev = PropagandaDataset('eval', dev_txt, dev_lbl, vocab)
dev_loader = DataLoader(dataset_dev, batch_size=16, collate_fn=collate_sentences)
pickle.dump(train_loader, open("savedata/train_loaded.txt", "wb"))
pickle.dump(test_loader, open("savedata/test_loaded.txt", "wb"))
pickle.dump(dev_loader, open("savedata/dev_loaded.txt", "wb"))
train_loader = pickle.load(open("savedata/train_loaded.txt", "rb"))
test_loader = pickle.load(open("savedata/test_loaded.txt", "rb"))
dev_loader = pickle.load(open("savedata/dev_loaded.txt", "rb"))
"""### model"""
############################## PARAMETERS ######################################
_hyperparameters_dict = {
"batch_size": 64,
"num_epochs": 10, # 10,
"max_len": 250,
"embedding_size": 128, # 256,
"rnn_size": 256, # 1024,
"learning_algo": "adam",
"learning_rate": 0.001,
"max_grad_norm": 5.0
}
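# Note (observation, not a fix): the DataLoaders above were created with
# batch_size=16, while this dict declares batch_size=64; in this script the
# dict value is only used by the Trainer for progress reporting, so the two
# numbers are not kept in sync.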
class RNN(nn.Module):
def __init__(self, vocab_size: int, char_embedding_size: int,
rnn_size: int):
super().__init__()
self.vocab_size = vocab_size
self.char_embedding_size = char_embedding_size
self.rnn_size = rnn_size
self.dropout = nn.Dropout(p=0.3)
# instantiate Modules with the correct arguments
self.embedding = nn.Embedding(num_embeddings=vocab_size,
embedding_dim=char_embedding_size)
        # batch_first=True so the LSTM consumes (batch, seq_len, features),
        # matching the embedding output used in forward()
        self.rnn = nn.LSTM(input_size=char_embedding_size,
                           hidden_size=rnn_size, bidirectional=True,
                           batch_first=True)
# self.rnn_cell = nn.GRUCell(input_size = char_embedding_size,
# hidden_size = rnn_size)
self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)
# self.softmax = nn.Softmax(dim = 2)
self.loss = nn.CrossEntropyLoss()
def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):
"""
Computes loss for a batch of sequences. The sequence loss is the
average of the individual losses at each timestep. The batch loss is
the average of sequence losses across all batches.
:param logits: unnormalized probabilities for T timesteps, size
batch_size x max_timesteps x vocab_size
:param y: ground truth values (index of correct characters), size
batch_size x max_timesteps
:returns: loss as a scalar
"""
#
# logits: B x T x vocab_size
# B x T
# cross entropy: B x vocab_size x T
# B x T
# vision: B x num_classes
# B
return self.loss(logits, y)
def get_logits(self, hidden_states: torch.FloatTensor,
temperature: float = 1.0):
"""
Computes the unnormalized probabilities from hidden states. Optionally
divide logits by a temperature, in order to influence predictions at
test time (https://www.quora.com/What-is-Temperature-in-LSTM)
:param hidden_states: tensor of size batch_size x timesteps x rnn_size
:param temperature: coefficient that scales outputs before turning them
to probabilities. A low temperature (0.1) results in more conservative
predictions, while a higher temperature (0.9) results in more diverse
predictions
:return: tensor of size batch_size x timesteps x vocab_size
"""
return self.logits(hidden_states) / temperature
def forward(self, batch: torch.LongTensor,
hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:
"""
Computes the hidden states for the current batch (x, y).
:param x: input of size batch_size x max_len
:param hidden_start: hidden state at time step t = 0,
size batch_size x rnn_size
:return: hidden states at all timesteps,
size batch_size x timesteps x rnn_size
"""
# max_len = x.size(1)
# x,label = batch
# batch_size x max_len x embedding_dim
x_embedded = self.embedding(batch)
# x_drop = self.dropout
x_drop = self.dropout(x_embedded)
# compute hidden states and logits for each time step
# hidden_states_list = []
# prev_hidden = hidden_start
hidden_state = self.rnn(x_drop)[0]
# print(hidden_state)
# print(hidden_state[0].shape)
# print(hidden_state[1].shape)
# hidden_state = hidden_state.permute(2,1,0)
# hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])
# hidden_state_maxPooled = hidden_state.permute(2,1,0)
hidden_state_pooled, _ = torch.max(hidden_state, dim=1)
output = self.get_logits(hidden_state_pooled)
# Loss = self.loss(output, y)
# hidden_state = softmax(logits(hidden_state))
# batch_size x max_len x rnn_size
# hidden_states = torch.stack(hidden_states_list, dim=1)
return output
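# Shape walkthrough for one forward pass (batch size B, padded length T),
# assuming the hyperparameters defined above (embedding 128, rnn_size 256):
#   batch:                 B x T          (token indices)
#   embedding:             B x T x 128
#   bidirectional LSTM:    B x T x 512    (2 * rnn_size)
#   max-pool over T:       B x 512
#   linear logits:         B x 2          (propaganda vs. non-propaganda)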
# instantiate the RNN sentence classifier
network = RNN(vocab.size(),
_hyperparameters_dict['embedding_size'],
_hyperparameters_dict['rnn_size'])
# if torch.cuda.is_available():
# device = torch.device('cuda:0')
# else:
# device = torch.device('cpu')
# move network to GPU if available
# network = network.to(device)
# device = torch.device('cpu')
# network = network.to(device)
optimizer = Adam(params=network.parameters(), lr=0.001)
# CHECKPOINT: make sure you understand each parameter size
print("Neural network parameters: ")
for param_name, param in network.named_parameters():
print("\t" + param_name, " size: ", param.size())
"""# Training/evaluation loop"""
# Commented out IPython magic to ensure Python compatibility.
class Trainer:
def __init__(self, model: nn.Module,
train_data: torch.LongTensor,
dev_data: torch.LongTensor,
vocab: Vocabulary,
hyperparams: Dict):
self.model = model
self.train_data = train_data
self.dev_data = dev_data
self.vocab = vocab
# self.device = torch.device('cuda:0')
if hyperparams['learning_algo'] == 'adam':
self.optimizer = Adam(params=self.model.parameters(),
lr=hyperparams['learning_rate'])
else:
self.optimizer = SGD(params=self.model.parameters(),
lr=hyperparams['learning_rate'])
self.num_epochs = hyperparams['num_epochs']
self.max_len = hyperparams['max_len']
self.batch_size = hyperparams['batch_size']
self.rnn_size = hyperparams['rnn_size']
self.max_grad_norm = hyperparams['max_grad_norm']
        # number of batches yielded by the training/dev DataLoaders
        self.train_size = len(train_data)
        self.dev_size = len(dev_data)
        # approximate number of training examples, used only for progress reporting
        self.num_train_examples = self.train_size * self.batch_size
def train_epoch(self, epoch_num: int) -> float:
"""
Compute the loss on the training set
:param epoch_num: number of current epoch
"""
self.model.train()
epoch_loss = 0.0
# hidden_start = torch.zeros(self.batch_size, self.rnn_size)
# for batch_num, (x, y) in enumerate(make_batches(self.train_data,
# self.batch_size,
# self.max_len)):
for batch_num, batch_tuple in enumerate(self.train_data):
            # reset gradients for this batch
            self.optimizer.zero_grad()
            # unpack the padded sentences (x) and their labels (y)
            x, y = batch_tuple
# x = x.to(self.device)
# y = y.to(self.device)
hidden_states = self.model(x)
# compute unnormalized probabilities
# batch x timesteps x vocab_size
# logits = self.model.get_logits(hidden_states)
# compute loss
# scalar
batch_loss = self.model.get_loss(hidden_states, y)
epoch_loss += batch_loss.item()
# backpropagation (gradient of loss wrt parameters)
batch_loss.backward()
# clip gradients if they get too large
torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),
self.max_grad_norm)
# update parameters
self.optimizer.step()
# we use a stateful RNN, which means the first hidden state for the
# next batch is the last hidden state of the current batch
# hidden_states.detach_()
# hidden_start = hidden_states[:,-1,:] # add comment
if batch_num % 100 == 0:
print("epoch %d, %d/%d examples, batch loss = %f"
% (epoch_num, (batch_num + 1) * self.batch_size,
self.num_train_examples, batch_loss.item()))
epoch_loss /= (batch_num + 1)
return epoch_loss
def eval_epoch(self, epoch_num: int) -> float:
"""
Compute the loss on the validation set
:param epoch_num: number of current epoch
"""
epoch_loss = 0.0
# hidden_start = torch.zeros(self.batch_size, self.rnn_size).to(device)
with torch.no_grad():
# for batch_num, (x, y) in enumerate(make_batches(self.dev_data,
# self.batch_size,
# self.max_len)):
            acc = 0
            # evaluate on the held-out dev set, not the training data
            for batch_num, batch_tuple in enumerate(self.dev_data):
# reset gradients
# self.optimizer.zero_grad()
# x = len(batch_tuple[0])
# y = len(batch_tuple[0][0])
# batch x timesteps x hidden_size
x, y = batch_tuple
# x = x.to(self.device)
# y = y.to(self.device)
hidden_states = self.model(x)
# batch x timesteps x vocab_size
# logits = self.model.get_logits(hidden_states)
batch_loss = self.model.get_loss(hidden_states, y)
epoch_loss += batch_loss.item()
                # count correct predictions (a raw count, not a proportion)
                hidden_states_m = torch.argmax(hidden_states, dim=1)
                acc += (hidden_states_m == y).sum().item()
# we use a stateful RNN, which means the first hidden state for
# the next batch is the last hidden state of the current batch
# hidden_states.detach_()
# hidden_start = hidden_states[:,-1,:]
epoch_loss /= (batch_num + 1)
return epoch_loss, acc
def train(self) -> Dict:
train_losses, dev_losses, dev_acc = [], [], []
for epoch in range(self.num_epochs):
epoch_train_loss = self.train_epoch(epoch)
epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)
train_losses.append(epoch_train_loss)
dev_losses.append(epoch_dev_loss)
dev_acc.append(epoch_dev_train)
return {"train_losses": train_losses,
"dev_losses": dev_losses,
"dev_acc": epoch_dev_train}
def plot_losses(metrics: Dict):
"""
Plots training/validation losses.
    :param metrics: dictionary with 'train_losses' and 'dev_losses' lists
"""
plt.figure()
plt.plot(metrics['train_losses'], c='b', label='Train')
plt.plot(metrics['dev_losses'], c='g', label='Valid')
plt.ylabel('Loss')
plt.xlabel('Iteration')
plt.legend()
plt.show()
# train network for some epoch
trainer = Trainer(network, train_loader, dev_loader, vocab, _hyperparameters_dict)
metrics = trainer.train()
# plot training and validations losses each epoch
plot_losses(metrics)
|
normal
|
{
"blob_id": "9c653719ea511d78de9ddcc19442d9f9f7dc11dc",
"index": 4560,
"step-1": "<mask token>\n\n\nclass Vocabulary:\n \"\"\"\n Helper class that maps words to unique indices and the other way around\n \"\"\"\n\n def __init__(self, tokens: List[str]):\n self.word_to_idx = {'<PAD>': 0}\n for idx, tok in enumerate(tokens, 1):\n self.word_to_idx[tok] = idx\n self.idx_to_word = {}\n for tok, idx in self.word_to_idx.items():\n self.idx_to_word[idx] = tok\n\n def get_token_at_index(self, idx: int):\n return self.idx_to_word[idx]\n\n def get_index_of_token(self, token: str):\n return self.word_to_idx[token]\n\n def size(self):\n return len(self.word_to_idx)\n\n\nclass PropagandaDataset(Dataset):\n\n def __init__(self, fold: str, examples: List[str], labels: List[int],\n vocab: Vocabulary):\n \"\"\"\n :type vocab: object\n :param fold: 'train'/'eval'/'test'\n :param examples: List of sentences/paragraphs\n :param labels: List of labels (1 if propaganda, 0 otherwise)\n \"\"\"\n self.fold = fold\n self.examples = examples\n self.labels = labels\n self.vocab = vocab\n\n def __getitem__(self, index: int) ->(torch.Tensor, torch.Tensor):\n \"\"\"\n This function converts an example to a Tensor containing the indices\n\n :param index: position of example to be retrieved.\n \"\"\"\n example, label = self.examples[index], self.labels[index]\n tokenizer = get_tokenizer('spacy')\n tokens = tokenizer(example)\n token_indices = []\n for i in tokens:\n token_indices.append(self.vocab.get_index_of_token(i))\n return torch.LongTensor(token_indices), torch.LongTensor(label)\n\n def __len__(self):\n \"\"\"\n Return the size of this dataset. This is given by the number\n of sentences.\n \"\"\"\n return len(self.examples)\n\n\n<mask token>\n\n\nclass RNN(nn.Module):\n\n def __init__(self, vocab_size: int, char_embedding_size: int, rnn_size: int\n ):\n super().__init__()\n self.vocab_size = vocab_size\n self.char_embedding_size = char_embedding_size\n self.rnn_size = rnn_size\n self.dropout = nn.Dropout(p=0.3)\n self.embedding = nn.Embedding(num_embeddings=vocab_size,\n embedding_dim=char_embedding_size)\n self.rnn = nn.LSTM(input_size=char_embedding_size, hidden_size=\n rnn_size, bidirectional=True)\n self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)\n self.loss = nn.CrossEntropyLoss()\n\n def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):\n \"\"\"\n Computes loss for a batch of sequences. The sequence loss is the\n average of the individual losses at each timestep. The batch loss is\n the average of sequence losses across all batches.\n\n :param logits: unnormalized probabilities for T timesteps, size\n batch_size x max_timesteps x vocab_size\n :param y: ground truth values (index of correct characters), size\n batch_size x max_timesteps\n :returns: loss as a scalar\n \"\"\"\n return self.loss(logits, y)\n\n def get_logits(self, hidden_states: torch.FloatTensor, temperature:\n float=1.0):\n \"\"\"\n Computes the unnormalized probabilities from hidden states. Optionally\n divide logits by a temperature, in order to influence predictions at\n test time (https://www.quora.com/What-is-Temperature-in-LSTM)\n\n :param hidden_states: tensor of size batch_size x timesteps x rnn_size\n :param temperature: coefficient that scales outputs before turning them\n to probabilities. 
A low temperature (0.1) results in more conservative\n predictions, while a higher temperature (0.9) results in more diverse\n predictions\n\n :return: tensor of size batch_size x timesteps x vocab_size\n \"\"\"\n return self.logits(hidden_states) / temperature\n\n def forward(self, batch: torch.LongTensor, hidden_start: torch.\n FloatTensor=None) ->torch.FloatTensor:\n \"\"\"\n Computes the hidden states for the current batch (x, y).\n :param x: input of size batch_size x max_len\n :param hidden_start: hidden state at time step t = 0,\n size batch_size x rnn_size\n :return: hidden states at all timesteps,\n size batch_size x timesteps x rnn_size\n \"\"\"\n x_embedded = self.embedding(batch)\n x_drop = self.dropout(x_embedded)\n hidden_state = self.rnn(x_drop)[0]\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n output = self.get_logits(hidden_state_pooled)\n return output\n\n\n<mask token>\n\n\nclass Trainer:\n\n def __init__(self, model: nn.Module, train_data: torch.LongTensor,\n dev_data: torch.LongTensor, vocab: Vocabulary, hyperparams: Dict):\n self.model = model\n self.train_data = train_data\n self.dev_data = dev_data\n self.vocab = vocab\n if hyperparams['learning_algo'] == 'adam':\n self.optimizer = Adam(params=self.model.parameters(), lr=\n hyperparams['learning_rate'])\n else:\n self.optimizer = SGD(params=self.model.parameters(), lr=\n hyperparams['learning_rate'])\n self.num_epochs = hyperparams['num_epochs']\n self.max_len = hyperparams['max_len']\n self.batch_size = hyperparams['batch_size']\n self.rnn_size = hyperparams['rnn_size']\n self.max_grad_norm = hyperparams['max_grad_norm']\n self.train_size = len(train_data)\n self.dev_size = len(dev_data)\n self.num_train_examples = self.train_size // (self.batch_size *\n self.max_len) * self.batch_size\n\n def train_epoch(self, epoch_num: int) ->float:\n \"\"\"\n Compute the loss on the training set\n :param epoch_num: number of current epoch\n \"\"\"\n self.model.train()\n epoch_loss = 0.0\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n self.optimizer.zero_grad()\n x = len(batch_tuple[0])\n y = len(batch_tuple[0][0])\n x, y = batch_tuple\n hidden_states = self.model(x)\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n batch_loss.backward()\n torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),\n self.max_grad_norm)\n self.optimizer.step()\n if batch_num % 100 == 0:\n print('epoch %d, %d/%d examples, batch loss = %f' % (\n epoch_num, (batch_num + 1) * self.batch_size, self.\n num_train_examples, batch_loss.item()))\n epoch_loss /= batch_num + 1\n return epoch_loss\n\n def eval_epoch(self, epoch_num: int) ->float:\n \"\"\"\n Compute the loss on the validation set\n :param epoch_num: number of current epoch\n \"\"\"\n epoch_loss = 0.0\n with torch.no_grad():\n acc = 0\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n x, y = batch_tuple\n hidden_states = self.model(x)\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n hidden_states_m = torch.argmax(hidden_states, dim=1)\n acc += sum(hidden_states_m == y).item()\n epoch_loss /= batch_num + 1\n return epoch_loss, acc\n\n def train(self) ->Dict:\n train_losses, dev_losses, dev_acc = [], [], []\n for epoch in range(self.num_epochs):\n epoch_train_loss = self.train_epoch(epoch)\n epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)\n train_losses.append(epoch_train_loss)\n 
dev_losses.append(epoch_dev_loss)\n dev_acc.append(epoch_dev_train)\n return {'train_losses': train_losses, 'dev_losses': dev_losses,\n 'dev_acc': epoch_dev_train}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vocabulary:\n \"\"\"\n Helper class that maps words to unique indices and the other way around\n \"\"\"\n\n def __init__(self, tokens: List[str]):\n self.word_to_idx = {'<PAD>': 0}\n for idx, tok in enumerate(tokens, 1):\n self.word_to_idx[tok] = idx\n self.idx_to_word = {}\n for tok, idx in self.word_to_idx.items():\n self.idx_to_word[idx] = tok\n\n def get_token_at_index(self, idx: int):\n return self.idx_to_word[idx]\n\n def get_index_of_token(self, token: str):\n return self.word_to_idx[token]\n\n def size(self):\n return len(self.word_to_idx)\n\n\nclass PropagandaDataset(Dataset):\n\n def __init__(self, fold: str, examples: List[str], labels: List[int],\n vocab: Vocabulary):\n \"\"\"\n :type vocab: object\n :param fold: 'train'/'eval'/'test'\n :param examples: List of sentences/paragraphs\n :param labels: List of labels (1 if propaganda, 0 otherwise)\n \"\"\"\n self.fold = fold\n self.examples = examples\n self.labels = labels\n self.vocab = vocab\n\n def __getitem__(self, index: int) ->(torch.Tensor, torch.Tensor):\n \"\"\"\n This function converts an example to a Tensor containing the indices\n\n :param index: position of example to be retrieved.\n \"\"\"\n example, label = self.examples[index], self.labels[index]\n tokenizer = get_tokenizer('spacy')\n tokens = tokenizer(example)\n token_indices = []\n for i in tokens:\n token_indices.append(self.vocab.get_index_of_token(i))\n return torch.LongTensor(token_indices), torch.LongTensor(label)\n\n def __len__(self):\n \"\"\"\n Return the size of this dataset. This is given by the number\n of sentences.\n \"\"\"\n return len(self.examples)\n\n\ndef collate_sentences(batch: List[Tuple]):\n \"\"\"\n This function converts a list of batch_size examples to\n a Tensor of size batch_size x max_len\n batch: [(example_1_tensor, example_1_label),\n ...\n (example_batch_size_tensor, example_batch_size_label)]\n \"\"\"\n batch_labels = []\n max_len = 0\n for i in batch:\n if len(i[0]) > max_len:\n max_len = len(i[0])\n batch_size = len(batch)\n batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)\n for idx in range(0, batch_size):\n batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]\n print(batch[idx])\n batch_labels.append(batch[idx][1])\n print(type(batch_labels))\n batch_labels = torch.tensor(batch_labels)\n return batch_sentences, batch_labels\n\n\ndef fill_vocab(txt: List[Tuple]):\n tokenizer = get_tokenizer('spacy')\n list_v = []\n for i in txt:\n tok = tokenizer(i)\n for j in tok:\n if list_v.count(j) == 0:\n list_v.append(j)\n vocab = Vocabulary(tokens=list_v)\n return vocab\n\n\n<mask token>\n\n\nclass RNN(nn.Module):\n\n def __init__(self, vocab_size: int, char_embedding_size: int, rnn_size: int\n ):\n super().__init__()\n self.vocab_size = vocab_size\n self.char_embedding_size = char_embedding_size\n self.rnn_size = rnn_size\n self.dropout = nn.Dropout(p=0.3)\n self.embedding = nn.Embedding(num_embeddings=vocab_size,\n embedding_dim=char_embedding_size)\n self.rnn = nn.LSTM(input_size=char_embedding_size, hidden_size=\n rnn_size, bidirectional=True)\n self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)\n self.loss = nn.CrossEntropyLoss()\n\n def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):\n \"\"\"\n Computes loss for a batch of sequences. The sequence loss is the\n average of the individual losses at each timestep. 
The batch loss is\n the average of sequence losses across all batches.\n\n :param logits: unnormalized probabilities for T timesteps, size\n batch_size x max_timesteps x vocab_size\n :param y: ground truth values (index of correct characters), size\n batch_size x max_timesteps\n :returns: loss as a scalar\n \"\"\"\n return self.loss(logits, y)\n\n def get_logits(self, hidden_states: torch.FloatTensor, temperature:\n float=1.0):\n \"\"\"\n Computes the unnormalized probabilities from hidden states. Optionally\n divide logits by a temperature, in order to influence predictions at\n test time (https://www.quora.com/What-is-Temperature-in-LSTM)\n\n :param hidden_states: tensor of size batch_size x timesteps x rnn_size\n :param temperature: coefficient that scales outputs before turning them\n to probabilities. A low temperature (0.1) results in more conservative\n predictions, while a higher temperature (0.9) results in more diverse\n predictions\n\n :return: tensor of size batch_size x timesteps x vocab_size\n \"\"\"\n return self.logits(hidden_states) / temperature\n\n def forward(self, batch: torch.LongTensor, hidden_start: torch.\n FloatTensor=None) ->torch.FloatTensor:\n \"\"\"\n Computes the hidden states for the current batch (x, y).\n :param x: input of size batch_size x max_len\n :param hidden_start: hidden state at time step t = 0,\n size batch_size x rnn_size\n :return: hidden states at all timesteps,\n size batch_size x timesteps x rnn_size\n \"\"\"\n x_embedded = self.embedding(batch)\n x_drop = self.dropout(x_embedded)\n hidden_state = self.rnn(x_drop)[0]\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n output = self.get_logits(hidden_state_pooled)\n return output\n\n\n<mask token>\n\n\nclass Trainer:\n\n def __init__(self, model: nn.Module, train_data: torch.LongTensor,\n dev_data: torch.LongTensor, vocab: Vocabulary, hyperparams: Dict):\n self.model = model\n self.train_data = train_data\n self.dev_data = dev_data\n self.vocab = vocab\n if hyperparams['learning_algo'] == 'adam':\n self.optimizer = Adam(params=self.model.parameters(), lr=\n hyperparams['learning_rate'])\n else:\n self.optimizer = SGD(params=self.model.parameters(), lr=\n hyperparams['learning_rate'])\n self.num_epochs = hyperparams['num_epochs']\n self.max_len = hyperparams['max_len']\n self.batch_size = hyperparams['batch_size']\n self.rnn_size = hyperparams['rnn_size']\n self.max_grad_norm = hyperparams['max_grad_norm']\n self.train_size = len(train_data)\n self.dev_size = len(dev_data)\n self.num_train_examples = self.train_size // (self.batch_size *\n self.max_len) * self.batch_size\n\n def train_epoch(self, epoch_num: int) ->float:\n \"\"\"\n Compute the loss on the training set\n :param epoch_num: number of current epoch\n \"\"\"\n self.model.train()\n epoch_loss = 0.0\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n self.optimizer.zero_grad()\n x = len(batch_tuple[0])\n y = len(batch_tuple[0][0])\n x, y = batch_tuple\n hidden_states = self.model(x)\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n batch_loss.backward()\n torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),\n self.max_grad_norm)\n self.optimizer.step()\n if batch_num % 100 == 0:\n print('epoch %d, %d/%d examples, batch loss = %f' % (\n epoch_num, (batch_num + 1) * self.batch_size, self.\n num_train_examples, batch_loss.item()))\n epoch_loss /= batch_num + 1\n return epoch_loss\n\n def eval_epoch(self, epoch_num: int) ->float:\n \"\"\"\n 
Compute the loss on the validation set\n :param epoch_num: number of current epoch\n \"\"\"\n epoch_loss = 0.0\n with torch.no_grad():\n acc = 0\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n x, y = batch_tuple\n hidden_states = self.model(x)\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n hidden_states_m = torch.argmax(hidden_states, dim=1)\n acc += sum(hidden_states_m == y).item()\n epoch_loss /= batch_num + 1\n return epoch_loss, acc\n\n def train(self) ->Dict:\n train_losses, dev_losses, dev_acc = [], [], []\n for epoch in range(self.num_epochs):\n epoch_train_loss = self.train_epoch(epoch)\n epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)\n train_losses.append(epoch_train_loss)\n dev_losses.append(epoch_dev_loss)\n dev_acc.append(epoch_dev_train)\n return {'train_losses': train_losses, 'dev_losses': dev_losses,\n 'dev_acc': epoch_dev_train}\n\n\ndef plot_losses(metrics: Dict):\n \"\"\"\n Plots training/validation losses.\n :param metrics: dictionar\n \"\"\"\n plt.figure()\n plt.plot(metrics['train_losses'], c='b', label='Train')\n plt.plot(metrics['dev_losses'], c='g', label='Valid')\n plt.ylabel('Loss')\n plt.xlabel('Iteration')\n plt.legend()\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_label(label_path):\n labels = []\n f = Path(label_path)\n if not f.exists():\n return labels\n for line in open(label_path):\n parts = line.strip().split('\\t')\n labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])\n labels = sorted(labels)\n if labels:\n length = max([label[1] for label in labels])\n visit = np.zeros(length)\n res = []\n for label in labels:\n if sum(visit[label[0]:label[1]]):\n label[3] = 1\n else:\n visit[label[0]:label[1]] = 1\n res.append(label)\n return res\n else:\n return labels\n\n\ndef clean_text(articles, ids):\n texts = []\n for article, id in zip(articles, ids):\n sentences = article.split('\\n')\n end = -1\n res = []\n for sentence in sentences:\n start = end + 1\n end = start + len(sentence)\n if sentence != '':\n res.append([id, sentence, start, end])\n texts.append(res)\n return texts\n\n\n<mask token>\n\n\nclass Vocabulary:\n \"\"\"\n Helper class that maps words to unique indices and the other way around\n \"\"\"\n\n def __init__(self, tokens: List[str]):\n self.word_to_idx = {'<PAD>': 0}\n for idx, tok in enumerate(tokens, 1):\n self.word_to_idx[tok] = idx\n self.idx_to_word = {}\n for tok, idx in self.word_to_idx.items():\n self.idx_to_word[idx] = tok\n\n def get_token_at_index(self, idx: int):\n return self.idx_to_word[idx]\n\n def get_index_of_token(self, token: str):\n return self.word_to_idx[token]\n\n def size(self):\n return len(self.word_to_idx)\n\n\nclass PropagandaDataset(Dataset):\n\n def __init__(self, fold: str, examples: List[str], labels: List[int],\n vocab: Vocabulary):\n \"\"\"\n :type vocab: object\n :param fold: 'train'/'eval'/'test'\n :param examples: List of sentences/paragraphs\n :param labels: List of labels (1 if propaganda, 0 otherwise)\n \"\"\"\n self.fold = fold\n self.examples = examples\n self.labels = labels\n self.vocab = vocab\n\n def __getitem__(self, index: int) ->(torch.Tensor, torch.Tensor):\n \"\"\"\n This function converts an example to a Tensor containing the indices\n\n :param index: position of example to be retrieved.\n \"\"\"\n example, label = self.examples[index], self.labels[index]\n tokenizer = get_tokenizer('spacy')\n tokens = tokenizer(example)\n token_indices = []\n for i in tokens:\n token_indices.append(self.vocab.get_index_of_token(i))\n return torch.LongTensor(token_indices), torch.LongTensor(label)\n\n def __len__(self):\n \"\"\"\n Return the size of this dataset. 
This is given by the number\n of sentences.\n \"\"\"\n return len(self.examples)\n\n\ndef collate_sentences(batch: List[Tuple]):\n \"\"\"\n This function converts a list of batch_size examples to\n a Tensor of size batch_size x max_len\n batch: [(example_1_tensor, example_1_label),\n ...\n (example_batch_size_tensor, example_batch_size_label)]\n \"\"\"\n batch_labels = []\n max_len = 0\n for i in batch:\n if len(i[0]) > max_len:\n max_len = len(i[0])\n batch_size = len(batch)\n batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)\n for idx in range(0, batch_size):\n batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]\n print(batch[idx])\n batch_labels.append(batch[idx][1])\n print(type(batch_labels))\n batch_labels = torch.tensor(batch_labels)\n return batch_sentences, batch_labels\n\n\ndef fill_vocab(txt: List[Tuple]):\n tokenizer = get_tokenizer('spacy')\n list_v = []\n for i in txt:\n tok = tokenizer(i)\n for j in tok:\n if list_v.count(j) == 0:\n list_v.append(j)\n vocab = Vocabulary(tokens=list_v)\n return vocab\n\n\n<mask token>\n\n\nclass RNN(nn.Module):\n\n def __init__(self, vocab_size: int, char_embedding_size: int, rnn_size: int\n ):\n super().__init__()\n self.vocab_size = vocab_size\n self.char_embedding_size = char_embedding_size\n self.rnn_size = rnn_size\n self.dropout = nn.Dropout(p=0.3)\n self.embedding = nn.Embedding(num_embeddings=vocab_size,\n embedding_dim=char_embedding_size)\n self.rnn = nn.LSTM(input_size=char_embedding_size, hidden_size=\n rnn_size, bidirectional=True)\n self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)\n self.loss = nn.CrossEntropyLoss()\n\n def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):\n \"\"\"\n Computes loss for a batch of sequences. The sequence loss is the\n average of the individual losses at each timestep. The batch loss is\n the average of sequence losses across all batches.\n\n :param logits: unnormalized probabilities for T timesteps, size\n batch_size x max_timesteps x vocab_size\n :param y: ground truth values (index of correct characters), size\n batch_size x max_timesteps\n :returns: loss as a scalar\n \"\"\"\n return self.loss(logits, y)\n\n def get_logits(self, hidden_states: torch.FloatTensor, temperature:\n float=1.0):\n \"\"\"\n Computes the unnormalized probabilities from hidden states. Optionally\n divide logits by a temperature, in order to influence predictions at\n test time (https://www.quora.com/What-is-Temperature-in-LSTM)\n\n :param hidden_states: tensor of size batch_size x timesteps x rnn_size\n :param temperature: coefficient that scales outputs before turning them\n to probabilities. 
A low temperature (0.1) results in more conservative\n predictions, while a higher temperature (0.9) results in more diverse\n predictions\n\n :return: tensor of size batch_size x timesteps x vocab_size\n \"\"\"\n return self.logits(hidden_states) / temperature\n\n def forward(self, batch: torch.LongTensor, hidden_start: torch.\n FloatTensor=None) ->torch.FloatTensor:\n \"\"\"\n Computes the hidden states for the current batch (x, y).\n :param x: input of size batch_size x max_len\n :param hidden_start: hidden state at time step t = 0,\n size batch_size x rnn_size\n :return: hidden states at all timesteps,\n size batch_size x timesteps x rnn_size\n \"\"\"\n x_embedded = self.embedding(batch)\n x_drop = self.dropout(x_embedded)\n hidden_state = self.rnn(x_drop)[0]\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n output = self.get_logits(hidden_state_pooled)\n return output\n\n\n<mask token>\n\n\nclass Trainer:\n\n def __init__(self, model: nn.Module, train_data: torch.LongTensor,\n dev_data: torch.LongTensor, vocab: Vocabulary, hyperparams: Dict):\n self.model = model\n self.train_data = train_data\n self.dev_data = dev_data\n self.vocab = vocab\n if hyperparams['learning_algo'] == 'adam':\n self.optimizer = Adam(params=self.model.parameters(), lr=\n hyperparams['learning_rate'])\n else:\n self.optimizer = SGD(params=self.model.parameters(), lr=\n hyperparams['learning_rate'])\n self.num_epochs = hyperparams['num_epochs']\n self.max_len = hyperparams['max_len']\n self.batch_size = hyperparams['batch_size']\n self.rnn_size = hyperparams['rnn_size']\n self.max_grad_norm = hyperparams['max_grad_norm']\n self.train_size = len(train_data)\n self.dev_size = len(dev_data)\n self.num_train_examples = self.train_size // (self.batch_size *\n self.max_len) * self.batch_size\n\n def train_epoch(self, epoch_num: int) ->float:\n \"\"\"\n Compute the loss on the training set\n :param epoch_num: number of current epoch\n \"\"\"\n self.model.train()\n epoch_loss = 0.0\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n self.optimizer.zero_grad()\n x = len(batch_tuple[0])\n y = len(batch_tuple[0][0])\n x, y = batch_tuple\n hidden_states = self.model(x)\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n batch_loss.backward()\n torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),\n self.max_grad_norm)\n self.optimizer.step()\n if batch_num % 100 == 0:\n print('epoch %d, %d/%d examples, batch loss = %f' % (\n epoch_num, (batch_num + 1) * self.batch_size, self.\n num_train_examples, batch_loss.item()))\n epoch_loss /= batch_num + 1\n return epoch_loss\n\n def eval_epoch(self, epoch_num: int) ->float:\n \"\"\"\n Compute the loss on the validation set\n :param epoch_num: number of current epoch\n \"\"\"\n epoch_loss = 0.0\n with torch.no_grad():\n acc = 0\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n x, y = batch_tuple\n hidden_states = self.model(x)\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n hidden_states_m = torch.argmax(hidden_states, dim=1)\n acc += sum(hidden_states_m == y).item()\n epoch_loss /= batch_num + 1\n return epoch_loss, acc\n\n def train(self) ->Dict:\n train_losses, dev_losses, dev_acc = [], [], []\n for epoch in range(self.num_epochs):\n epoch_train_loss = self.train_epoch(epoch)\n epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)\n train_losses.append(epoch_train_loss)\n 
dev_losses.append(epoch_dev_loss)\n dev_acc.append(epoch_dev_train)\n return {'train_losses': train_losses, 'dev_losses': dev_losses,\n 'dev_acc': epoch_dev_train}\n\n\ndef plot_losses(metrics: Dict):\n \"\"\"\n Plots training/validation losses.\n :param metrics: dictionar\n \"\"\"\n plt.figure()\n plt.plot(metrics['train_losses'], c='b', label='Train')\n plt.plot(metrics['dev_losses'], c='g', label='Valid')\n plt.ylabel('Loss')\n plt.xlabel('Iteration')\n plt.legend()\n plt.show()\n\n\n<mask token>\n",
"step-4": "import pickle\nimport pathlib\nfrom pathlib import Path\nfrom typing import List, Tuple, Dict\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.optim import SGD, Adam\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchtext.data import get_tokenizer\nfrom matplotlib import pyplot as plt\n<mask token>\n\n\ndef read_data(directory):\n ids = []\n texts = []\n labels = []\n for f in directory.glob('*.txt'):\n id = f.name.replace('article', '').replace('.txt', '')\n ids.append(id)\n texts.append(f.read_text('utf8'))\n labels.append(parse_label(f.as_posix().replace('.txt', '.labels.tsv')))\n return ids, texts, labels\n\n\ndef parse_label(label_path):\n labels = []\n f = Path(label_path)\n if not f.exists():\n return labels\n for line in open(label_path):\n parts = line.strip().split('\\t')\n labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])\n labels = sorted(labels)\n if labels:\n length = max([label[1] for label in labels])\n visit = np.zeros(length)\n res = []\n for label in labels:\n if sum(visit[label[0]:label[1]]):\n label[3] = 1\n else:\n visit[label[0]:label[1]] = 1\n res.append(label)\n return res\n else:\n return labels\n\n\ndef clean_text(articles, ids):\n texts = []\n for article, id in zip(articles, ids):\n sentences = article.split('\\n')\n end = -1\n res = []\n for sentence in sentences:\n start = end + 1\n end = start + len(sentence)\n if sentence != '':\n res.append([id, sentence, start, end])\n texts.append(res)\n return texts\n\n\ndef make_dataset(texts, lbls):\n txt = []\n lbl = []\n for text, label in zip(texts, lbls):\n for Text in text:\n txt.append(Text[1])\n k = 0\n for l in label:\n if Text[2] < l[0] < Text[3]:\n lbl.append(1)\n k = 1\n break\n elif Text[2] < l[1] < Text[3]:\n lbl.append(1)\n k = 1\n break\n if k == 0:\n lbl.append(0)\n return txt, lbl\n\n\ndirectory = pathlib.Path('data/protechn_corpus_eval/train')\nids, texts, lbl = read_data(directory)\nids_train = ids\ntexts_train = texts\nlbl_train = lbl\ndirectory = pathlib.Path('data/protechn_corpus_eval/test')\nids_test, texts_test, lbl_test = read_data(directory)\ndirectory = pathlib.Path('data/protechn_corpus_eval/dev')\nids_dev, texts_dev, lbl_dev = read_data(directory)\ntxt_train = clean_text(texts_train, ids_train)\ntxt_test = clean_text(texts_test, ids_test)\ntxt_dev = clean_text(texts_dev, ids_dev)\ntrain_txt, train_lbl = make_dataset(txt_train, lbl_train)\ntest_txt, test_lbl = make_dataset(txt_test, lbl_test)\ndev_txt, dev_lbl = make_dataset(txt_dev, lbl_dev)\npickle.dump([dev_txt, dev_lbl], open('savedata/dev.txt', 'wb'))\npickle.dump([test_txt, test_lbl], open('savedata/test.txt', 'wb'))\npickle.dump([train_txt, train_lbl], open('savedata/train.txt', 'wb'))\ntrain_txt, train_lbl = pickle.load(open('savedata/train.txt', 'rb'))\ntest_txt, test_lbl = pickle.load(open('savedata/test.txt', 'rb'))\ndev_txt, dev_lbl = pickle.load(open('savedata/dev.txt', 'rb'))\n<mask token>\n\n\nclass Vocabulary:\n \"\"\"\n Helper class that maps words to unique indices and the other way around\n \"\"\"\n\n def __init__(self, tokens: List[str]):\n self.word_to_idx = {'<PAD>': 0}\n for idx, tok in enumerate(tokens, 1):\n self.word_to_idx[tok] = idx\n self.idx_to_word = {}\n for tok, idx in self.word_to_idx.items():\n self.idx_to_word[idx] = tok\n\n def get_token_at_index(self, idx: int):\n return self.idx_to_word[idx]\n\n def get_index_of_token(self, token: str):\n return self.word_to_idx[token]\n\n def size(self):\n return len(self.word_to_idx)\n\n\nclass 
PropagandaDataset(Dataset):\n\n def __init__(self, fold: str, examples: List[str], labels: List[int],\n vocab: Vocabulary):\n \"\"\"\n :type vocab: object\n :param fold: 'train'/'eval'/'test'\n :param examples: List of sentences/paragraphs\n :param labels: List of labels (1 if propaganda, 0 otherwise)\n \"\"\"\n self.fold = fold\n self.examples = examples\n self.labels = labels\n self.vocab = vocab\n\n def __getitem__(self, index: int) ->(torch.Tensor, torch.Tensor):\n \"\"\"\n This function converts an example to a Tensor containing the indices\n\n :param index: position of example to be retrieved.\n \"\"\"\n example, label = self.examples[index], self.labels[index]\n tokenizer = get_tokenizer('spacy')\n tokens = tokenizer(example)\n token_indices = []\n for i in tokens:\n token_indices.append(self.vocab.get_index_of_token(i))\n return torch.LongTensor(token_indices), torch.LongTensor(label)\n\n def __len__(self):\n \"\"\"\n Return the size of this dataset. This is given by the number\n of sentences.\n \"\"\"\n return len(self.examples)\n\n\ndef collate_sentences(batch: List[Tuple]):\n \"\"\"\n This function converts a list of batch_size examples to\n a Tensor of size batch_size x max_len\n batch: [(example_1_tensor, example_1_label),\n ...\n (example_batch_size_tensor, example_batch_size_label)]\n \"\"\"\n batch_labels = []\n max_len = 0\n for i in batch:\n if len(i[0]) > max_len:\n max_len = len(i[0])\n batch_size = len(batch)\n batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)\n for idx in range(0, batch_size):\n batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]\n print(batch[idx])\n batch_labels.append(batch[idx][1])\n print(type(batch_labels))\n batch_labels = torch.tensor(batch_labels)\n return batch_sentences, batch_labels\n\n\ndef fill_vocab(txt: List[Tuple]):\n tokenizer = get_tokenizer('spacy')\n list_v = []\n for i in txt:\n tok = tokenizer(i)\n for j in tok:\n if list_v.count(j) == 0:\n list_v.append(j)\n vocab = Vocabulary(tokens=list_v)\n return vocab\n\n\nfull_text = train_txt + dev_txt\nvocab = fill_vocab(full_text)\ntest_vocab = fill_vocab(test_txt)\ntrain_vocab = fill_vocab(train_txt)\ndev_vocab = fill_vocab(dev_txt)\npickle.dump(dev_vocab, open('savedata/dev_vocab.txt', 'wb'))\npickle.dump(test_vocab, open('savedata/test_vocab.txt', 'wb'))\npickle.dump(train_vocab, open('savedata/train_vocab.txt', 'wb'))\npickle.dump(vocab, open('savedata/vocab.txt', 'wb'))\ndev_vocab = pickle.load(open('savedata/dev_vocab.txt', 'rb'))\ntest_vocab = pickle.load(open('savedata/test_vocab.txt', 'rb'))\ntrain_vocab = pickle.load(open('savedata/train_vocab.txt', 'rb'))\nvocab = pickle.load(open('savedata/vocab.txt', 'rb'))\ndataset_train = PropagandaDataset('train', train_txt, train_lbl, train_vocab)\ntrain_loader = DataLoader(dataset_train, batch_size=16, collate_fn=\n collate_sentences)\ndataset_test = PropagandaDataset('train', test_txt, test_lbl, test_vocab)\ntest_loader = DataLoader(dataset_test, batch_size=16, collate_fn=\n collate_sentences)\ndataset_dev = PropagandaDataset('train', dev_txt, dev_lbl, dev_vocab)\ndev_loader = DataLoader(dataset_dev, batch_size=16, collate_fn=\n collate_sentences)\npickle.dump(train_loader, open('savedata/train_loaded.txt', 'wb'))\npickle.dump(test_loader, open('savedata/test_loaded.txt', 'wb'))\npickle.dump(dev_loader, open('savedata/dev_loaded.txt', 'wb'))\ntrain_loader = pickle.load(open('savedata/train_loaded.txt', 'rb'))\ntest_loader = pickle.load(open('savedata/test_loaded.txt', 'rb'))\ndev_loader = 
pickle.load(open('savedata/dev_loaded.txt', 'rb'))\n<mask token>\n_hyperparameters_dict = {'batch_size': 64, 'num_epochs': 10, 'max_len': 250,\n 'embedding_size': 128, 'rnn_size': 256, 'learning_algo': 'adam',\n 'learning_rate': 0.001, 'max_grad_norm': 5.0}\n\n\nclass RNN(nn.Module):\n\n def __init__(self, vocab_size: int, char_embedding_size: int, rnn_size: int\n ):\n super().__init__()\n self.vocab_size = vocab_size\n self.char_embedding_size = char_embedding_size\n self.rnn_size = rnn_size\n self.dropout = nn.Dropout(p=0.3)\n self.embedding = nn.Embedding(num_embeddings=vocab_size,\n embedding_dim=char_embedding_size)\n self.rnn = nn.LSTM(input_size=char_embedding_size, hidden_size=\n rnn_size, bidirectional=True)\n self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)\n self.loss = nn.CrossEntropyLoss()\n\n def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):\n \"\"\"\n Computes loss for a batch of sequences. The sequence loss is the\n average of the individual losses at each timestep. The batch loss is\n the average of sequence losses across all batches.\n\n :param logits: unnormalized probabilities for T timesteps, size\n batch_size x max_timesteps x vocab_size\n :param y: ground truth values (index of correct characters), size\n batch_size x max_timesteps\n :returns: loss as a scalar\n \"\"\"\n return self.loss(logits, y)\n\n def get_logits(self, hidden_states: torch.FloatTensor, temperature:\n float=1.0):\n \"\"\"\n Computes the unnormalized probabilities from hidden states. Optionally\n divide logits by a temperature, in order to influence predictions at\n test time (https://www.quora.com/What-is-Temperature-in-LSTM)\n\n :param hidden_states: tensor of size batch_size x timesteps x rnn_size\n :param temperature: coefficient that scales outputs before turning them\n to probabilities. 
A low temperature (0.1) results in more conservative\n predictions, while a higher temperature (0.9) results in more diverse\n predictions\n\n :return: tensor of size batch_size x timesteps x vocab_size\n \"\"\"\n return self.logits(hidden_states) / temperature\n\n def forward(self, batch: torch.LongTensor, hidden_start: torch.\n FloatTensor=None) ->torch.FloatTensor:\n \"\"\"\n Computes the hidden states for the current batch (x, y).\n :param x: input of size batch_size x max_len\n :param hidden_start: hidden state at time step t = 0,\n size batch_size x rnn_size\n :return: hidden states at all timesteps,\n size batch_size x timesteps x rnn_size\n \"\"\"\n x_embedded = self.embedding(batch)\n x_drop = self.dropout(x_embedded)\n hidden_state = self.rnn(x_drop)[0]\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n output = self.get_logits(hidden_state_pooled)\n return output\n\n\nnetwork = RNN(vocab.size(), _hyperparameters_dict['embedding_size'],\n _hyperparameters_dict['rnn_size'])\noptimizer = Adam(params=network.parameters(), lr=0.001)\nprint('Neural network parameters: ')\nfor param_name, param in network.named_parameters():\n print('\\t' + param_name, ' size: ', param.size())\n<mask token>\n\n\nclass Trainer:\n\n def __init__(self, model: nn.Module, train_data: torch.LongTensor,\n dev_data: torch.LongTensor, vocab: Vocabulary, hyperparams: Dict):\n self.model = model\n self.train_data = train_data\n self.dev_data = dev_data\n self.vocab = vocab\n if hyperparams['learning_algo'] == 'adam':\n self.optimizer = Adam(params=self.model.parameters(), lr=\n hyperparams['learning_rate'])\n else:\n self.optimizer = SGD(params=self.model.parameters(), lr=\n hyperparams['learning_rate'])\n self.num_epochs = hyperparams['num_epochs']\n self.max_len = hyperparams['max_len']\n self.batch_size = hyperparams['batch_size']\n self.rnn_size = hyperparams['rnn_size']\n self.max_grad_norm = hyperparams['max_grad_norm']\n self.train_size = len(train_data)\n self.dev_size = len(dev_data)\n self.num_train_examples = self.train_size // (self.batch_size *\n self.max_len) * self.batch_size\n\n def train_epoch(self, epoch_num: int) ->float:\n \"\"\"\n Compute the loss on the training set\n :param epoch_num: number of current epoch\n \"\"\"\n self.model.train()\n epoch_loss = 0.0\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n self.optimizer.zero_grad()\n x = len(batch_tuple[0])\n y = len(batch_tuple[0][0])\n x, y = batch_tuple\n hidden_states = self.model(x)\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n batch_loss.backward()\n torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),\n self.max_grad_norm)\n self.optimizer.step()\n if batch_num % 100 == 0:\n print('epoch %d, %d/%d examples, batch loss = %f' % (\n epoch_num, (batch_num + 1) * self.batch_size, self.\n num_train_examples, batch_loss.item()))\n epoch_loss /= batch_num + 1\n return epoch_loss\n\n def eval_epoch(self, epoch_num: int) ->float:\n \"\"\"\n Compute the loss on the validation set\n :param epoch_num: number of current epoch\n \"\"\"\n epoch_loss = 0.0\n with torch.no_grad():\n acc = 0\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n x, y = batch_tuple\n hidden_states = self.model(x)\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n hidden_states_m = torch.argmax(hidden_states, dim=1)\n acc += sum(hidden_states_m == y).item()\n epoch_loss /= batch_num + 1\n return 
epoch_loss, acc\n\n def train(self) ->Dict:\n train_losses, dev_losses, dev_acc = [], [], []\n for epoch in range(self.num_epochs):\n epoch_train_loss = self.train_epoch(epoch)\n epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)\n train_losses.append(epoch_train_loss)\n dev_losses.append(epoch_dev_loss)\n dev_acc.append(epoch_dev_train)\n return {'train_losses': train_losses, 'dev_losses': dev_losses,\n 'dev_acc': epoch_dev_train}\n\n\ndef plot_losses(metrics: Dict):\n \"\"\"\n Plots training/validation losses.\n :param metrics: dictionar\n \"\"\"\n plt.figure()\n plt.plot(metrics['train_losses'], c='b', label='Train')\n plt.plot(metrics['dev_losses'], c='g', label='Valid')\n plt.ylabel('Loss')\n plt.xlabel('Iteration')\n plt.legend()\n plt.show()\n\n\ntrainer = Trainer(network, train_loader, dev_loader, vocab,\n _hyperparameters_dict)\nmetrics = trainer.train()\nplot_losses(metrics)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\nimport pickle\nimport pathlib\nfrom pathlib import Path\nfrom typing import List, Tuple, Dict\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.optim import SGD, Adam\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchtext.data import get_tokenizer\nfrom matplotlib import pyplot as plt\n\n\"\"\"### **Preprocesare**\"\"\"\n\n\ndef read_data(directory):\n ids = []\n texts = []\n labels = []\n for f in directory.glob('*.txt'):\n id = f.name.replace('article', '').replace('.txt', '')\n ids.append(id)\n texts.append(f.read_text('utf8'))\n labels.append(parse_label(f.as_posix().replace('.txt', '.labels.tsv')))\n # labels can be empty\n return ids, texts, labels\n\n\ndef parse_label(label_path):\n labels = []\n f = Path(label_path)\n\n if not f.exists():\n return labels\n\n for line in open(label_path):\n parts = line.strip().split('\\t')\n labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])\n labels = sorted(labels)\n\n if labels:\n length = max([label[1] for label in labels])\n visit = np.zeros(length)\n res = []\n for label in labels:\n if sum(visit[label[0]:label[1]]):\n label[3] = 1\n else:\n visit[label[0]:label[1]] = 1\n res.append(label)\n return res\n else:\n return labels\n\n\ndef clean_text(articles, ids):\n texts = []\n for article, id in zip(articles, ids):\n sentences = article.split('\\n')\n end = -1\n res = []\n for sentence in sentences:\n start = end + 1\n end = start + len(sentence) # length of sequence\n if sentence != \"\": # if not empty line\n res.append([id, sentence, start, end])\n texts.append(res)\n return texts\n\n\ndef make_dataset(texts, lbls):\n txt = []\n lbl = []\n for text, label in zip(texts, lbls):\n for Text in text:\n txt.append(Text[1])\n k = 0\n for l in label:\n if Text[2] < l[0] < Text[3]:\n lbl.append(1)\n k = 1\n break\n elif Text[2] < l[1] < Text[3]:\n lbl.append(1)\n k = 1\n break\n if k == 0:\n lbl.append(0)\n return txt, lbl\n\n\ndirectory = pathlib.Path('data/protechn_corpus_eval/train')\nids, texts,lbl = read_data(directory)\n\nids_train = ids\ntexts_train = texts\nlbl_train = lbl\ndirectory = pathlib.Path('data/protechn_corpus_eval/test')\nids_test, texts_test,lbl_test = read_data(directory)\ndirectory = pathlib.Path('data/protechn_corpus_eval/dev')\nids_dev, texts_dev,lbl_dev = read_data(directory)\n\ntxt_train = clean_text(texts_train, ids_train)\ntxt_test = clean_text(texts_test, ids_test)\ntxt_dev =clean_text(texts_dev, ids_dev)\n\ntrain_txt, train_lbl = make_dataset(txt_train, lbl_train)\ntest_txt, test_lbl = make_dataset(txt_test, lbl_test)\ndev_txt, dev_lbl = make_dataset(txt_dev, lbl_dev)\n\npickle.dump([dev_txt,dev_lbl], open(\"savedata/dev.txt\", \"wb\"))\npickle.dump([test_txt,test_lbl], open(\"savedata/test.txt\", \"wb\"))\npickle.dump([train_txt,train_lbl], open(\"savedata/train.txt\", \"wb\"))\n\ntrain_txt, train_lbl = pickle.load(open(\"savedata/train.txt\", \"rb\"))\ntest_txt, test_lbl = pickle.load(open(\"savedata/test.txt\", \"rb\"))\ndev_txt, dev_lbl = pickle.load(open(\"savedata/dev.txt\", \"rb\"))\n\n\"\"\"### **Dataset+ data_loader**\"\"\"\n\n\nclass Vocabulary:\n \"\"\"\n Helper class that maps words to unique indices and the other way around\n \"\"\"\n\n def __init__(self, tokens: List[str]):\n # dictionary that maps words to indices\n self.word_to_idx = {'<PAD>': 0}\n\n for idx, tok in enumerate(tokens, 1):\n self.word_to_idx[tok] = idx\n\n # dictionary that maps indices to words\n self.idx_to_word = {}\n for tok, idx in 
self.word_to_idx.items():\n self.idx_to_word[idx] = tok\n\n def get_token_at_index(self, idx: int):\n return self.idx_to_word[idx]\n\n def get_index_of_token(self, token: str):\n return self.word_to_idx[token]\n\n def size(self):\n return len(self.word_to_idx)\n\n\nclass PropagandaDataset(Dataset):\n def __init__(self,\n fold: str,\n examples: List[str],\n labels: List[int],\n vocab: Vocabulary):\n \"\"\"\n :type vocab: object\n :param fold: 'train'/'eval'/'test'\n :param examples: List of sentences/paragraphs\n :param labels: List of labels (1 if propaganda, 0 otherwise)\n \"\"\"\n self.fold = fold\n self.examples = examples\n self.labels = labels\n self.vocab = vocab\n\n def __getitem__(self, index: int) -> (torch.Tensor, torch.Tensor):\n \"\"\"\n This function converts an example to a Tensor containing the indices\n\n :param index: position of example to be retrieved.\n \"\"\"\n # retrieve sentence and label (correct class index)\n example, label = self.examples[index], self.labels[index]\n\n # tokenize sentence into words and other symbols\n tokenizer = get_tokenizer(\"spacy\")\n tokens = tokenizer(example)\n\n # convert tokens to their corresponding indices, according to\n # vocabulary\n token_indices = []\n for i in tokens:\n token_indices.append(self.vocab.get_index_of_token(i))\n\n return torch.LongTensor(token_indices), torch.LongTensor(label)\n\n def __len__(self):\n \"\"\"\n Return the size of this dataset. This is given by the number\n of sentences.\n \"\"\"\n return len(self.examples)\n\n\ndef collate_sentences(batch: List[Tuple]):\n \"\"\"\n This function converts a list of batch_size examples to\n a Tensor of size batch_size x max_len\n batch: [(example_1_tensor, example_1_label),\n ...\n (example_batch_size_tensor, example_batch_size_label)]\n \"\"\"\n # fill this list with all the labels in the batch\n batch_labels = []\n\n # we need to find the maximum length of a sentence in this batch\n max_len = 0\n for i in batch:\n if len(i[0]) > max_len:\n max_len = len(i[0])\n batch_size = len(batch)\n\n # print('batch size',batch_size)\n # initialize a Tensor filled with zeros (aka index of <PAD>)\n batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)\n\n # fill each row idx in batch_sentences with the corresponding\n # sequence tensor\n #\n # ... batch_sentences[idx, ...] 
= ...\n for idx in range(0, batch_size):\n # print(idx)\n # print(len(batch[idx][0]))\n # print(len(batch_sentences[idx]))\n batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]\n print(batch[idx])\n batch_labels.append(batch[idx][1])\n # print(batch_sentences[idx])\n print(type(batch_labels))\n # batch_labels = [torch.LongTensor(x) for x in batch_labels]\n batch_labels = torch.tensor(batch_labels)\n # print(batch_labels)\n return batch_sentences, batch_labels\n\n\ndef fill_vocab(txt: List[Tuple]):\n tokenizer = get_tokenizer(\"spacy\")\n list_v = []\n for i in txt:\n tok = tokenizer(i)\n for j in tok:\n if list_v.count(j) == 0:\n list_v.append(j)\n vocab = Vocabulary(tokens=list_v)\n return vocab\n\nfull_text = train_txt + dev_txt\nvocab = fill_vocab(full_text)\n\ntest_vocab = fill_vocab(test_txt)\ntrain_vocab = fill_vocab(train_txt)\ndev_vocab = fill_vocab(dev_txt)\n\npickle.dump(dev_vocab, open(\"savedata/dev_vocab.txt\", \"wb\"))\npickle.dump(test_vocab, open(\"savedata/test_vocab.txt\", \"wb\"))\npickle.dump(train_vocab, open(\"savedata/train_vocab.txt\", \"wb\"))\n\npickle.dump(vocab, open(\"savedata/vocab.txt\", \"wb\"))\n\ndev_vocab = pickle.load(open(\"savedata/dev_vocab.txt\",\"rb\"))\ntest_vocab = pickle.load(open(\"savedata/test_vocab.txt\",\"rb\"))\ntrain_vocab = pickle.load(open(\"savedata/train_vocab.txt\",\"rb\"))\n\nvocab = pickle.load(open(\"savedata/vocab.txt\", \"rb\"))\n\ndataset_train = PropagandaDataset('train', train_txt, train_lbl, train_vocab)\ntrain_loader = DataLoader(dataset_train, batch_size=16, collate_fn=collate_sentences)\n\ndataset_test = PropagandaDataset('train', test_txt, test_lbl, test_vocab)\ntest_loader = DataLoader(dataset_test, batch_size=16, collate_fn=collate_sentences)\n\ndataset_dev = PropagandaDataset('train', dev_txt, dev_lbl, dev_vocab)\ndev_loader = DataLoader(dataset_dev, batch_size=16, collate_fn=collate_sentences)\n\npickle.dump(train_loader, open(\"savedata/train_loaded.txt\", \"wb\"))\npickle.dump(test_loader, open(\"savedata/test_loaded.txt\", \"wb\"))\npickle.dump(dev_loader, open(\"savedata/dev_loaded.txt\", \"wb\"))\n\ntrain_loader = pickle.load(open(\"savedata/train_loaded.txt\", \"rb\"))\ntest_loader = pickle.load(open(\"savedata/test_loaded.txt\", \"rb\"))\ndev_loader = pickle.load(open(\"savedata/dev_loaded.txt\", \"rb\"))\n\n\"\"\"### model\"\"\"\n\n############################## PARAMETERS ######################################\n_hyperparameters_dict = {\n \"batch_size\": 64,\n \"num_epochs\": 10, # 10,\n \"max_len\": 250,\n \"embedding_size\": 128, # 256,\n \"rnn_size\": 256, # 1024,\n \"learning_algo\": \"adam\",\n \"learning_rate\": 0.001,\n \"max_grad_norm\": 5.0\n}\n\n\nclass RNN(nn.Module):\n def __init__(self, vocab_size: int, char_embedding_size: int,\n rnn_size: int):\n super().__init__()\n self.vocab_size = vocab_size\n self.char_embedding_size = char_embedding_size\n self.rnn_size = rnn_size\n self.dropout = nn.Dropout(p=0.3)\n # instantiate Modules with the correct arguments\n self.embedding = nn.Embedding(num_embeddings=vocab_size,\n embedding_dim=char_embedding_size)\n self.rnn = nn.LSTM(input_size=char_embedding_size,\n hidden_size=rnn_size, bidirectional=True)\n\n # self.rnn_cell = nn.GRUCell(input_size = char_embedding_size,\n # hidden_size = rnn_size)\n self.logits = nn.Linear(in_features=2 * rnn_size, out_features=2)\n # self.softmax = nn.Softmax(dim = 2)\n\n self.loss = nn.CrossEntropyLoss()\n\n def get_loss(self, logits: torch.FloatTensor, y: torch.FloatTensor):\n \"\"\"\n Computes loss for a batch 
of sequences. The sequence loss is the\n average of the individual losses at each timestep. The batch loss is\n the average of sequence losses across all batches.\n\n :param logits: unnormalized probabilities for T timesteps, size\n batch_size x max_timesteps x vocab_size\n :param y: ground truth values (index of correct characters), size\n batch_size x max_timesteps\n :returns: loss as a scalar\n \"\"\"\n #\n # logits: B x T x vocab_size\n # B x T\n\n # cross entropy: B x vocab_size x T\n # B x T\n # vision: B x num_classes\n # B\n return self.loss(logits, y)\n\n def get_logits(self, hidden_states: torch.FloatTensor,\n temperature: float = 1.0):\n \"\"\"\n Computes the unnormalized probabilities from hidden states. Optionally\n divide logits by a temperature, in order to influence predictions at\n test time (https://www.quora.com/What-is-Temperature-in-LSTM)\n\n :param hidden_states: tensor of size batch_size x timesteps x rnn_size\n :param temperature: coefficient that scales outputs before turning them\n to probabilities. A low temperature (0.1) results in more conservative\n predictions, while a higher temperature (0.9) results in more diverse\n predictions\n\n :return: tensor of size batch_size x timesteps x vocab_size\n \"\"\"\n return self.logits(hidden_states) / temperature\n\n def forward(self, batch: torch.LongTensor,\n hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:\n \"\"\"\n Computes the hidden states for the current batch (x, y).\n :param x: input of size batch_size x max_len\n :param hidden_start: hidden state at time step t = 0,\n size batch_size x rnn_size\n :return: hidden states at all timesteps,\n size batch_size x timesteps x rnn_size\n \"\"\"\n\n # max_len = x.size(1)\n # x,label = batch\n # batch_size x max_len x embedding_dim\n x_embedded = self.embedding(batch)\n # x_drop = self.dropout\n x_drop = self.dropout(x_embedded)\n\n # compute hidden states and logits for each time step\n # hidden_states_list = []\n # prev_hidden = hidden_start\n hidden_state = self.rnn(x_drop)[0]\n # print(hidden_state)\n # print(hidden_state[0].shape)\n # print(hidden_state[1].shape)\n\n # hidden_state = hidden_state.permute(2,1,0)\n # hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])\n # hidden_state_maxPooled = hidden_state.permute(2,1,0)\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n\n output = self.get_logits(hidden_state_pooled)\n\n # Loss = self.loss(output, y)\n\n # hidden_state = softmax(logits(hidden_state))\n\n # batch_size x max_len x rnn_size\n # hidden_states = torch.stack(hidden_states_list, dim=1)\n\n return output\n\n\n# instantiate the RNNLM module\nnetwork = RNN(vocab.size(),\n _hyperparameters_dict['embedding_size'],\n _hyperparameters_dict['rnn_size'])\n\n# if torch.cuda.is_available():\n# device = torch.device('cuda:0')\n# else:\n# device = torch.device('cpu')\n\n# move network to GPU if available\n# network = network.to(device)\n# device = torch.device('cpu')\n# network = network.to(device)\noptimizer = Adam(params=network.parameters(), lr=0.001)\n\n# CHECKPOINT: make sure you understand each parameter size\nprint(\"Neural network parameters: \")\nfor param_name, param in network.named_parameters():\n print(\"\\t\" + param_name, \" size: \", param.size())\n\n\"\"\"# Training/evaluation loop\"\"\"\n\n\n# Commented out IPython magic to ensure Python compatibility.\nclass Trainer:\n def __init__(self, model: nn.Module,\n train_data: torch.LongTensor,\n dev_data: torch.LongTensor,\n vocab: Vocabulary,\n hyperparams: 
Dict):\n self.model = model\n self.train_data = train_data\n self.dev_data = dev_data\n self.vocab = vocab\n # self.device = torch.device('cuda:0')\n if hyperparams['learning_algo'] == 'adam':\n self.optimizer = Adam(params=self.model.parameters(),\n lr=hyperparams['learning_rate'])\n else:\n self.optimizer = SGD(params=self.model.parameters(),\n lr=hyperparams['learning_rate'])\n self.num_epochs = hyperparams['num_epochs']\n self.max_len = hyperparams['max_len']\n self.batch_size = hyperparams['batch_size']\n self.rnn_size = hyperparams['rnn_size']\n self.max_grad_norm = hyperparams['max_grad_norm']\n\n # number of characters in training/dev data\n self.train_size = len(train_data)\n self.dev_size = len(dev_data)\n\n # number of sequences (X, Y) used for training\n self.num_train_examples = \\\n self.train_size // (self.batch_size * self.max_len) * self.batch_size\n\n def train_epoch(self, epoch_num: int) -> float:\n \"\"\"\n Compute the loss on the training set\n :param epoch_num: number of current epoch\n \"\"\"\n self.model.train()\n epoch_loss = 0.0\n # hidden_start = torch.zeros(self.batch_size, self.rnn_size)\n # for batch_num, (x, y) in enumerate(make_batches(self.train_data,\n # self.batch_size,\n # self.max_len)):\n\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n # reset gradients in train epoch\n self.optimizer.zero_grad()\n x = len(batch_tuple[0])\n y = len(batch_tuple[0][0])\n # compute hidden states\n # batch x timesteps x hidden_size\n x, y = batch_tuple\n # x = x.to(self.device)\n # y = y.to(self.device)\n hidden_states = self.model(x)\n # compute unnormalized probabilities\n # batch x timesteps x vocab_size\n # logits = self.model.get_logits(hidden_states)\n\n # compute loss\n # scalar\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n\n # backpropagation (gradient of loss wrt parameters)\n batch_loss.backward()\n\n # clip gradients if they get too large\n torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),\n self.max_grad_norm)\n\n # update parameters\n self.optimizer.step()\n\n # we use a stateful RNN, which means the first hidden state for the\n # next batch is the last hidden state of the current batch\n # hidden_states.detach_()\n # hidden_start = hidden_states[:,-1,:] # add comment\n if batch_num % 100 == 0:\n print(\"epoch %d, %d/%d examples, batch loss = %f\"\n % (epoch_num, (batch_num + 1) * self.batch_size,\n self.num_train_examples, batch_loss.item()))\n epoch_loss /= (batch_num + 1)\n\n return epoch_loss\n\n def eval_epoch(self, epoch_num: int) -> float:\n \"\"\"\n Compute the loss on the validation set\n :param epoch_num: number of current epoch\n \"\"\"\n epoch_loss = 0.0\n # hidden_start = torch.zeros(self.batch_size, self.rnn_size).to(device)\n with torch.no_grad():\n # for batch_num, (x, y) in enumerate(make_batches(self.dev_data,\n # self.batch_size,\n # self.max_len)):\n acc = 0;\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n # reset gradients\n # self.optimizer.zero_grad()\n # x = len(batch_tuple[0])\n # y = len(batch_tuple[0][0])\n # batch x timesteps x hidden_size\n x, y = batch_tuple\n # x = x.to(self.device)\n # y = y.to(self.device)\n hidden_states = self.model(x)\n # batch x timesteps x vocab_size\n # logits = self.model.get_logits(hidden_states)\n\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n hidden_states_m = torch.argmax(hidden_states, dim=1)\n acc += 
sum(hidden_states_m == y).item()\n # we use a stateful RNN, which means the first hidden state for\n # the next batch is the last hidden state of the current batch\n # hidden_states.detach_()\n # hidden_start = hidden_states[:,-1,:]\n\n epoch_loss /= (batch_num + 1)\n\n return epoch_loss, acc\n\n def train(self) -> Dict:\n train_losses, dev_losses, dev_acc = [], [], []\n for epoch in range(self.num_epochs):\n epoch_train_loss = self.train_epoch(epoch)\n epoch_dev_loss, epoch_dev_train = self.eval_epoch(epoch)\n train_losses.append(epoch_train_loss)\n dev_losses.append(epoch_dev_loss)\n dev_acc.append(epoch_dev_train)\n return {\"train_losses\": train_losses,\n \"dev_losses\": dev_losses,\n \"dev_acc\": epoch_dev_train}\n\n\ndef plot_losses(metrics: Dict):\n \"\"\"\n Plots training/validation losses.\n :param metrics: dictionar\n \"\"\"\n plt.figure()\n plt.plot(metrics['train_losses'], c='b', label='Train')\n plt.plot(metrics['dev_losses'], c='g', label='Valid')\n plt.ylabel('Loss')\n plt.xlabel('Iteration')\n plt.legend()\n plt.show()\n\n\n# op= torch.rand(4)\n# thx = torch.rand(4)\n# thx[0] = op[0]\n# t = thx==op\n# print(t)\n# print(sum(t).item())\n\n# train network for some epoch\ntrainer = Trainer(network, train_loader, dev_loader, vocab, _hyperparameters_dict)\nmetrics = trainer.train()\n\n# plot training and validations losses each epoch\nplot_losses(metrics)\n\n# for i in train_loader:\n# print(len(i[0][0]))\n# print(len(i[0]))\n# print(i[0])\n# x = 1\n# while (True)\n# x = 0\n",
"step-ids": [
20,
23,
25,
30,
31
]
}
|
[
20,
23,
25,
30,
31
] |
'''
Can you print numbers from 1 to 100 without using any loop.
'''
# Use Recursion
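# A minimal recursive sketch (illustration only, not part of the original snippet):
# print 1..100 without any explicit loop by recursing on the counter.
def print_up_to(n=1, limit=100):
    print(n)
    if n < limit:
        print_up_to(n + 1, limit)

# print_up_to()  # would print 1 through 100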
|
normal
|
{
"blob_id": "cc703690151acd17430b5a9715e71a694fdeca10",
"index": 2116,
"step-1": "<mask token>\n",
"step-2": "'''\nCan you print numbers from 1 to 100 without using any loop.\n'''\n\n# Use Recursion",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.contrib import admin
from pharma_models.personas.models import Persona
admin.site.register(Persona)
|
normal
|
{
"blob_id": "59d04ebd9a45c6a179a2da1f88f728ba2af91c05",
"index": 590,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Persona)\n",
"step-3": "from django.contrib import admin\nfrom pharma_models.personas.models import Persona\nadmin.site.register(Persona)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from handler.auth import provider_required
from handler.provider import ProviderBaseHandler
from forms.provider import ProviderAddressForm, ProviderVanityURLForm
import logging
from data import db
from util import saved_message
class ProviderEditAddressHandler(ProviderBaseHandler):
@provider_required
def get(self, vanity_url=None):
provider = db.get_provider_from_vanity_url(vanity_url)
logging.info("provider dump before edit:" + str(vars(provider)))
address_form = ProviderAddressForm().get_form(obj=provider)
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=address_form, vanity_url_form=vanity_url_form)
@provider_required
def post(self, vanity_url=None):
form = ProviderAddressForm().get_form(self.request.POST)
if form.validate():
# Store Provider
provider = db.get_provider_from_vanity_url(vanity_url)
form.populate_obj(provider)
provider.put()
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form, success_message=saved_message)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Success")
else:
# show validation error
provider = db.get_provider_from_vanity_url(vanity_url)
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Validation Error")
class ProviderChangeURLHandler(ProviderBaseHandler):
@provider_required
def post(self, vanity_url=None):
form = ProviderVanityURLForm().get_form(self.request.POST)
if form.validate():
# Store Provider
provider = db.get_provider_from_vanity_url(vanity_url)
form.populate_obj(provider)
provider.put()
self.redirect('/provider/address/' + provider.vanity_url)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Success")
else:
# show validation error
provider = db.get_provider_from_vanity_url(vanity_url)
address_form = ProviderAddressForm().get_form(obj=provider)
self.render_address(provider, address_form=address_form, vanity_url_form=form)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Validation Error")
|
normal
|
{
"blob_id": "454f885e2254295ce6508e70c0348f5cbe855520",
"index": 5071,
"step-1": "<mask token>\n\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n <mask token>\n <mask token>\n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n self.redirect('/provider/address/' + provider.vanity_url)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n",
"step-2": "<mask token>\n\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n\n @provider_required\n def get(self, vanity_url=None):\n provider = db.get_provider_from_vanity_url(vanity_url)\n logging.info('provider dump before edit:' + str(vars(provider)))\n address_form = ProviderAddressForm().get_form(obj=provider)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=vanity_url_form)\n <mask token>\n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n self.redirect('/provider/address/' + provider.vanity_url)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n",
"step-3": "<mask token>\n\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n\n @provider_required\n def get(self, vanity_url=None):\n provider = db.get_provider_from_vanity_url(vanity_url)\n logging.info('provider dump before edit:' + str(vars(provider)))\n address_form = ProviderAddressForm().get_form(obj=provider)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=vanity_url_form)\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderAddressForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=form,\n vanity_url_form=vanity_url_form, success_message=saved_message)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=form,\n vanity_url_form=vanity_url_form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n self.redirect('/provider/address/' + provider.vanity_url)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n",
"step-4": "from handler.auth import provider_required\nfrom handler.provider import ProviderBaseHandler\nfrom forms.provider import ProviderAddressForm, ProviderVanityURLForm\nimport logging\nfrom data import db\nfrom util import saved_message\n\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n\n @provider_required\n def get(self, vanity_url=None):\n provider = db.get_provider_from_vanity_url(vanity_url)\n logging.info('provider dump before edit:' + str(vars(provider)))\n address_form = ProviderAddressForm().get_form(obj=provider)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=vanity_url_form)\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderAddressForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=form,\n vanity_url_form=vanity_url_form, success_message=saved_message)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=form,\n vanity_url_form=vanity_url_form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n self.redirect('/provider/address/' + provider.vanity_url)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n",
"step-5": "from handler.auth import provider_required\nfrom handler.provider import ProviderBaseHandler\nfrom forms.provider import ProviderAddressForm, ProviderVanityURLForm\nimport logging\nfrom data import db\nfrom util import saved_message\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n @provider_required\n def get(self, vanity_url=None):\n provider = db.get_provider_from_vanity_url(vanity_url)\n logging.info(\"provider dump before edit:\" + str(vars(provider)))\n address_form = ProviderAddressForm().get_form(obj=provider)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n\n self.render_address(provider, address_form=address_form, vanity_url_form=vanity_url_form)\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderAddressForm().get_form(self.request.POST)\n \n if form.validate():\n # Store Provider\n provider = db.get_provider_from_vanity_url(vanity_url)\n \n form.populate_obj(provider)\n provider.put()\n\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n\n self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form, success_message=saved_message)\n\n # log the event\n self.log_event(user=provider.user, msg=\"Edit Address: Success\")\n\n else:\n # show validation error\n provider = db.get_provider_from_vanity_url(vanity_url)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n\n self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form)\n \n # log the event\n self.log_event(user=provider.user, msg=\"Edit Address: Validation Error\")\n\n\n\n \n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n \n if form.validate():\n # Store Provider\n provider = db.get_provider_from_vanity_url(vanity_url)\n \n form.populate_obj(provider)\n \n provider.put()\n\n self.redirect('/provider/address/' + provider.vanity_url)\n\n # log the event\n self.log_event(user=provider.user, msg=\"Edit Address: Success\")\n\n else:\n # show validation error\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n\n self.render_address(provider, address_form=address_form, vanity_url_form=form)\n \n # log the event\n self.log_event(user=provider.user, msg=\"Edit Address: Validation Error\")\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Dial: total time needed to dial a word on an old rotary phone keypad
dial = ['ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']
cha = input()
num = 0
for i in range(len(cha)):
    for j in dial:
        if cha[i] in j:
            # the digit holding this letter takes (list index + 3) time units to dial
            num = num + dial.index(j) + 3
print(num)
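# Worked example: for the input "UNUCIC"
#   U -> 'TUV' (index 6) -> 9, N -> 'MNO' (index 4) -> 7, U -> 9,
#   C -> 'ABC' (index 0) -> 3, I -> 'GHI' (index 2) -> 5, C -> 3
# so the printed total is 9 + 7 + 9 + 3 + 5 + 3 = 36.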
|
normal
|
{
"blob_id": "774e607c693fa2d5199582302e466674f65b6449",
"index": 6213,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(cha)):\n for j in dial:\n if cha[i] in j:\n num = num + dial.index(j) + 3\nprint(num)\n",
"step-3": "dial = ['ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']\ncha = input()\nnum = 0\nfor i in range(len(cha)):\n for j in dial:\n if cha[i] in j:\n num = num + dial.index(j) + 3\nprint(num)\n",
"step-4": "# 다이얼\ndial = ['ABC', 'DEF', 'GHI','JKL','MNO','PQRS','TUV','WXYZ']\ncha = input()\n\nnum = 0\nfor i in range(len(cha)):\n for j in dial:\n if cha[i] in j:\n num = num + dial.index(j) + 3\nprint(num)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class UnknownResponseFormat(Exception):
pass
|
flexible
|
{
"blob_id": "e5e460eb704e2ab5f747d1beee05e012ea95fbd2",
"index": 3871,
"step-1": "<mask token>\n",
"step-2": "class UnknownResponseFormat(Exception):\n pass\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from pptx import Presentation
import csv

prs = Presentation()
# layout 1 of the default template is "Title and Content": a title plus a body placeholder
slide_layout = prs.slide_layouts[1]
slide = prs.slides.add_slide(slide_layout)
shapes = slide.shapes
title_shape = shapes.title
body_shape = shapes.placeholders[1]
title_shape.text = "Tekst"  # Polish: "Text"
tf = body_shape.text_frame
tf.text = "Zawartość tekst frame"  # Polish: "Text frame content"
# each CSV row becomes two bullets: column 0 at indent level 1, column 1 at level 2
with open("report.csv") as csvfile:
    data = csv.reader(csvfile, delimiter=',')
    for row in data:
        p = tf.add_paragraph()
        p.text = row[0]
        p.level = 1
        p = tf.add_paragraph()
        p.text = row[1]
        p.level = 2
prs.save("raport.pptx")
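# A hypothetical report.csv (assumed two-column layout) this script could consume:
#   Service,Status
#   API,OK
#   Database,Degraded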
|
normal
|
{
"blob_id": "e1f003b6a687e5654a1ee6c595e789ced02cd6c3",
"index": 7086,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n",
"step-3": "<mask token>\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\ntitle_shape = shapes.title\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = 'Tekst'\ntf = body_shape.text_frame\ntf.text = 'Zawartość tekst frame'\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n",
"step-4": "from pptx import Presentation\nimport csv\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\ntitle_shape = shapes.title\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = 'Tekst'\ntf = body_shape.text_frame\ntf.text = 'Zawartość tekst frame'\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n",
"step-5": "from pptx import Presentation\nimport csv\n\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\n\ntitle_shape = shapes.title\n\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = \"Tekst\"\n\ntf = body_shape.text_frame\ntf.text = \"Zawartość tekst frame\"\nwith open(\"report.csv\") as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\n\nprs.save(\"raport.pptx\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def alt(h, dt):
t = 0
while True:
t = t + 1
a = -6 * t ** 4 + h * t ** 3 + 2 * t ** 2 + t
if a <= 0:
print('The balloon first touches ground at hour:')
print(t)
break
elif t == dt:
print('The balloon does not touch ground in the given time.')
break
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def alt(h, dt):
t = 0
while True:
t = t + 1
a = -6 * t ** 4 + h * t ** 3 + 2 * t ** 2 + t
if a <= 0:
print('The balloon first touches ground at hour:')
print(t)
break
elif t == dt:
print('The balloon does not touch ground in the given time.')
break
return
alt(int(input()), int(input()))
<|reserved_special_token_1|>
def alt(h, dt):
t=0
while True:
t=t+1
a=(-6)*(t**4)+ h*(t**3)+2*(t**2)+t
if a<=0:
print('The balloon first touches ground at hour:')
print(t)
break
elif t==dt:
print('The balloon does not touch ground in the given time.')
break
return
alt(int(input()), int(input()))
|
flexible
|
{
"blob_id": "592f29f08637e511bd7d49a3b58f69b700721d89",
"index": 8083,
"step-1": "<mask token>\n",
"step-2": "def alt(h, dt):\n t = 0\n while True:\n t = t + 1\n a = -6 * t ** 4 + h * t ** 3 + 2 * t ** 2 + t\n if a <= 0:\n print('The balloon first touches ground at hour:')\n print(t)\n break\n elif t == dt:\n print('The balloon does not touch ground in the given time.')\n break\n return\n\n\n<mask token>\n",
"step-3": "def alt(h, dt):\n t = 0\n while True:\n t = t + 1\n a = -6 * t ** 4 + h * t ** 3 + 2 * t ** 2 + t\n if a <= 0:\n print('The balloon first touches ground at hour:')\n print(t)\n break\n elif t == dt:\n print('The balloon does not touch ground in the given time.')\n break\n return\n\n\nalt(int(input()), int(input()))\n",
"step-4": "def alt(h, dt):\n t=0\n while True:\n t=t+1\n a=(-6)*(t**4)+ h*(t**3)+2*(t**2)+t\n \n if a<=0:\n print('The balloon first touches ground at hour:')\n print(t)\n break\n elif t==dt:\n print('The balloon does not touch ground in the given time.')\n break\n return\nalt(int(input()), int(input()))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.0.8 on 2020-07-29 18:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scenario', '0005_auto_20200729_1149'),
]
operations = [
migrations.RemoveField(
model_name='weapon',
name='vehicle',
),
migrations.DeleteModel(
name='Vehicle',
),
]
|
normal
|
{
"blob_id": "b99093fb13c59d4b9bb0a4f32fb62423d6752118",
"index": 6480,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('scenario', '0005_auto_20200729_1149')]\n operations = [migrations.RemoveField(model_name='weapon', name=\n 'vehicle'), migrations.DeleteModel(name='Vehicle')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('scenario', '0005_auto_20200729_1149')]\n operations = [migrations.RemoveField(model_name='weapon', name=\n 'vehicle'), migrations.DeleteModel(name='Vehicle')]\n",
"step-5": "# Generated by Django 3.0.8 on 2020-07-29 18:30\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scenario', '0005_auto_20200729_1149'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='weapon',\n name='vehicle',\n ),\n migrations.DeleteModel(\n name='Vehicle',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def take_second(element):
return element[1]
<|reserved_special_token_0|>
def get_random_name():
name = ''
for i in range(random.randint(5, 15)):
name += random.choice(string.ascii_letters)
return name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def take_second(element):
return element[1]
<|reserved_special_token_0|>
def get_random_name():
name = ''
for i in range(random.randint(5, 15)):
name += random.choice(string.ascii_letters)
return name
<|reserved_special_token_0|>
print(sorted(imenik, key=take_second))
for i in range(100000):
novi_element = random.randint(1, 10000), get_random_name()
imenik.append(novi_element)
imenik.sort(key=take_second)
print(imenik)
<|reserved_special_token_0|>
while True:
mid_index = (max_index + min_index) // 2
guess_score = imenik[mid_index][0]
guess_name = imenik[mid_index][1]
if guess_name == previous_guess_name:
print('Not found')
break
if guess_name == name:
print('your score is', guess_score)
break
elif name > guess_name:
min_index = mid_index
else:
max_index = mid_index
previous_guess_name = guess_name
counter += 1
print('Number of comparisons', counter)
print('after')
<|reserved_special_token_0|>
for i in range(len(imenik)):
counter += 1
if imenik[i][1] == name:
print('your score is', guess_score)
found = True
break
if not found:
print('Not found')
print('Number of comparisons after', counter)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def take_second(element):
return element[1]
<|reserved_special_token_0|>
def get_random_name():
name = ''
for i in range(random.randint(5, 15)):
name += random.choice(string.ascii_letters)
return name
imenik = [(777, 'zejneba'), (324, 'fahro'), (23, 'fatih'), (2334, 'muamer'),
(435, 'kerim'), (4568, 'zzzzzzz')]
print(sorted(imenik, key=take_second))
for i in range(100000):
novi_element = random.randint(1, 10000), get_random_name()
imenik.append(novi_element)
imenik.sort(key=take_second)
print(imenik)
name = input('enter a name: ')
min_index = 0
max_index = len(imenik)
previous_guess_name = ''
counter = 0
while True:
mid_index = (max_index + min_index) // 2
guess_score = imenik[mid_index][0]
guess_name = imenik[mid_index][1]
if guess_name == previous_guess_name:
print('Not found')
break
if guess_name == name:
print('your score is', guess_score)
break
elif name > guess_name:
min_index = mid_index
else:
max_index = mid_index
previous_guess_name = guess_name
counter += 1
print('Number of comparisons', counter)
print('after')
found = False
counter = 0
for i in range(len(imenik)):
counter += 1
if imenik[i][1] == name:
print('your score is', guess_score)
found = True
break
if not found:
print('Not found')
print('Number of comparisons after', counter)
<|reserved_special_token_1|>
import random
def take_second(element):
return element[1]
import string
def get_random_name():
name = ''
for i in range(random.randint(5, 15)):
name += random.choice(string.ascii_letters)
return name
imenik = [(777, 'zejneba'), (324, 'fahro'), (23, 'fatih'), (2334, 'muamer'),
(435, 'kerim'), (4568, 'zzzzzzz')]
print(sorted(imenik, key=take_second))
for i in range(100000):
novi_element = random.randint(1, 10000), get_random_name()
imenik.append(novi_element)
imenik.sort(key=take_second)
print(imenik)
name = input('enter a name: ')
min_index = 0
max_index = len(imenik)
previous_guess_name = ''
counter = 0
while True:
mid_index = (max_index + min_index) // 2
guess_score = imenik[mid_index][0]
guess_name = imenik[mid_index][1]
if guess_name == previous_guess_name:
print('Not found')
break
if guess_name == name:
print('your score is', guess_score)
break
elif name > guess_name:
min_index = mid_index
else:
max_index = mid_index
previous_guess_name = guess_name
counter += 1
print('Number of comparisons', counter)
print('after')
found = False
counter = 0
for i in range(len(imenik)):
counter += 1
if imenik[i][1] == name:
print('your score is', guess_score)
found = True
break
if not found:
print('Not found')
print('Number of comparisons after', counter)
<|reserved_special_token_1|>
import random
def take_second(element):
return element[1]
import string
def get_random_name():
name = ""
for i in range(random.randint(5, 15)):
name += random.choice(string.ascii_letters)
return name
imenik = [(777, "zejneba"), (324, "fahro"), (23, "fatih"), (2334, "muamer"), (435, "kerim"),(4568,"zzzzzzz")]
print(sorted(imenik,key=take_second))
for i in range(100000):
novi_element = (random.randint(1, 10000), get_random_name())
imenik.append(novi_element)
imenik.sort(key=take_second)
print(imenik)
name = input('enter a name: ')
min_index = 0
max_index = len(imenik)
previous_guess_name = ""
counter = 0
while True:
mid_index = (max_index + min_index) // 2
guess_score = imenik[mid_index][0]
guess_name = imenik[mid_index][1]
if guess_name == previous_guess_name:
print("Not found")
break
if guess_name == name:
print("your score is", guess_score)
break
elif name > guess_name:
min_index = mid_index
else:
max_index = mid_index
previous_guess_name = guess_name
counter += 1
print("Number of comparisons", counter)
print("after")
found = False
counter = 0
for i in range(len(imenik)):
counter += 1
if imenik[i][1] == name:
print("your score is", guess_score)
found = True
break
if not found:
print("Not found")
print("Number of comparisons after", counter)
|
flexible
|
{
"blob_id": "21ef8103a5880a07d8c681b2367c2beef727260f",
"index": 6536,
"step-1": "<mask token>\n\n\ndef take_second(element):\n return element[1]\n\n\n<mask token>\n\n\ndef get_random_name():\n name = ''\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef take_second(element):\n return element[1]\n\n\n<mask token>\n\n\ndef get_random_name():\n name = ''\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\n<mask token>\nprint(sorted(imenik, key=take_second))\nfor i in range(100000):\n novi_element = random.randint(1, 10000), get_random_name()\n imenik.append(novi_element)\nimenik.sort(key=take_second)\nprint(imenik)\n<mask token>\nwhile True:\n mid_index = (max_index + min_index) // 2\n guess_score = imenik[mid_index][0]\n guess_name = imenik[mid_index][1]\n if guess_name == previous_guess_name:\n print('Not found')\n break\n if guess_name == name:\n print('your score is', guess_score)\n break\n elif name > guess_name:\n min_index = mid_index\n else:\n max_index = mid_index\n previous_guess_name = guess_name\n counter += 1\nprint('Number of comparisons', counter)\nprint('after')\n<mask token>\nfor i in range(len(imenik)):\n counter += 1\n if imenik[i][1] == name:\n print('your score is', guess_score)\n found = True\n break\nif not found:\n print('Not found')\nprint('Number of comparisons after', counter)\n",
"step-3": "<mask token>\n\n\ndef take_second(element):\n return element[1]\n\n\n<mask token>\n\n\ndef get_random_name():\n name = ''\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\nimenik = [(777, 'zejneba'), (324, 'fahro'), (23, 'fatih'), (2334, 'muamer'),\n (435, 'kerim'), (4568, 'zzzzzzz')]\nprint(sorted(imenik, key=take_second))\nfor i in range(100000):\n novi_element = random.randint(1, 10000), get_random_name()\n imenik.append(novi_element)\nimenik.sort(key=take_second)\nprint(imenik)\nname = input('enter a name: ')\nmin_index = 0\nmax_index = len(imenik)\nprevious_guess_name = ''\ncounter = 0\nwhile True:\n mid_index = (max_index + min_index) // 2\n guess_score = imenik[mid_index][0]\n guess_name = imenik[mid_index][1]\n if guess_name == previous_guess_name:\n print('Not found')\n break\n if guess_name == name:\n print('your score is', guess_score)\n break\n elif name > guess_name:\n min_index = mid_index\n else:\n max_index = mid_index\n previous_guess_name = guess_name\n counter += 1\nprint('Number of comparisons', counter)\nprint('after')\nfound = False\ncounter = 0\nfor i in range(len(imenik)):\n counter += 1\n if imenik[i][1] == name:\n print('your score is', guess_score)\n found = True\n break\nif not found:\n print('Not found')\nprint('Number of comparisons after', counter)\n",
"step-4": "import random\n\n\ndef take_second(element):\n return element[1]\n\n\nimport string\n\n\ndef get_random_name():\n name = ''\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\nimenik = [(777, 'zejneba'), (324, 'fahro'), (23, 'fatih'), (2334, 'muamer'),\n (435, 'kerim'), (4568, 'zzzzzzz')]\nprint(sorted(imenik, key=take_second))\nfor i in range(100000):\n novi_element = random.randint(1, 10000), get_random_name()\n imenik.append(novi_element)\nimenik.sort(key=take_second)\nprint(imenik)\nname = input('enter a name: ')\nmin_index = 0\nmax_index = len(imenik)\nprevious_guess_name = ''\ncounter = 0\nwhile True:\n mid_index = (max_index + min_index) // 2\n guess_score = imenik[mid_index][0]\n guess_name = imenik[mid_index][1]\n if guess_name == previous_guess_name:\n print('Not found')\n break\n if guess_name == name:\n print('your score is', guess_score)\n break\n elif name > guess_name:\n min_index = mid_index\n else:\n max_index = mid_index\n previous_guess_name = guess_name\n counter += 1\nprint('Number of comparisons', counter)\nprint('after')\nfound = False\ncounter = 0\nfor i in range(len(imenik)):\n counter += 1\n if imenik[i][1] == name:\n print('your score is', guess_score)\n found = True\n break\nif not found:\n print('Not found')\nprint('Number of comparisons after', counter)\n",
"step-5": "import random\n\n\ndef take_second(element):\n return element[1]\n\n\nimport string\n\n\ndef get_random_name():\n name = \"\"\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\nimenik = [(777, \"zejneba\"), (324, \"fahro\"), (23, \"fatih\"), (2334, \"muamer\"), (435, \"kerim\"),(4568,\"zzzzzzz\")]\n\nprint(sorted(imenik,key=take_second))\nfor i in range(100000):\n novi_element = (random.randint(1, 10000), get_random_name())\n imenik.append(novi_element)\n\nimenik.sort(key=take_second)\nprint(imenik)\n\nname = input('enter a name: ')\n\nmin_index = 0\nmax_index = len(imenik)\n\nprevious_guess_name = \"\"\ncounter = 0\nwhile True:\n\n mid_index = (max_index + min_index) // 2\n guess_score = imenik[mid_index][0]\n guess_name = imenik[mid_index][1]\n\n if guess_name == previous_guess_name:\n print(\"Not found\")\n break\n\n if guess_name == name:\n print(\"your score is\", guess_score)\n break\n elif name > guess_name:\n min_index = mid_index\n else:\n max_index = mid_index\n\n previous_guess_name = guess_name\n counter += 1\n\nprint(\"Number of comparisons\", counter)\n\nprint(\"after\")\nfound = False\ncounter = 0\nfor i in range(len(imenik)):\n counter += 1\n if imenik[i][1] == name:\n print(\"your score is\", guess_score)\n found = True\n break\n\nif not found:\n print(\"Not found\")\n\nprint(\"Number of comparisons after\", counter)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(count):
print('Enter details for student', i + 1, 'below:')
rollNo = int(input('Rollno: '))
name = input('Name: ')
marks = float(input('Marks: '))
records = str(rollNo) + ',' + name + ',' + str(marks) + '\n'
fileObj.write(records)
fileObj.close()
<|reserved_special_token_1|>
count = int(input('How many students are there in class? '))
fileObj = open('marks.txt', 'w')
for i in range(count):
print('Enter details for student', i + 1, 'below:')
rollNo = int(input('Rollno: '))
name = input('Name: ')
marks = float(input('Marks: '))
records = str(rollNo) + ',' + name + ',' + str(marks) + '\n'
fileObj.write(records)
fileObj.close()
<|reserved_special_token_1|>
#Get roll numbers, name & marks of the students of a class(get from user) and store these details in a file- marks.txt
count = int(input("How many students are there in class? "))
fileObj = open('marks.txt',"w")
for i in range(count):
print("Enter details for student",(i+1),"below:")
rollNo = int(input("Rollno: "))
name = input("Name: ")
marks = float(input("Marks: "))
records = str(rollNo) + "," + name + "," + str(marks) + '\n'
fileObj.write(records)
fileObj.close()
|
flexible
|
{
"blob_id": "74cb06ffa41748af431b46c9ff98eb91771a5015",
"index": 537,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(count):\n print('Enter details for student', i + 1, 'below:')\n rollNo = int(input('Rollno: '))\n name = input('Name: ')\n marks = float(input('Marks: '))\n records = str(rollNo) + ',' + name + ',' + str(marks) + '\\n'\n fileObj.write(records)\nfileObj.close()\n",
"step-3": "count = int(input('How many students are there in class? '))\nfileObj = open('marks.txt', 'w')\nfor i in range(count):\n print('Enter details for student', i + 1, 'below:')\n rollNo = int(input('Rollno: '))\n name = input('Name: ')\n marks = float(input('Marks: '))\n records = str(rollNo) + ',' + name + ',' + str(marks) + '\\n'\n fileObj.write(records)\nfileObj.close()\n",
"step-4": "#Get roll numbers, name & marks of the students of a class(get from user) and store these details in a file- marks.txt\n\ncount = int(input(\"How many students are there in class? \"))\nfileObj = open('marks.txt',\"w\")\n\nfor i in range(count):\n print(\"Enter details for student\",(i+1),\"below:\")\n rollNo = int(input(\"Rollno: \"))\n name = input(\"Name: \")\n marks = float(input(\"Marks: \"))\n records = str(rollNo) + \",\" + name + \",\" + str(marks) + '\\n'\n fileObj.write(records)\nfileObj.close()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Content-Type: text/html')
print()
<|reserved_special_token_0|>
cgitb.enable()
<|reserved_special_token_0|>
print('Name of the user is:', name)
<|reserved_special_token_0|>
cursor.execute(name)
<|reserved_special_token_0|>
print(name)
db.close()
<|reserved_special_token_1|>
print('Content-Type: text/html')
print()
<|reserved_special_token_0|>
cgitb.enable()
form = cgi.FieldStorage()
name = form.getvalue('fname')
print('Name of the user is:', name)
<|reserved_special_token_0|>
db = pymysql.connect('localhost', 'root', 'Manchesterutd20', 'sts')
cursor = db.cursor()
cursor.execute(name)
name = cursor.fetchall()
print(name)
db.close()
<|reserved_special_token_1|>
print('Content-Type: text/html')
print()
import cgi, cgitb
cgitb.enable()
form = cgi.FieldStorage()
name = form.getvalue('fname')
print('Name of the user is:', name)
import pymysql
db = pymysql.connect('localhost', 'root', 'Manchesterutd20', 'sts')
cursor = db.cursor()
cursor.execute(name)
name = cursor.fetchall()
print(name)
db.close()
<|reserved_special_token_1|>
#!C:/Users/Tarang/AppData/Local/Programs/Python/Python37-32/python.exe -u
print("Content-Type: text/html")
print()
import cgi,cgitb
cgitb.enable() #for debugging
form = cgi.FieldStorage()
name = form.getvalue('fname')
print("Name of the user is:",name)
import pymysql
db = pymysql.connect("localhost","root","Manchesterutd20","sts" )
cursor = db.cursor()
cursor.execute(name)
name = cursor.fetchall()
print (name)
db.close()
|
flexible
|
{
"blob_id": "cb28e8bb98cbeed0b703fbfcf7cf30ebca52aa25",
"index": 4247,
"step-1": "<mask token>\n",
"step-2": "print('Content-Type: text/html')\nprint()\n<mask token>\ncgitb.enable()\n<mask token>\nprint('Name of the user is:', name)\n<mask token>\ncursor.execute(name)\n<mask token>\nprint(name)\ndb.close()\n",
"step-3": "print('Content-Type: text/html')\nprint()\n<mask token>\ncgitb.enable()\nform = cgi.FieldStorage()\nname = form.getvalue('fname')\nprint('Name of the user is:', name)\n<mask token>\ndb = pymysql.connect('localhost', 'root', 'Manchesterutd20', 'sts')\ncursor = db.cursor()\ncursor.execute(name)\nname = cursor.fetchall()\nprint(name)\ndb.close()\n",
"step-4": "print('Content-Type: text/html')\nprint()\nimport cgi, cgitb\ncgitb.enable()\nform = cgi.FieldStorage()\nname = form.getvalue('fname')\nprint('Name of the user is:', name)\nimport pymysql\ndb = pymysql.connect('localhost', 'root', 'Manchesterutd20', 'sts')\ncursor = db.cursor()\ncursor.execute(name)\nname = cursor.fetchall()\nprint(name)\ndb.close()\n",
"step-5": "#!C:/Users/Tarang/AppData/Local/Programs/Python/Python37-32/python.exe -u\r\nprint(\"Content-Type: text/html\")\r\nprint()\r\n\r\nimport cgi,cgitb\r\ncgitb.enable() #for debugging\r\nform = cgi.FieldStorage()\r\nname = form.getvalue('fname')\r\nprint(\"Name of the user is:\",name)\r\n\r\nimport pymysql\r\n\r\ndb = pymysql.connect(\"localhost\",\"root\",\"Manchesterutd20\",\"sts\" )\r\n\r\ncursor = db.cursor()\r\n\r\ncursor.execute(name)\r\n\r\nname = cursor.fetchall()\r\n\r\nprint (name)\r\n\r\ndb.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def CALCB2(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
TEMP = [(0) for x in range(17)]
TEMP1 = [(0) for x in range(289)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I]
[IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J
][IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B2')
sys.exit()
GOTO100()
<|reserved_special_token_0|>
def CALCB4(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
TEMP = [(0) for x in range(17)]
TEMP1 = [(0) for x in range(289)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,
IZ[KGAS][LGAS]] + ELEV[I, IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,
IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B4')
sys.exit()
GOTO100()
def CALCB5(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
TEMP = [(0) for x in range(17)]
TEMP1 = [(0) for x in range(289)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL][IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL][IZ[KGAS]
[LGAS]] - ELEV[I][IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
                    if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,
IZ[KGAS][LGAS]] + ELEV[I, IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,
IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B5')
sys.exit()
GOTO100()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def CALCB2(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
TEMP = [(0) for x in range(17)]
TEMP1 = [(0) for x in range(289)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I]
[IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J
][IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B2')
sys.exit()
GOTO100()
def CALCB3(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
TEMP = [(0) for x in range(17)]
TEMP1 = [(0) for x in range(289)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I,
IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J
][IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B3')
sys.exit()
GOTO100()
def CALCB4(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
TEMP = [(0) for x in range(17)]
TEMP1 = [(0) for x in range(289)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,
IZ[KGAS][LGAS]] + ELEV[I, IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,
IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B4')
sys.exit()
GOTO100()
def CALCB5(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
TEMP = [(0) for x in range(17)]
TEMP1 = [(0) for x in range(289)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL][IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL][IZ[KGAS]
[LGAS]] - ELEV[I][IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
                    if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,
IZ[KGAS][LGAS]] + ELEV[I, IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,
IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B5')
sys.exit()
GOTO100()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def CALCB1(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
TEMP = numpy.zeros(17 + 1)
TEMP1 = numpy.zeros(289 + 1)
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
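        # CREATE CUMULATIVE SUM ARRAY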
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
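        # FIND FLUORESCENCE OR AUGER TRANSITION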
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
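                    # AUGER OR COSTER KRONIG
                    # STORE EJECTED ELECTRON AND UPDATE NOCC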
ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I]
[IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J
][IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B1')
sys.exit()
GOTO100()
def CALCB2(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
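    # CALCULATE SECOND-GENERATION CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL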
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
    TEMP = [0 for x in range(17 + 1)]
    TEMP1 = [0 for x in range(289 + 1)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I]
[IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J
][IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B2')
sys.exit()
GOTO100()
def CALCB3(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
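    # CALCULATE THIRD-GENERATION CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL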
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
    TEMP = [0 for x in range(17 + 1)]
    TEMP1 = [0 for x in range(289 + 1)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I,
IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J
][IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B3')
sys.exit()
GOTO100()
def CALCB4(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
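    # CALCULATE FOURTH-GENERATION CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL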
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
    TEMP = [0 for x in range(17 + 1)]
    TEMP1 = [0 for x in range(289 + 1)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]
[LGAS]] - ELEV[I, IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,
IZ[KGAS][LGAS]] + ELEV[I, IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,
IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
while counter117:
counter117 = 0
R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B4')
sys.exit()
GOTO100()
def CALCB5(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):
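    # CALCULATE FIFTH-GENERATION CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL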
global ELEV
global NSDEG
global AA
global BB
global SCR, SCR1
global PRSH
global ESH
global AUG
global RAD
global PRSHBT
global IZ
global INIOCC
global ISHLMX
global AMZ
global NOCC
global AUGR
global RADR
global IONSUM0
global IFLSUM0
global ESTORE0
global EPHOTON0
global DRXE0
global DRYE0
global DRZE0
global DRX0
global DRY0
global DRZ0
global IONSUM
global IFLSUM
global ESTORE
global EPHOTON
global DRXE
global DRYE
global DRZE
global DRX
global DRY
global DRZ
    TEMP = [0 for x in range(17 + 1)]
    TEMP1 = [0 for x in range(289 + 1)]
ISTART = IONSUM[NVAC]
ISTARTF = IFLSUM[NVAC]
ISHELLST = ISHELL
API = numpy.arccos(-1.0)
TWOPI = 2.0 * API
def GOTO100():
ELEFT = ELECEN
ISHELL = ISHELLST
INIT = 1
for I in range(1, 17):
NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]
IONSUM[NVAC] = ISTART + 1
IFLSUM[NVAC] = ISTARTF
ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL][IZ[KGAS][LGAS]]
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
ELEFT = ELEFT - ELECN
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1
APE = AA[ISHELL]
BPE = BB[ISHELL]
ANGGEN(APE, BPE, THET)
if THET < 0.0:
THET = THET + API
R3 = DRAND48(RDUM)
PHI = TWOPI * R3
DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,
DRXX, DRYY, DRZZ)
DRXE[NVAC][IONSUM[NVAC]] = DRXX
DRYE[NVAC][IONSUM[NVAC]] = DRYY
DRZE[NVAC][IONSUM[NVAC]] = DRZZ
def GOTO4():
IDUM = 1
if INIT > 1:
ELECN = ESTORE[NVAC][IONSUM[NVAC]]
INSUM = IONSUM[NVAC]
SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)
if JVAC == 0:
pass
else:
ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]] = ELECN
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',
IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ESHK
ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
def GOTO2():
UPDATE(KGAS, LGAS, ISHELL)
INIT = 2
TSUM = 0.0
for I in range(1, 17):
TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]
for J in range(1, 17):
TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]
if TSUM == 0.0:
return
for I in range(1, 17):
RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I
] / TSUM
for J in range(1, 17):
AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][
ISHELL][I][J] / TSUM
TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]
for I in range(2, 17):
TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]
TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2, 17):
TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]
for J in range(1, 16):
for I in range(1, 17):
TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1
] + TEMP1[I + J * 17 - 1]
R1 = DRAND48(RDUM)
for I in range(1, 17):
if R1 < TEMP[I]:
IFLSUM[NVAC] = IFLSUM[NVAC] + 1
EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL][IZ[KGAS]
[LGAS]] - ELEV[I][IZ[KGAS][LGAS]]
ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(
PHI)
DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(
PHI)
DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO2()
GOTO2()
counter116 = 1
while counter116:
counter116 = 0
R2 = R1 - TEMP[17]
for J in range(1, 17):
if counter116:
break
for I in range(1, 17):
                if R2 < TEMP1[I + (J - 1) * 17]:
ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,
IZ[KGAS][LGAS]] + ELEV[I, IZ[KGAS][LGAS] + 1]
) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,
IZ[KGAS][LGAS] + 1]) * 0.5
if ETEMP < 0.0:
counter117 = 1
                        while counter117:
                            counter117 = 0
                            R1 = DRAND48(RDUM)
if R1 < TEMP[17]:
counter117 = 1
counter116 = 1
break
IONSUM[NVAC] = IONSUM[NVAC] + 1
if IONSUM[NVAC] > 28:
print(
' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM='
, IONSUM[NVAC])
sys.exit()
ESTORE[NVAC][IONSUM[NVAC]] = ETEMP
ELEFT = ELEFT - abs(ETEMP)
if ELEFT < 0.0:
GOTO100()
R3 = DRAND48(RDUM)
THET = numpy.arccos(1.0 - 2.0 * R3)
R4 = DRAND48(RDUM)
PHI = TWOPI * R4
DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET
) * numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL
] + 1
NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1
NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1
VACANCY(KGAS, LGAS, ISHELL, ILAST)
if ILAST == 1:
return
GOTO4()
GOTO4()
print(' ERROR IN CASCADE B5')
sys.exit()
GOTO100()
import numpy
import sys
def CALCB1(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
# IMPLICIT #real*8(A-H,O-Z)
# IMPLICIT #integer*8(I-N)
#CHARACTER*6
# SCR=""#(17)
# SCR1=""#(17)
#COMMON/GENCAS/
global ELEV#[17,79]
global NSDEG#(17)
global AA#[17]
global BB#[17]
global SCR,SCR1
#COMMON/MIXC/
global PRSH#(6,3,17,17)
global ESH#(6,3,17)
global AUG#(6,3,17,17,17)
global RAD#[6,3,17,17]
global PRSHBT#(6,3,17)
global IZ#[6,3]
global INIOCC#(6,3,17)
global ISHLMX#(6,3)
global AMZ#[6,3]
#COMMON/UPD/
global NOCC#(6,3,17)
global AUGR#(6,3,17,17,17)
global RADR#(6,3,17,17)
#COMMON/CALCASB/
global IONSUM0#(10)
global IFLSUM0#(10)
global ESTORE0#(10,28)
global EPHOTON0#(10,28)
global DRXE0#(10,28)
global DRYE0#(10,28)
global DRZE0#(10,28)
global DRX0#(10,28)
global DRY0#(10,28)
global DRZ0#(10,28)
#COMMON/CALCAS1B/
global IONSUM#(10)
global IFLSUM#(10)
global ESTORE#(10,28)
global EPHOTON#(10,28)
global DRXE#(10,28)
global DRYE#(10,28)
global DRZE#(10,28)
global DRX#(10,28)
global DRY#(10,28)
global DRZ#[10,28]
#DIMENSION
TEMP=numpy.zeros((17+1))
TEMP1=numpy.zeros((289+1))
#
# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
#
ISTART=IONSUM[NVAC]
ISTARTF=IFLSUM[NVAC]
ISHELLST=ISHELL
API=numpy.arccos(-1.00)
TWOPI=2.00*API
def GOTO100():
ELEFT=ELECEN
ISHELL=ISHELLST
INIT=1
# SET STARTING ARRAY NOCC EQUAL TO INIOCC
for I in range(1,17):
NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
IONSUM[NVAC]=ISTART+1
IFLSUM[NVAC]=ISTARTF
# STORE PHOTOELECTRON ENERGY AND ANGLE
ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
ELEFT=ELEFT-ELECN
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
        # USE PHOTOELECTRON ANGULAR DISTRIBUTION
APE=AA[ISHELL]
BPE=BB[ISHELL]
ANGGEN(APE,BPE,THET)
if(THET < 0.0):
THET=THET+API
R3=DRAND48(RDUM)
PHI=TWOPI*R3
DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
DRXE[NVAC][IONSUM[NVAC]]=DRXX
DRYE[NVAC][IONSUM[NVAC]]=DRYY
DRZE[NVAC][IONSUM[NVAC]]=DRZZ
# LOOP AROUND CASCADE
def GOTO4():
# CHECK FOR ELECTRON SHAKEOFF
IDUM=1
if(INIT > 1):
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
INSUM=IONSUM[NVAC]
SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
# CALCULATE ENERGY OF ELECTRON
if(JVAC == 0):
pass
else:
# ELECTRON + SHAKEOFF
ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]]=ELECN
IONSUM[NVAC]=IONSUM[NVAC]+1
# MAXIMUM ION CHARGE STATE =28
if(IONSUM[NVAC]> 28):
print(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ESHK
ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
def GOTO2():
UPDATE(KGAS,LGAS,ISHELL)
INIT=2
# CHOOSE FLUORESCENCE OR AUGER TRANSITION
TSUM=0.0
for I in range(1,17):
TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
for J in range(1,17):
TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
# NO MORE TRANSITIONS POSSIBLE
if(TSUM == 0.0):
return
# NORMALISE TO 1.0
for I in range(1,17):
RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
for J in range(1,17):
AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
# CREATE CUMULATIVE SUM ARRAY
TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
for I in range(2,17):
TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2,17):
TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
for J in range(1,16):
for I in range(1,17):
TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
# FIND FLUORESCENCE OR AUGER TRANSITION
R1=DRAND48(RDUM)
for I in range(1,17):
if(R1 < TEMP[I]) :
# STORE PHOTON ENERGY AND ANGLE : UPDATE NOCC
IFLSUM[NVAC]=IFLSUM[NVAC]+1
EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]
ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO2()
# endif
GOTO2() ## calling the internal function first time
counter116=1
while(counter116):
counter116=0
R2=R1-TEMP[17]
for J in range(1,17):
if(counter116):
break
for I in range(1,17):
if(R2 < TEMP1[I+((J-1)*17)]) :
# AUGER OR COSTER KRONIG
# STORE EJECTED ELECTRON AND UPDATE NOCC
ETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I][IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5
if(ETEMP < 0.0):
# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
counter117=1
while(counter117):
counter117=0
R1=DRAND48(RDUM)
if(R1 < TEMP[17]):
counter117=1
counter116=1
break
# endif
IONSUM[NVAC]=IONSUM[NVAC]+1
if(IONSUM[NVAC]> 28) :
print(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
ELEFT=ELEFT-abs(ETEMP)
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO4()
# endif
GOTO4()
print(' ERROR IN CASCADE B1')
sys.exit()
GOTO100()
# end
def CALCB2(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
# IMPLICIT #real*8(A-H,O-Z)
# IMPLICIT #integer*8(I-N)
#CHARACTER*6
# SCR=""#(17)
# SCR1=""#(17)
#COMMON/GENCAS/
global ELEV#[17,79]
global NSDEG#(17)
global AA#[17]
global BB#[17]
global SCR,SCR1
#COMMON/MIXC/
global PRSH#(6,3,17,17)
global ESH#(6,3,17)
global AUG#(6,3,17,17,17)
global RAD#[6,3,17,17]
global PRSHBT#(6,3,17)
global IZ#[6,3]
global INIOCC#(6,3,17)
global ISHLMX#(6,3)
global AMZ#[6,3]
#COMMON/UPD/
global NOCC#(6,3,17)
global AUGR#(6,3,17,17,17)
global RADR#(6,3,17,17)
#COMMON/CALCAS1B/
global IONSUM0#(10)
global IFLSUM0#(10)
global ESTORE0#(10,28)
global EPHOTON0#(10,28)
global DRXE0#(10,28)
global DRYE0#(10,28)
global DRZE0#(10,28)
global DRX0#(10,28)
global DRY0#(10,28)
global DRZ0#(10,28)
#COMMON/CALCAS2B/
global IONSUM#(10)
global IFLSUM#(10)
global ESTORE#(10,28)
global EPHOTON#(10,28)
global DRXE#(10,28)
global DRYE#(10,28)
global DRZE#(10,28)
global DRX#(10,28)
global DRY#(10,28)
global DRZ#[10,28]
#DIMENSION
    TEMP=[0 for x in range(17+1)]
    TEMP1=[0 for x in range(289+1)]
#
# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
#
ISTART=IONSUM[NVAC]
ISTARTF=IFLSUM[NVAC]
ISHELLST=ISHELL
API=numpy.arccos(-1.00)
TWOPI=2.00*API
def GOTO100():
ELEFT=ELECEN
ISHELL=ISHELLST
INIT=1
# SET STARTING ARRAY NOCC EQUAL TO INIOCC
for I in range(1,17):
NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
IONSUM[NVAC]=ISTART+1
IFLSUM[NVAC]=ISTARTF
        # STORE INITIAL PHOTOELECTRON ENERGY AND ANGLE
ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
ELEFT=ELEFT-ELECN
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
# USE PHOTOELECTRON ANGULAR DISTRIBUTION
APE=AA[ISHELL]
BPE=BB[ISHELL]
ANGGEN(APE,BPE,THET)
if(THET < 0.0):
THET=THET+API
R3=DRAND48(RDUM)
PHI=TWOPI*R3
DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
DRXE[NVAC][IONSUM[NVAC]]=DRXX
DRYE[NVAC][IONSUM[NVAC]]=DRYY
DRZE[NVAC][IONSUM[NVAC]]=DRZZ
# LOOP AROUND CASCADE
def GOTO4():
# CHECK FOR ELECTRON SHAKEOFF
IDUM=1
if(INIT > 1):
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
INSUM=IONSUM[NVAC]
SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
# CALCULATE ENERGY OF ELECTRON
if(JVAC == 0):
pass
else:
# ELECTRON + SHAKEOFF
ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]]=ELECN
IONSUM[NVAC]=IONSUM[NVAC]+1
# MAXIMUM ION CHARGE STATE =28
if(IONSUM[NVAC]> 28) :
print(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ESHK
ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
def GOTO2():
UPDATE(KGAS,LGAS,ISHELL)
INIT=2
# CHOOSE FLUORESCENCE OR AUGER TRANSITION
TSUM=0.0
for I in range(1,17):
TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
for J in range(1,17):
TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
# NO MORE TRANSITIONS POSSIBLE
if(TSUM == 0.0):
return
# NORMALISE TO 1.0
for I in range(1,17):
RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
for J in range(1,17):
AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
# CREATE CUMULATIVE SUM ARRAY
TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
for I in range(2,17):
TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2,17):
TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
for J in range(1,16):
for I in range(1,17):
TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
# FIND FLUORESCENCE OR AUGER TRANSITION
R1=DRAND48(RDUM)
for I in range(1,17):
if(R1 < TEMP[I]) :
# STORE PHOTON ENERGY AND UPDATE NOCC
IFLSUM[NVAC]=IFLSUM[NVAC]+1
EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]
ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO2()
# endif
#16 CONTINUE
GOTO2()
counter116=1
while(counter116):
counter116=0
R2=R1-TEMP[17]
for J in range(1,17):
if(counter116):
break
for I in range(1,17):
if(R2 < TEMP1[I+((J-1)*17)]) :
# AUGER OR COSTER KRONIG
# STORE EJECTED ELECTRON AND UPDATE NOCC
ETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I][IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5
if(ETEMP < 0.0):
# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
counter117=1
while(counter117):
counter117=0
R1=DRAND48(RDUM)
if(R1 < TEMP[17]):
counter117=1
counter116=1
break
# endif
IONSUM[NVAC]=IONSUM[NVAC]+1
if(IONSUM[NVAC]> 28) :
print(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
ELEFT=ELEFT-abs(ETEMP)
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO4()
# endif
GOTO4()
print(' ERROR IN CASCADE B2')
sys.exit()
GOTO100()
# end
def CALCB3(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
# IMPLICIT #real*8(A-H,O-Z)
# IMPLICIT #integer*8(I-N)
#CHARACTER*6
# SCR=""#(17)
# SCR1=""#(17)
#COMMON/GENCAS/
global ELEV#[17,79]
global NSDEG#(17)
global AA#[17]
global BB#[17]
global SCR,SCR1
#COMMON/MIXC/
global PRSH#(6,3,17,17)
global ESH#(6,3,17)
global AUG#(6,3,17,17,17)
global RAD#[6,3,17,17]
global PRSHBT#(6,3,17)
global IZ#[6,3]
global INIOCC#(6,3,17)
global ISHLMX#(6,3)
global AMZ#[6,3]
#COMMON/UPD/
global NOCC#(6,3,17)
global AUGR#(6,3,17,17,17)
global RADR#(6,3,17,17)
#COMMON/CALCAS2B/
global IONSUM0#(10)
global IFLSUM0#(10)
global ESTORE0#(10,28)
global EPHOTON0#(10,28)
global DRXE0#(10,28)
global DRYE0#(10,28)
global DRZE0#(10,28)
global DRX0#(10,28)
global DRY0#(10,28)
global DRZ0#(10,28)
#COMMON/CALCAS3B/
global IONSUM#(10)
global IFLSUM#(10)
global ESTORE#(10,28)
global EPHOTON#(10,28)
global DRXE#(10,28)
global DRYE#(10,28)
global DRZE#(10,28)
global DRX#(10,28)
global DRY#(10,28)
global DRZ#[10,28]
#DIMENSION
# ELEMENTS 1..17 AND 1..289 ARE ADDRESSED (FORTRAN 1-BASED INDEXING), SO ALLOCATE ONE EXTRA
TEMP=[0 for x in range(18)]
TEMP1=[0 for x in range(290)]
#
# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
# WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
#
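# CALCB3 TREATS THE NEXT (3RD) GENERATION OF VACANCIES FOR PRIMARY VACANCY NVAC; ELECTRON AND
# PHOTON ENERGIES AND DIRECTION COSINES ARE ACCUMULATED IN THE CALCAS3B ARRAYS (ESTORE,EPHOTON,DRXE..DRZ)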
ISTART=IONSUM[NVAC]
ISTARTF=IFLSUM[NVAC]
ISHELLST=ISHELL
API=numpy.arccos(-1.00)
TWOPI=2.00*API
def GOTO100():
ELEFT=ELECEN
ISHELL=ISHELLST
INIT=1
# SET STARTING ARRAY NOCC EQUAL TO INIOCC
for I in range(1,17):
NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
IONSUM[NVAC]=ISTART+1
IFLSUM[NVAC]=ISTARTF
# STORE PHOTOELECTRON ENERGY AND ANGLE
ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
ELEFT=ELEFT-ELECN
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
# USE PHOTOELECTRON ANGULAR DISTRIBUTION
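# ANGGEN SAMPLES THE POLAR ANGLE FROM THE SHELL-DEPENDENT DISTRIBUTION WITH PARAMETERS AA(ISHELL),BB(ISHELL);
# DRCOS THEN ROTATES THE RESULT INTO THE FRAME OF THE INCOMING DIRECTION (DRX0,DRY0,DRZ0)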
APE=AA[ISHELL]
BPE=BB[ISHELL]
ANGGEN(APE,BPE,THET)
if(THET < 0.0):
THET=THET+API
R3=DRAND48(RDUM)
PHI=TWOPI*R3
DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
DRXE[NVAC][IONSUM[NVAC]]=DRXX
DRYE[NVAC][IONSUM[NVAC]]=DRYY
DRZE[NVAC][IONSUM[NVAC]]=DRZZ
# LOOP AROUND CASCADE
def GOTO4():
# CHECK FOR ELECTRON SHAKEOFF
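# SHAKE MAY EJECT AN EXTRA (SHAKE-OFF) ELECTRON OF ENERGY ESHK FROM SHELL JVAC; JVAC=0 MEANS NO SHAKEOFF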
IDUM=1
if(INIT > 1):
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
INSUM=IONSUM[NVAC]
SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
# CALCULATE ENERGY OF ELECTRON
if(JVAC == 0):
pass
else:
# ELECTRON + SHAKEOFF
ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]]=ELECN
IONSUM[NVAC]=IONSUM[NVAC]+1
# MAXIMUM ION CHARGE STATE =28
if(IONSUM[NVAC]> 28) :
print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ESHK
ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION ANGLE
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
def GOTO2():
UPDATE(KGAS,LGAS,ISHELL)
INIT=2
# CHOOSE FLUORESCENCE OR AUGER TRANSITION
TSUM=0.0
for I in range(1,17):
TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
for J in range(1,17):
TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
# NO MORE TRANSITIONS POSSIBLE
if(TSUM == 0.0):
return
# NORMALISE TO 1.0
for I in range(1,17):
RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
for J in range(1,17):
AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
# CREATE CUMULATIVE SUM ARRAY
TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
for I in range(2,17):
TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2,17):
TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
for J in range(1,16):
for I in range(1,17):
TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
# FIND FLUORESCENCE OR AUGER TRANSITION
R1=DRAND48(RDUM)
for I in range(1,17):
if(R1 < TEMP[I]) :
# STORE PHOTON ENERGY AND UPDATE NOCC
IFLSUM[NVAC]=IFLSUM[NVAC]+1
EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]
ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO2()
# endif
GOTO2()
counter116=1
while(counter116):
counter116=0
R2=R1-TEMP[17]
for J in range(1,17):
if(counter116):
break
for I in range(1,17):
if(R2 < TEMP1[I+((J-1)*17)]) :
# AUGER OR COSTER KRONIG
# STORE EJECTED ELECTRON AND UPDATE NOCC
ETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5
if(ETEMP < 0.0):
# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
counter117=1
while(counter117):
counter117=0
R1=DRAND48(RDUM)
if(R1 < TEMP[17]):
counter117=1
counter116=1
break
# endif
IONSUM[NVAC]=IONSUM[NVAC]+1
if(IONSUM[NVAC]> 28) :
print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
ELEFT=ELEFT-abs(ETEMP)
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO4()
# endif
GOTO4()
print(' ERROR IN CASCADE B3')
sys.exit()
GOTO100()
#end
def CALCB4(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
# IMPLICIT #real*8(A-H,O-Z)
# IMPLICIT #integer*8(I-N)
#CHARACTER*6
# SCR=""#(17)
# SCR1=""#(17)
#COMMON/GENCAS/
global ELEV#[17,79]
global NSDEG#(17)
global AA#[17]
global BB#[17]
global SCR,SCR1
#COMMON/MIXC/
global PRSH#(6,3,17,17)
global ESH#(6,3,17)
global AUG#(6,3,17,17,17)
global RAD#[6,3,17,17]
global PRSHBT#(6,3,17)
global IZ#[6,3]
global INIOCC#(6,3,17)
global ISHLMX#(6,3)
global AMZ#[6,3]
#COMMON/UPD/
global NOCC#(6,3,17)
global AUGR#(6,3,17,17,17)
global RADR#(6,3,17,17)
#COMMON/CALCAS3B/
global IONSUM0#(10)
global IFLSUM0#(10)
global ESTORE0#(10,28)
global EPHOTON0#(10,28)
global DRXE0#(10,28)
global DRYE0#(10,28)
global DRZE0#(10,28)
global DRX0#(10,28)
global DRY0#(10,28)
global DRZ0#(10,28)
#COMMON/CALCAS4B/
global IONSUM#(10)
global IFLSUM#(10)
global ESTORE#(10,28)
global EPHOTON#(10,28)
global DRXE#(10,28)
global DRYE#(10,28)
global DRZE#(10,28)
global DRX#(10,28)
global DRY#(10,28)
global DRZ#[10,28]
#DIMENSION
# ELEMENTS 1..17 AND 1..289 ARE ADDRESSED (FORTRAN 1-BASED INDEXING), SO ALLOCATE ONE EXTRA
TEMP=[0 for x in range(18)]
TEMP1=[0 for x in range(290)]
#
# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
# WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
#
ISTART=IONSUM[NVAC]
ISTARTF=IFLSUM[NVAC]
ISHELLST=ISHELL
API=numpy.arccos(-1.00)
TWOPI=2.00*API
def GOTO100():
ELEFT=ELECEN
ISHELL=ISHELLST
INIT=1
# SET STARTING ARRAY NOCC EQUAL TO INIOCC
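# NOCC HOLDS THE CURRENT SHELL OCCUPATION NUMBERS AND IS UPDATED AFTER EVERY RADIATIVE OR AUGER TRANSITION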
for I in range(1,17):
NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
IONSUM[NVAC]=ISTART+1
IFLSUM[NVAC]=ISTARTF
# STORE PHOTOELECTRON ENERGY AND ANGLE
ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
ELEFT=ELEFT-ELECN
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
# USE PHOTOELECTRON ANGULAR DISTRIBUTION
APE=AA[ISHELL]
BPE=BB[ISHELL]
ANGGEN(APE,BPE,THET)
if(THET < 0.0):
THET=THET+API
R3=DRAND48(RDUM)
PHI=TWOPI*R3
DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
DRXE[NVAC][IONSUM[NVAC]]=DRXX
DRYE[NVAC][IONSUM[NVAC]]=DRYY
DRZE[NVAC][IONSUM[NVAC]]=DRZZ
# LOOP AROUND CASCADE
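# GOTO4 AND GOTO2 REPRODUCE THE ORIGINAL FORTRAN GOTO LOOP: GOTO4 HANDLES SHAKEOFF FOR THE CURRENT
# ELECTRON, GOTO2 RELAXES THE CURRENT VACANCY AND REPEATS UNTIL VACANCY RETURNS ILAST=1 (NO VACANCY LEFT)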
def GOTO4():
# CHECK FOR ELECTRON SHAKEOFF
IDUM=1
if(INIT > 1):
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
INSUM=IONSUM[NVAC]
SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
# CALCULATE ENERGY OF ELECTRON
if(JVAC == 0):
pass
else:
# ELECTRON + SHAKEOFF
ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]]=ELECN
IONSUM[NVAC]=IONSUM[NVAC]+1
# MAXIMUM ION CHARGE STATE =28
if(IONSUM[NVAC]> 28) :
print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ESHK
ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
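# IF THE REMAINING ENERGY BUDGET ELEFT GOES NEGATIVE THE CASCADE IS INCONSISTENT AND IS RESTARTED AT GOTO100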
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION ANGLE
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
def GOTO2():
UPDATE(KGAS,LGAS,ISHELL)
INIT=2
# CHOOSE FLUORESCENCE OR AUGER TRANSITION
TSUM=0.0
for I in range(1,17):
TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
for J in range(1,17):
TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
# NO MORE TRANSITIONS POSSIBLE
if(TSUM == 0.0):
return
# NORMALISE TO 1.0
for I in range(1,17):
RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
for J in range(1,17):
AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
# CREATE CUMULATIVE SUM ARRAY
TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
for I in range(2,17):
TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2,17):
TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
for J in range(1,16):
for I in range(1,17):
TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
# FIND FLUORESCENCE OR AUGER TRANSITION
R1=DRAND48(RDUM)
for I in range(1,17):
if(R1 < TEMP[I]) :
# STORE PHOTON ENERGY AND UPDATE NOCC
IFLSUM[NVAC]=IFLSUM[NVAC]+1
EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]
ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO2()
# endif
GOTO2()
counter116=1
while(counter116):
counter116=0
R2=R1-TEMP[17]
for J in range(1,17):
if(counter116):
break
for I in range(1,17):
if(R2 < TEMP1[I+((J-1)*17)]) :
# AUGER OR COSTER KRONIG
# STORE EJECTED ELECTRON AND UPDATE NOCC
ETEMP=ELEV[ISHELL,IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I,IZ[KGAS][LGAS]+1])*0.5-(ELEV[J,IZ[KGAS][LGAS]]+ELEV[J,IZ[KGAS][LGAS]+1])*0.5
if(ETEMP < 0.0):
# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
counter117=1
while(counter117):
counter117=0
R1=DRAND48(RDUM)
if(R1 < TEMP[17]):
counter117=1
counter116=1
break
# endif
IONSUM[NVAC]=IONSUM[NVAC]+1
if(IONSUM[NVAC]> 28) :
print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
ELEFT=ELEFT-abs(ETEMP)
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO4()
# endif
GOTO4()
print(' ERROR IN CASCADE B4')
sys.exit()
GOTO100()
# end
def CALCB5(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
# IMPLICIT #real*8(A-H,O-Z)
# IMPLICIT #integer*8(I-N)
# SCR=""
# SCR1=""#(17)
#COMMON/GENCAS/
global ELEV#[17,79]
global NSDEG#(17)
global AA#[17]
global BB#[17]
global SCR,SCR1
#COMMON/MIXC/
global PRSH#(6,3,17,17)
global ESH#(6,3,17)
global AUG#(6,3,17,17,17)
global RAD#[6,3,17,17]
global PRSHBT#(6,3,17)
global IZ#[6,3]
global INIOCC#(6,3,17)
global ISHLMX#(6,3)
global AMZ#[6,3]
#COMMON/UPD/
global NOCC#(6,3,17)
global AUGR#(6,3,17,17,17)
global RADR#(6,3,17,17)
#COMMON/CALCAS4B/
global IONSUM0#(10)
global IFLSUM0#(10)
global ESTORE0#(10,28)
global EPHOTON0#(10,28)
global DRXE0#(10,28)
global DRYE0#(10,28)
global DRZE0#(10,28)
global DRX0#(10,28)
global DRY0#(10,28)
global DRZ0#(10,28)
#COMMON/CALCAS5B/
global IONSUM#(10)
global IFLSUM#(10)
global ESTORE#(10,28)
global EPHOTON#(10,28)
global DRXE#(10,28)
global DRYE#(10,28)
global DRZE#(10,28)
global DRX#(10,28)
global DRY#(10,28)
global DRZ#[10,28]
#DIMENSION
# ELEMENTS 1..17 AND 1..289 ARE ADDRESSED (FORTRAN 1-BASED INDEXING), SO ALLOCATE ONE EXTRA
TEMP=[0 for x in range(18)]
TEMP1=[0 for x in range(290)]
#
# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
# WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
#
ISTART=IONSUM[NVAC]
ISTARTF=IFLSUM[NVAC]
ISHELLST=ISHELL
API=numpy.arccos(-1.00)
TWOPI=2.00*API
def GOTO100():
ELEFT=ELECEN
ISHELL=ISHELLST
INIT=1
# SET STARTING ARRAY NOCC EQUAL TO INIOCC
for I in range(1,17):
NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
IONSUM[NVAC]=ISTART+1
IFLSUM[NVAC]=ISTARTF
ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL][IZ[KGAS][LGAS]]
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
ELEFT=ELEFT-ELECN
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
# USE PHOTOELECTRON ANGULAR DISTRIBUTION
APE=AA[ISHELL]
BPE=BB[ISHELL]
ANGGEN(APE,BPE,THET)
if(THET < 0.0):
THET=THET+API
R3=DRAND48(RDUM)
PHI=TWOPI*R3
DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
DRXE[NVAC][IONSUM[NVAC]]=DRXX
DRYE[NVAC][IONSUM[NVAC]]=DRYY
DRZE[NVAC][IONSUM[NVAC]]=DRZZ
# LOOP AROUND CASCADE
def GOTO4():
# CHECK FOR ELECTRON SHAKEOFF
IDUM=1
if(INIT > 1):
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
INSUM=IONSUM[NVAC]
SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
# CALCULATE ENERGY OF ELECTRON
if(JVAC == 0):
pass
else:
# ELECTRON + SHAKEOFF
ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]]=ELECN
IONSUM[NVAC]=IONSUM[NVAC]+1
# MAXIMUM ION CHARGE STATE =28
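# THE (10,28) STORAGE ARRAYS ALLOW AT MOST 28 ELECTRONS OR PHOTONS PER PRIMARY VACANCY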
if(IONSUM[NVAC]> 28) :
print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ESHK
ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION ANGLE
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
def GOTO2():
UPDATE(KGAS,LGAS,ISHELL)
INIT=2
# CHOOSE FLUORESCENCE OR AUGER TRANSITION
TSUM=0.0
for I in range(1,17):
TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
for J in range(1,17):
TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
# NO MORE TRANSITIONS POSSIBLE
if(TSUM == 0.0):
return
# NORMALISE TO 1.0
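# AFTER NORMALISATION THE RADIATIVE PLUS AUGER BRANCHING RATIOS OF THE CURRENT VACANCY SUM TO 1.0,
# SO A SINGLE UNIFORM DEVIATE R1 SELECTS THE DECAY CHANNEL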
for I in range(1,17):
RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
for J in range(1,17):
AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
# CREATE CUMULATIVE SUM ARRAY
TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
for I in range(2,17):
TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2,17):
TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
for J in range(1,16):
for I in range(1,17):
TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
# FIND FLUORESCENCE OR AUGER TRANSITION
R1=DRAND48(RDUM)
for I in range(1,17):
if(R1 < TEMP[I]) :
# STORE PHOTON ENERGY AND UPDATE NOCC
IFLSUM[NVAC]=IFLSUM[NVAC]+1
EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL][IZ[KGAS][LGAS]]-ELEV[I][IZ[KGAS][LGAS]]
ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO2()
# endif
GOTO2()
counter116=1
while(counter116):
counter116=0
R2=R1-TEMP[17]
for J in range(1,17):
if(counter116):
break
for I in range(1,17):
if(R2 < TEMP1[I+((J-1)*17)]) :
# AUGER OR COSTER KRONIG
# STORE EJECTED ELECTRON AND UPDATE NOCC
ETEMP=ELEV[ISHELL,IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I,IZ[KGAS][LGAS]+1])*0.5-(ELEV[J,IZ[KGAS][LGAS]]+ELEV[J,IZ[KGAS][LGAS]+1])*0.5
if(ETEMP < 0.0):
# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
counter117=1
while(counter117):
counter117=0
R1=DRAND48(RDUM)
if(R1 < TEMP[17]):
counter117=1
counter116=1
break
# endif
IONSUM[NVAC]=IONSUM[NVAC]+1
if(IONSUM[NVAC]> 28) :
print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
ELEFT=ELEFT-abs(ETEMP)
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO4()
# endif
GOTO4()
print(' ERROR IN CASCADE B5')
sys.exit()
GOTO100()
# end
global NOCC\n global AUGR\n global RADR\n global IONSUM0\n global IFLSUM0\n global ESTORE0\n global EPHOTON0\n global DRXE0\n global DRYE0\n global DRZE0\n global DRX0\n global DRY0\n global DRZ0\n global IONSUM\n global IFLSUM\n global ESTORE\n global EPHOTON\n global DRXE\n global DRYE\n global DRZE\n global DRX\n global DRY\n global DRZ\n TEMP = [(0) for x in range(17)]\n TEMP1 = [(0) for x in range(289)]\n ISTART = IONSUM[NVAC]\n ISTARTF = IFLSUM[NVAC]\n ISHELLST = ISHELL\n API = numpy.arccos(-1.0)\n TWOPI = 2.0 * API\n\n def GOTO100():\n ELEFT = ELECEN\n ISHELL = ISHELLST\n INIT = 1\n for I in range(1, 17):\n NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]\n IONSUM[NVAC] = ISTART + 1\n IFLSUM[NVAC] = ISTARTF\n ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n ELEFT = ELEFT - ELECN\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1\n APE = AA[ISHELL]\n BPE = BB[ISHELL]\n ANGGEN(APE, BPE, THET)\n if THET < 0.0:\n THET = THET + API\n R3 = DRAND48(RDUM)\n PHI = TWOPI * R3\n DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,\n DRXX, DRYY, DRZZ)\n DRXE[NVAC][IONSUM[NVAC]] = DRXX\n DRYE[NVAC][IONSUM[NVAC]] = DRYY\n DRZE[NVAC][IONSUM[NVAC]] = DRZZ\n\n def GOTO4():\n IDUM = 1\n if INIT > 1:\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n INSUM = IONSUM[NVAC]\n SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)\n if JVAC == 0:\n pass\n else:\n ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n ESTORE[NVAC][IONSUM[NVAC]] = ELECN\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',\n IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ESHK\n ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n\n def GOTO2():\n UPDATE(KGAS, LGAS, ISHELL)\n INIT = 2\n TSUM = 0.0\n for I in range(1, 17):\n TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]\n for J in range(1, 17):\n TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]\n if TSUM == 0.0:\n return\n for I in range(1, 17):\n RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I\n ] / TSUM\n for J in range(1, 17):\n AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][\n ISHELL][I][J] / TSUM\n TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]\n for I in range(2, 17):\n TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]\n TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]\n for I in range(2, 17):\n TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]\n for J in range(1, 16):\n for I in range(1, 17):\n TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1\n ] + TEMP1[I + J * 17 - 1]\n R1 = DRAND48(RDUM)\n for I in range(1, 17):\n if R1 < TEMP[I]:\n IFLSUM[NVAC] = IFLSUM[NVAC] + 1\n EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]\n [LGAS]] - ELEV[I, IZ[KGAS][LGAS]]\n ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(\n PHI)\n DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(\n PHI)\n DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST 
== 1:\n return\n GOTO2()\n GOTO2()\n counter116 = 1\n while counter116:\n counter116 = 0\n R2 = R1 - TEMP[17]\n for J in range(1, 17):\n if counter116:\n break\n for I in range(1, 17):\n if R2 < TEMP1[I + (J - 1) * 17]:\n ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I,\n IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]\n ) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J\n ][IZ[KGAS][LGAS] + 1]) * 0.5\n if ETEMP < 0.0:\n counter117 = 1\n while counter117:\n counter117 = 0\n R1 = DRAND48(RDUM)\n if R1 < TEMP[17]:\n counter117 = 1\n counter116 = 1\n break\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(\n ' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM='\n , IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ETEMP\n ELEFT = ELEFT - abs(ETEMP)\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL\n ] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO4()\n GOTO4()\n print(' ERROR IN CASCADE B3')\n sys.exit()\n GOTO100()\n\n\ndef CALCB4(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):\n global ELEV\n global NSDEG\n global AA\n global BB\n global SCR, SCR1\n global PRSH\n global ESH\n global AUG\n global RAD\n global PRSHBT\n global IZ\n global INIOCC\n global ISHLMX\n global AMZ\n global NOCC\n global AUGR\n global RADR\n global IONSUM0\n global IFLSUM0\n global ESTORE0\n global EPHOTON0\n global DRXE0\n global DRYE0\n global DRZE0\n global DRX0\n global DRY0\n global DRZ0\n global IONSUM\n global IFLSUM\n global ESTORE\n global EPHOTON\n global DRXE\n global DRYE\n global DRZE\n global DRX\n global DRY\n global DRZ\n TEMP = [(0) for x in range(17)]\n TEMP1 = [(0) for x in range(289)]\n ISTART = IONSUM[NVAC]\n ISTARTF = IFLSUM[NVAC]\n ISHELLST = ISHELL\n API = numpy.arccos(-1.0)\n TWOPI = 2.0 * API\n\n def GOTO100():\n ELEFT = ELECEN\n ISHELL = ISHELLST\n INIT = 1\n for I in range(1, 17):\n NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]\n IONSUM[NVAC] = ISTART + 1\n IFLSUM[NVAC] = ISTARTF\n ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n ELEFT = ELEFT - ELECN\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1\n APE = AA[ISHELL]\n BPE = BB[ISHELL]\n ANGGEN(APE, BPE, THET)\n if THET < 0.0:\n THET = THET + API\n R3 = DRAND48(RDUM)\n PHI = TWOPI * R3\n DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,\n DRXX, DRYY, DRZZ)\n DRXE[NVAC][IONSUM[NVAC]] = DRXX\n DRYE[NVAC][IONSUM[NVAC]] = DRYY\n DRZE[NVAC][IONSUM[NVAC]] = DRZZ\n\n def GOTO4():\n IDUM = 1\n if INIT > 1:\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n INSUM = IONSUM[NVAC]\n SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)\n if JVAC == 0:\n pass\n else:\n ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n ESTORE[NVAC][IONSUM[NVAC]] = ELECN\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',\n IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ESHK\n ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * 
numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n\n def GOTO2():\n UPDATE(KGAS, LGAS, ISHELL)\n INIT = 2\n TSUM = 0.0\n for I in range(1, 17):\n TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]\n for J in range(1, 17):\n TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]\n if TSUM == 0.0:\n return\n for I in range(1, 17):\n RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I\n ] / TSUM\n for J in range(1, 17):\n AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][\n ISHELL][I][J] / TSUM\n TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]\n for I in range(2, 17):\n TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]\n TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]\n for I in range(2, 17):\n TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]\n for J in range(1, 16):\n for I in range(1, 17):\n TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1\n ] + TEMP1[I + J * 17 - 1]\n R1 = DRAND48(RDUM)\n for I in range(1, 17):\n if R1 < TEMP[I]:\n IFLSUM[NVAC] = IFLSUM[NVAC] + 1\n EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]\n [LGAS]] - ELEV[I, IZ[KGAS][LGAS]]\n ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(\n PHI)\n DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(\n PHI)\n DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO2()\n GOTO2()\n counter116 = 1\n while counter116:\n counter116 = 0\n R2 = R1 - TEMP[17]\n for J in range(1, 17):\n if counter116:\n break\n for I in range(1, 17):\n if R2 < TEMP1[I + (J - 1) * 17]:\n ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,\n IZ[KGAS][LGAS]] + ELEV[I, IZ[KGAS][LGAS] + 1]\n ) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,\n IZ[KGAS][LGAS] + 1]) * 0.5\n if ETEMP < 0.0:\n counter117 = 1\n while counter117:\n counter117 = 0\n R1 = DRAND48(RDUM)\n if R1 < TEMP[17]:\n counter117 = 1\n counter116 = 1\n break\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(\n ' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM='\n , IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ETEMP\n ELEFT = ELEFT - abs(ETEMP)\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL\n ] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO4()\n GOTO4()\n print(' ERROR IN CASCADE B4')\n sys.exit()\n GOTO100()\n\n\ndef CALCB5(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):\n global ELEV\n global NSDEG\n global AA\n global BB\n global SCR, SCR1\n global PRSH\n global ESH\n global AUG\n global RAD\n global PRSHBT\n global IZ\n global INIOCC\n global ISHLMX\n global AMZ\n global NOCC\n global AUGR\n global RADR\n global IONSUM0\n global IFLSUM0\n global ESTORE0\n global EPHOTON0\n global DRXE0\n global DRYE0\n global DRZE0\n global DRX0\n global DRY0\n global DRZ0\n global IONSUM\n global IFLSUM\n global ESTORE\n global EPHOTON\n global DRXE\n global DRYE\n global DRZE\n 
global DRX\n global DRY\n global DRZ\n TEMP = [(0) for x in range(17)]\n TEMP1 = [(0) for x in range(289)]\n ISTART = IONSUM[NVAC]\n ISTARTF = IFLSUM[NVAC]\n ISHELLST = ISHELL\n API = numpy.arccos(-1.0)\n TWOPI = 2.0 * API\n\n def GOTO100():\n ELEFT = ELECEN\n ISHELL = ISHELLST\n INIT = 1\n for I in range(1, 17):\n NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]\n IONSUM[NVAC] = ISTART + 1\n IFLSUM[NVAC] = ISTARTF\n ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL][IZ[KGAS][LGAS]]\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n ELEFT = ELEFT - ELECN\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1\n APE = AA[ISHELL]\n BPE = BB[ISHELL]\n ANGGEN(APE, BPE, THET)\n if THET < 0.0:\n THET = THET + API\n R3 = DRAND48(RDUM)\n PHI = TWOPI * R3\n DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,\n DRXX, DRYY, DRZZ)\n DRXE[NVAC][IONSUM[NVAC]] = DRXX\n DRYE[NVAC][IONSUM[NVAC]] = DRYY\n DRZE[NVAC][IONSUM[NVAC]] = DRZZ\n\n def GOTO4():\n IDUM = 1\n if INIT > 1:\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n INSUM = IONSUM[NVAC]\n SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)\n if JVAC == 0:\n pass\n else:\n ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n ESTORE[NVAC][IONSUM[NVAC]] = ELECN\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',\n IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ESHK\n ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n\n def GOTO2():\n UPDATE(KGAS, LGAS, ISHELL)\n INIT = 2\n TSUM = 0.0\n for I in range(1, 17):\n TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]\n for J in range(1, 17):\n TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]\n if TSUM == 0.0:\n return\n for I in range(1, 17):\n RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I\n ] / TSUM\n for J in range(1, 17):\n AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][\n ISHELL][I][J] / TSUM\n TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]\n for I in range(2, 17):\n TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]\n TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]\n for I in range(2, 17):\n TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]\n for J in range(1, 16):\n for I in range(1, 17):\n TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1\n ] + TEMP1[I + J * 17 - 1]\n R1 = DRAND48(RDUM)\n for I in range(1, 17):\n if R1 < TEMP[I]:\n IFLSUM[NVAC] = IFLSUM[NVAC] + 1\n EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL][IZ[KGAS]\n [LGAS]] - ELEV[I][IZ[KGAS][LGAS]]\n ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(\n PHI)\n DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(\n PHI)\n DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO2()\n GOTO2()\n counter116 = 1\n while counter116:\n counter116 = 0\n R2 = R1 - TEMP[17]\n for J in range(1, 17):\n if counter116:\n break\n for I in range(1, 17):\n if R2 < TEMP1(I + (J - 1) * 17):\n ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,\n IZ[KGAS][LGAS]] + ELEV[I, 
IZ[KGAS][LGAS] + 1]\n ) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,\n IZ[KGAS][LGAS] + 1]) * 0.5\n if ETEMP < 0.0:\n counter117 = 1\n while counter117:\n R1 = DRAND48(RDUM)\n if R1 < TEMP[17]:\n counter117 = 1\n counter116 = 1\n break\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(\n ' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM='\n , IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ETEMP\n ELEFT = ELEFT - abs(ETEMP)\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL\n ] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO4()\n GOTO4()\n print(' ERROR IN CASCADE B5')\n sys.exit()\n GOTO100()\n",
"step-4": "import numpy\n\n\ndef CALCB1(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):\n global ELEV\n global NSDEG\n global AA\n global BB\n global SCR, SCR1\n global PRSH\n global ESH\n global AUG\n global RAD\n global PRSHBT\n global IZ\n global INIOCC\n global ISHLMX\n global AMZ\n global NOCC\n global AUGR\n global RADR\n global IONSUM0\n global IFLSUM0\n global ESTORE0\n global EPHOTON0\n global DRXE0\n global DRYE0\n global DRZE0\n global DRX0\n global DRY0\n global DRZ0\n global IONSUM\n global IFLSUM\n global ESTORE\n global EPHOTON\n global DRXE\n global DRYE\n global DRZE\n global DRX\n global DRY\n global DRZ\n TEMP = numpy.zeros(17 + 1)\n TEMP1 = numpy.zeros(289 + 1)\n ISTART = IONSUM[NVAC]\n ISTARTF = IFLSUM[NVAC]\n ISHELLST = ISHELL\n API = numpy.arccos(-1.0)\n TWOPI = 2.0 * API\n\n def GOTO100():\n ELEFT = ELECEN\n ISHELL = ISHELLST\n INIT = 1\n for I in range(1, 17):\n NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]\n IONSUM[NVAC] = ISTART + 1\n IFLSUM[NVAC] = ISTARTF\n ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n ELEFT = ELEFT - ELECN\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1\n APE = AA[ISHELL]\n BPE = BB[ISHELL]\n ANGGEN(APE, BPE, THET)\n if THET < 0.0:\n THET = THET + API\n R3 = DRAND48(RDUM)\n PHI = TWOPI * R3\n DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,\n DRXX, DRYY, DRZZ)\n DRXE[NVAC][IONSUM[NVAC]] = DRXX\n DRYE[NVAC][IONSUM[NVAC]] = DRYY\n DRZE[NVAC][IONSUM[NVAC]] = DRZZ\n\n def GOTO4():\n IDUM = 1\n if INIT > 1:\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n INSUM = IONSUM[NVAC]\n SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)\n if JVAC == 0:\n pass\n else:\n ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n ESTORE[NVAC][IONSUM[NVAC]] = ELECN\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',\n IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ESHK\n ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n\n def GOTO2():\n UPDATE(KGAS, LGAS, ISHELL)\n INIT = 2\n TSUM = 0.0\n for I in range(1, 17):\n TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]\n for J in range(1, 17):\n TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]\n if TSUM == 0.0:\n return\n for I in range(1, 17):\n RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I\n ] / TSUM\n for J in range(1, 17):\n AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][\n ISHELL][I][J] / TSUM\n TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]\n for I in range(2, 17):\n TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]\n TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]\n for I in range(2, 17):\n TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]\n for J in range(1, 16):\n for I in range(1, 17):\n TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1\n ] + TEMP1[I + J * 17 - 1]\n R1 = DRAND48(RDUM)\n for I in range(1, 17):\n if R1 < TEMP[I]:\n IFLSUM[NVAC] = IFLSUM[NVAC] + 1\n EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]\n [LGAS]] - ELEV[I, IZ[KGAS][LGAS]]\n ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * 
numpy.cos(\n PHI)\n DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(\n PHI)\n DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO2()\n GOTO2()\n counter116 = 1\n while counter116:\n counter116 = 0\n R2 = R1 - TEMP[17]\n for J in range(1, 17):\n if counter116:\n break\n for I in range(1, 17):\n if R2 < TEMP1[I + (J - 1) * 17]:\n ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I]\n [IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]\n ) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J\n ][IZ[KGAS][LGAS] + 1]) * 0.5\n if ETEMP < 0.0:\n counter117 = 1\n while counter117:\n counter117 = 0\n R1 = DRAND48(RDUM)\n if R1 < TEMP[17]:\n counter117 = 1\n counter116 = 1\n break\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(\n ' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM='\n , IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ETEMP\n ELEFT = ELEFT - abs(ETEMP)\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL\n ] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO4()\n GOTO4()\n print(' ERROR IN CASCADE B1')\n sys.exit()\n GOTO100()\n\n\ndef CALCB2(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):\n global ELEV\n global NSDEG\n global AA\n global BB\n global SCR, SCR1\n global PRSH\n global ESH\n global AUG\n global RAD\n global PRSHBT\n global IZ\n global INIOCC\n global ISHLMX\n global AMZ\n global NOCC\n global AUGR\n global RADR\n global IONSUM0\n global IFLSUM0\n global ESTORE0\n global EPHOTON0\n global DRXE0\n global DRYE0\n global DRZE0\n global DRX0\n global DRY0\n global DRZ0\n global IONSUM\n global IFLSUM\n global ESTORE\n global EPHOTON\n global DRXE\n global DRYE\n global DRZE\n global DRX\n global DRY\n global DRZ\n TEMP = [(0) for x in range(17)]\n TEMP1 = [(0) for x in range(289)]\n ISTART = IONSUM[NVAC]\n ISTARTF = IFLSUM[NVAC]\n ISHELLST = ISHELL\n API = numpy.arccos(-1.0)\n TWOPI = 2.0 * API\n\n def GOTO100():\n ELEFT = ELECEN\n ISHELL = ISHELLST\n INIT = 1\n for I in range(1, 17):\n NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]\n IONSUM[NVAC] = ISTART + 1\n IFLSUM[NVAC] = ISTARTF\n ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n ELEFT = ELEFT - ELECN\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1\n APE = AA[ISHELL]\n BPE = BB[ISHELL]\n ANGGEN(APE, BPE, THET)\n if THET < 0.0:\n THET = THET + API\n R3 = DRAND48(RDUM)\n PHI = TWOPI * R3\n DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,\n DRXX, DRYY, DRZZ)\n DRXE[NVAC][IONSUM[NVAC]] = DRXX\n DRYE[NVAC][IONSUM[NVAC]] = DRYY\n DRZE[NVAC][IONSUM[NVAC]] = DRZZ\n\n def GOTO4():\n IDUM = 1\n if INIT > 1:\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n INSUM = IONSUM[NVAC]\n SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)\n if JVAC == 0:\n pass\n else:\n ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n ESTORE[NVAC][IONSUM[NVAC]] = ELECN\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM='\n , 
IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ESHK\n ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n\n def GOTO2():\n UPDATE(KGAS, LGAS, ISHELL)\n INIT = 2\n TSUM = 0.0\n for I in range(1, 17):\n TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]\n for J in range(1, 17):\n TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]\n if TSUM == 0.0:\n return\n for I in range(1, 17):\n RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I\n ] / TSUM\n for J in range(1, 17):\n AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][\n ISHELL][I][J] / TSUM\n TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]\n for I in range(2, 17):\n TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]\n TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]\n for I in range(2, 17):\n TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]\n for J in range(1, 16):\n for I in range(1, 17):\n TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1\n ] + TEMP1[I + J * 17 - 1]\n R1 = DRAND48(RDUM)\n for I in range(1, 17):\n if R1 < TEMP[I]:\n IFLSUM[NVAC] = IFLSUM[NVAC] + 1\n EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]\n [LGAS]] - ELEV[I, IZ[KGAS][LGAS]]\n ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(\n PHI)\n DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(\n PHI)\n DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO2()\n GOTO2()\n counter116 = 1\n while counter116:\n counter116 = 0\n R2 = R1 - TEMP[17]\n for J in range(1, 17):\n if counter116:\n break\n for I in range(1, 17):\n if R2 < TEMP1[I + (J - 1) * 17]:\n ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I]\n [IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]\n ) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J\n ][IZ[KGAS][LGAS] + 1]) * 0.5\n if ETEMP < 0.0:\n counter117 = 1\n while counter117:\n counter117 = 0\n R1 = DRAND48(RDUM)\n if R1 < TEMP[17]:\n counter117 = 1\n counter116 = 1\n break\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(\n ' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM='\n , IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ETEMP\n ELEFT = ELEFT - abs(ETEMP)\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL\n ] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO4()\n GOTO4()\n print(' ERROR IN CASCADE B2')\n sys.exit()\n GOTO100()\n\n\ndef CALCB3(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):\n global ELEV\n global NSDEG\n global AA\n global BB\n global SCR, SCR1\n global PRSH\n global ESH\n global AUG\n global RAD\n global PRSHBT\n global IZ\n global INIOCC\n global ISHLMX\n global AMZ\n 
global NOCC\n global AUGR\n global RADR\n global IONSUM0\n global IFLSUM0\n global ESTORE0\n global EPHOTON0\n global DRXE0\n global DRYE0\n global DRZE0\n global DRX0\n global DRY0\n global DRZ0\n global IONSUM\n global IFLSUM\n global ESTORE\n global EPHOTON\n global DRXE\n global DRYE\n global DRZE\n global DRX\n global DRY\n global DRZ\n TEMP = [(0) for x in range(17)]\n TEMP1 = [(0) for x in range(289)]\n ISTART = IONSUM[NVAC]\n ISTARTF = IFLSUM[NVAC]\n ISHELLST = ISHELL\n API = numpy.arccos(-1.0)\n TWOPI = 2.0 * API\n\n def GOTO100():\n ELEFT = ELECEN\n ISHELL = ISHELLST\n INIT = 1\n for I in range(1, 17):\n NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]\n IONSUM[NVAC] = ISTART + 1\n IFLSUM[NVAC] = ISTARTF\n ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n ELEFT = ELEFT - ELECN\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1\n APE = AA[ISHELL]\n BPE = BB[ISHELL]\n ANGGEN(APE, BPE, THET)\n if THET < 0.0:\n THET = THET + API\n R3 = DRAND48(RDUM)\n PHI = TWOPI * R3\n DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,\n DRXX, DRYY, DRZZ)\n DRXE[NVAC][IONSUM[NVAC]] = DRXX\n DRYE[NVAC][IONSUM[NVAC]] = DRYY\n DRZE[NVAC][IONSUM[NVAC]] = DRZZ\n\n def GOTO4():\n IDUM = 1\n if INIT > 1:\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n INSUM = IONSUM[NVAC]\n SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)\n if JVAC == 0:\n pass\n else:\n ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n ESTORE[NVAC][IONSUM[NVAC]] = ELECN\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',\n IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ESHK\n ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n\n def GOTO2():\n UPDATE(KGAS, LGAS, ISHELL)\n INIT = 2\n TSUM = 0.0\n for I in range(1, 17):\n TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]\n for J in range(1, 17):\n TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]\n if TSUM == 0.0:\n return\n for I in range(1, 17):\n RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I\n ] / TSUM\n for J in range(1, 17):\n AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][\n ISHELL][I][J] / TSUM\n TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]\n for I in range(2, 17):\n TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]\n TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]\n for I in range(2, 17):\n TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]\n for J in range(1, 16):\n for I in range(1, 17):\n TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1\n ] + TEMP1[I + J * 17 - 1]\n R1 = DRAND48(RDUM)\n for I in range(1, 17):\n if R1 < TEMP[I]:\n IFLSUM[NVAC] = IFLSUM[NVAC] + 1\n EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]\n [LGAS]] - ELEV[I, IZ[KGAS][LGAS]]\n ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(\n PHI)\n DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(\n PHI)\n DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST 
== 1:\n return\n GOTO2()\n GOTO2()\n counter116 = 1\n while counter116:\n counter116 = 0\n R2 = R1 - TEMP[17]\n for J in range(1, 17):\n if counter116:\n break\n for I in range(1, 17):\n if R2 < TEMP1[I + (J - 1) * 17]:\n ETEMP = ELEV[ISHELL][IZ[KGAS][LGAS]] - (ELEV[I,\n IZ[KGAS][LGAS]] + ELEV[I][IZ[KGAS][LGAS] + 1]\n ) * 0.5 - (ELEV[J][IZ[KGAS][LGAS]] + ELEV[J\n ][IZ[KGAS][LGAS] + 1]) * 0.5\n if ETEMP < 0.0:\n counter117 = 1\n while counter117:\n counter117 = 0\n R1 = DRAND48(RDUM)\n if R1 < TEMP[17]:\n counter117 = 1\n counter116 = 1\n break\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(\n ' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM='\n , IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ETEMP\n ELEFT = ELEFT - abs(ETEMP)\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL\n ] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO4()\n GOTO4()\n print(' ERROR IN CASCADE B3')\n sys.exit()\n GOTO100()\n\n\ndef CALCB4(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):\n global ELEV\n global NSDEG\n global AA\n global BB\n global SCR, SCR1\n global PRSH\n global ESH\n global AUG\n global RAD\n global PRSHBT\n global IZ\n global INIOCC\n global ISHLMX\n global AMZ\n global NOCC\n global AUGR\n global RADR\n global IONSUM0\n global IFLSUM0\n global ESTORE0\n global EPHOTON0\n global DRXE0\n global DRYE0\n global DRZE0\n global DRX0\n global DRY0\n global DRZ0\n global IONSUM\n global IFLSUM\n global ESTORE\n global EPHOTON\n global DRXE\n global DRYE\n global DRZE\n global DRX\n global DRY\n global DRZ\n TEMP = [(0) for x in range(17)]\n TEMP1 = [(0) for x in range(289)]\n ISTART = IONSUM[NVAC]\n ISTARTF = IFLSUM[NVAC]\n ISHELLST = ISHELL\n API = numpy.arccos(-1.0)\n TWOPI = 2.0 * API\n\n def GOTO100():\n ELEFT = ELECEN\n ISHELL = ISHELLST\n INIT = 1\n for I in range(1, 17):\n NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]\n IONSUM[NVAC] = ISTART + 1\n IFLSUM[NVAC] = ISTARTF\n ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL, IZ[KGAS][LGAS]]\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n ELEFT = ELEFT - ELECN\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1\n APE = AA[ISHELL]\n BPE = BB[ISHELL]\n ANGGEN(APE, BPE, THET)\n if THET < 0.0:\n THET = THET + API\n R3 = DRAND48(RDUM)\n PHI = TWOPI * R3\n DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,\n DRXX, DRYY, DRZZ)\n DRXE[NVAC][IONSUM[NVAC]] = DRXX\n DRYE[NVAC][IONSUM[NVAC]] = DRYY\n DRZE[NVAC][IONSUM[NVAC]] = DRZZ\n\n def GOTO4():\n IDUM = 1\n if INIT > 1:\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n INSUM = IONSUM[NVAC]\n SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)\n if JVAC == 0:\n pass\n else:\n ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n ESTORE[NVAC][IONSUM[NVAC]] = ELECN\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',\n IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ESHK\n ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * 
numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n\n def GOTO2():\n UPDATE(KGAS, LGAS, ISHELL)\n INIT = 2\n TSUM = 0.0\n for I in range(1, 17):\n TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]\n for J in range(1, 17):\n TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]\n if TSUM == 0.0:\n return\n for I in range(1, 17):\n RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I\n ] / TSUM\n for J in range(1, 17):\n AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][\n ISHELL][I][J] / TSUM\n TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]\n for I in range(2, 17):\n TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]\n TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]\n for I in range(2, 17):\n TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]\n for J in range(1, 16):\n for I in range(1, 17):\n TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1\n ] + TEMP1[I + J * 17 - 1]\n R1 = DRAND48(RDUM)\n for I in range(1, 17):\n if R1 < TEMP[I]:\n IFLSUM[NVAC] = IFLSUM[NVAC] + 1\n EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL, IZ[KGAS]\n [LGAS]] - ELEV[I, IZ[KGAS][LGAS]]\n ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(\n PHI)\n DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(\n PHI)\n DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO2()\n GOTO2()\n counter116 = 1\n while counter116:\n counter116 = 0\n R2 = R1 - TEMP[17]\n for J in range(1, 17):\n if counter116:\n break\n for I in range(1, 17):\n if R2 < TEMP1[I + (J - 1) * 17]:\n ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,\n IZ[KGAS][LGAS]] + ELEV[I, IZ[KGAS][LGAS] + 1]\n ) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,\n IZ[KGAS][LGAS] + 1]) * 0.5\n if ETEMP < 0.0:\n counter117 = 1\n while counter117:\n counter117 = 0\n R1 = DRAND48(RDUM)\n if R1 < TEMP[17]:\n counter117 = 1\n counter116 = 1\n break\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(\n ' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM='\n , IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ETEMP\n ELEFT = ELEFT - abs(ETEMP)\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL\n ] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO4()\n GOTO4()\n print(' ERROR IN CASCADE B4')\n sys.exit()\n GOTO100()\n\n\ndef CALCB5(NVAC, KGAS, LGAS, ELECEN, ISHELL, L1):\n global ELEV\n global NSDEG\n global AA\n global BB\n global SCR, SCR1\n global PRSH\n global ESH\n global AUG\n global RAD\n global PRSHBT\n global IZ\n global INIOCC\n global ISHLMX\n global AMZ\n global NOCC\n global AUGR\n global RADR\n global IONSUM0\n global IFLSUM0\n global ESTORE0\n global EPHOTON0\n global DRXE0\n global DRYE0\n global DRZE0\n global DRX0\n global DRY0\n global DRZ0\n global IONSUM\n global IFLSUM\n global ESTORE\n global EPHOTON\n global DRXE\n global DRYE\n global DRZE\n 
global DRX\n global DRY\n global DRZ\n TEMP = [(0) for x in range(17)]\n TEMP1 = [(0) for x in range(289)]\n ISTART = IONSUM[NVAC]\n ISTARTF = IFLSUM[NVAC]\n ISHELLST = ISHELL\n API = numpy.arccos(-1.0)\n TWOPI = 2.0 * API\n\n def GOTO100():\n ELEFT = ELECEN\n ISHELL = ISHELLST\n INIT = 1\n for I in range(1, 17):\n NOCC[KGAS][LGAS][I] = INIOCC[KGAS][LGAS][I]\n IONSUM[NVAC] = ISTART + 1\n IFLSUM[NVAC] = ISTARTF\n ESTORE[NVAC][IONSUM[NVAC]] = ELECEN - ELEV[ISHELL][IZ[KGAS][LGAS]]\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n ELEFT = ELEFT - ELECN\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] - 1\n APE = AA[ISHELL]\n BPE = BB[ISHELL]\n ANGGEN(APE, BPE, THET)\n if THET < 0.0:\n THET = THET + API\n R3 = DRAND48(RDUM)\n PHI = TWOPI * R3\n DRCOS(DRX0[NVAC][L1], DRY0[NVAC][L1], DRZ0[NVAC][L1], THET, PHI,\n DRXX, DRYY, DRZZ)\n DRXE[NVAC][IONSUM[NVAC]] = DRXX\n DRYE[NVAC][IONSUM[NVAC]] = DRYY\n DRZE[NVAC][IONSUM[NVAC]] = DRZZ\n\n def GOTO4():\n IDUM = 1\n if INIT > 1:\n ELECN = ESTORE[NVAC][IONSUM[NVAC]]\n INSUM = IONSUM[NVAC]\n SHAKE(ISHELL, ELECN, KGAS, LGAS, ESHK, IDUM, INSUM, JVAC)\n if JVAC == 0:\n pass\n else:\n ELECN = ELECN - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n ESTORE[NVAC][IONSUM[NVAC]] = ELECN\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',\n IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ESHK\n ELEFT = ELEFT - ESHK - ELEV[JVAC, IZ[KGAS][LGAS]]\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n\n def GOTO2():\n UPDATE(KGAS, LGAS, ISHELL)\n INIT = 2\n TSUM = 0.0\n for I in range(1, 17):\n TSUM = TSUM + RADR[KGAS][LGAS][ISHELL][I]\n for J in range(1, 17):\n TSUM = TSUM + AUGR[KGAS][LGAS][ISHELL][I][J]\n if TSUM == 0.0:\n return\n for I in range(1, 17):\n RADR[KGAS][LGAS][ISHELL][I] = RADR[KGAS][LGAS][ISHELL][I\n ] / TSUM\n for J in range(1, 17):\n AUGR[KGAS][LGAS][ISHELL][I][J] = AUGR[KGAS][LGAS][\n ISHELL][I][J] / TSUM\n TEMP[1] = RADR[KGAS][LGAS][ISHELL][1]\n for I in range(2, 17):\n TEMP[I] = RADR[KGAS][LGAS][ISHELL][I] + TEMP[I - 1]\n TEMP1[1] = AUGR[KGAS][LGAS][ISHELL][1][1]\n for I in range(2, 17):\n TEMP1[I] = AUGR[KGAS][LGAS][ISHELL][I][1] + TEMP1[I - 1]\n for J in range(1, 16):\n for I in range(1, 17):\n TEMP1[I + J * 17] = AUGR[KGAS][LGAS][ISHELL][I][J + 1\n ] + TEMP1[I + J * 17 - 1]\n R1 = DRAND48(RDUM)\n for I in range(1, 17):\n if R1 < TEMP[I]:\n IFLSUM[NVAC] = IFLSUM[NVAC] + 1\n EPHOTON[NVAC][IFLSUM[NVAC]] = ELEV[ISHELL][IZ[KGAS]\n [LGAS]] - ELEV[I][IZ[KGAS][LGAS]]\n ELEFT = ELEFT - abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRX[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.cos(\n PHI)\n DRY[NVAC][IFLSUM[NVAC]] = numpy.sin(THET) * numpy.sin(\n PHI)\n DRZ[NVAC][IFLSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO2()\n GOTO2()\n counter116 = 1\n while counter116:\n counter116 = 0\n R2 = R1 - TEMP[17]\n for J in range(1, 17):\n if counter116:\n break\n for I in range(1, 17):\n if R2 < TEMP1(I + (J - 1) * 17):\n ETEMP = ELEV[ISHELL, IZ[KGAS][LGAS]] - (ELEV[I,\n IZ[KGAS][LGAS]] + ELEV[I, 
IZ[KGAS][LGAS] + 1]\n ) * 0.5 - (ELEV[J, IZ[KGAS][LGAS]] + ELEV[J,\n IZ[KGAS][LGAS] + 1]) * 0.5\n if ETEMP < 0.0:\n counter117 = 1\n while counter117:\n R1 = DRAND48(RDUM)\n if R1 < TEMP[17]:\n counter117 = 1\n counter116 = 1\n break\n IONSUM[NVAC] = IONSUM[NVAC] + 1\n if IONSUM[NVAC] > 28:\n print(\n ' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM='\n , IONSUM[NVAC])\n sys.exit()\n ESTORE[NVAC][IONSUM[NVAC]] = ETEMP\n ELEFT = ELEFT - abs(ETEMP)\n if ELEFT < 0.0:\n GOTO100()\n R3 = DRAND48(RDUM)\n THET = numpy.arccos(1.0 - 2.0 * R3)\n R4 = DRAND48(RDUM)\n PHI = TWOPI * R4\n DRXE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.cos(PHI)\n DRYE[NVAC][IONSUM[NVAC]] = numpy.sin(THET\n ) * numpy.sin(PHI)\n DRZE[NVAC][IONSUM[NVAC]] = numpy.cos(THET)\n NOCC[KGAS][LGAS][ISHELL] = NOCC[KGAS][LGAS][ISHELL\n ] + 1\n NOCC[KGAS][LGAS][I] = NOCC[KGAS][LGAS][I] - 1\n NOCC[KGAS][LGAS][J] = NOCC[KGAS][LGAS][J] - 1\n VACANCY(KGAS, LGAS, ISHELL, ILAST)\n if ILAST == 1:\n return\n GOTO4()\n GOTO4()\n print(' ERROR IN CASCADE B5')\n sys.exit()\n GOTO100()\n",
"step-5": "import numpy\ndef CALCB1(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):\n\t# IMPLICIT #real*8(A-H,O-Z)\n\t# IMPLICIT #integer*8(I-N)\n\t#CHARACTER*6\n\t# SCR=\"\"#(17)\n\t# SCR1=\"\"#(17)\n\t#COMMON/GENCAS/\n\tglobal ELEV#[17,79]\n\tglobal NSDEG#(17)\n\tglobal AA#[17]\n\tglobal BB#[17]\n\tglobal SCR,SCR1\n\t#COMMON/MIXC/\n\tglobal PRSH#(6,3,17,17)\n\tglobal ESH#(6,3,17)\n\tglobal AUG#(6,3,17,17,17)\n\tglobal RAD#[6,3,17,17]\n\tglobal PRSHBT#(6,3,17)\n\tglobal IZ#[6,3]\n\tglobal INIOCC#(6,3,17)\n\tglobal ISHLMX#(6,3)\n\tglobal AMZ#[6,3]\n\t#COMMON/UPD/\n\tglobal NOCC#(6,3,17)\n\tglobal AUGR#(6,3,17,17,17)\n\tglobal RADR#(6,3,17,17)\n\t#COMMON/CALCASB/\n\tglobal IONSUM0#(10)\n\tglobal IFLSUM0#(10)\n\tglobal ESTORE0#(10,28)\n\tglobal EPHOTON0#(10,28)\n\tglobal DRXE0#(10,28)\n\tglobal DRYE0#(10,28)\n\tglobal DRZE0#(10,28)\n\tglobal DRX0#(10,28)\n\tglobal DRY0#(10,28)\n\tglobal DRZ0#(10,28)\n\t#COMMON/CALCAS1B/\n\tglobal IONSUM#(10)\n\tglobal IFLSUM#(10)\n\tglobal ESTORE#(10,28)\n\tglobal EPHOTON#(10,28)\n\tglobal DRXE#(10,28)\n\tglobal DRYE#(10,28)\n\tglobal DRZE#(10,28)\n\tglobal DRX#(10,28)\n\tglobal DRY#(10,28)\n\tglobal DRZ#[10,28]\n\t#DIMENSION \n\tTEMP=numpy.zeros((17+1))\n\tTEMP1=numpy.zeros((289+1))\n\t#\n\t# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS \n\t# WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL\n\t#\n\tISTART=IONSUM[NVAC]\n\tISTARTF=IFLSUM[NVAC]\n\tISHELLST=ISHELL\n\tAPI=numpy.arccos(-1.00)\n\tTWOPI=2.00*API\n\tdef GOTO100():\n\t\tELEFT=ELECEN\n\t\tISHELL=ISHELLST\n\t\tINIT=1\n\t\t# SET STARTING ARRAY NOCC EQUAL TO INIOCC\n\t\tfor I in range(1,17):\n\t\t\tNOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]\n\t\tIONSUM[NVAC]=ISTART+1\n\t\tIFLSUM[NVAC]=ISTARTF\n\t\t# STORE PHOTOELECTRON ENERGY AND ANGLE\n\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]\n\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\tELEFT=ELEFT-ELECN\n\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1 \n\t\t# USE PHOTELECTRON ANGULAR DISTRIBUTION\n\t\tAPE=AA[ISHELL]\n\t\tBPE=BB[ISHELL]\n\t\tANGGEN(APE,BPE,THET)\n\t\tif(THET < 0.0):\n\t\t THET=THET+API\n\t\tR3=DRAND48(RDUM)\n\t\tPHI=TWOPI*R3\n\t\tDRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)\n\t\tDRXE[NVAC][IONSUM[NVAC]]=DRXX\n\t\tDRYE[NVAC][IONSUM[NVAC]]=DRYY\n\t\tDRZE[NVAC][IONSUM[NVAC]]=DRZZ\n\t\t# LOOP AROUND CASCADE\n\t\tdef GOTO4():\n\t\t\t# CHECK FOR ELECTRON SHAKEOFF\n\t\t\tIDUM=1\n\t\t\tif(INIT > 1):\n\t\t\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\t\tINSUM=IONSUM[NVAC]\n\t\t\tSHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)\n\t\t\t# CALCULATE ENERGY OF ELECTRON\n\t\t\tif(JVAC == 0):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# ELECTRON + SHAKEOFF\n\t\t\t\tELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECN\n\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t# MAXIMUM ION CHARGE STATE =28\n\t\t\t\tif(IONSUM[NVAC]> 28):\n\t\t\t\t\tprint(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC]) \n\t\t\t\t\tsys.exit() \n\t\t\t\t# endif \n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ESHK \n\t\t\t\tELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\tGOTO100()\n\t\t\t\t# RANDOM EMISSION DIRECTION \n\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\tPHI=TWOPI*R4\n\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\tdef 
GOTO2():\n\t\t\t\tUPDATE(KGAS,LGAS,ISHELL)\n\t\t\t\tINIT=2\n\t\t\t\t# CHOOSE FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tTSUM=0.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tTSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tTSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]\n\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\tif(TSUM == 0.0):\n\t\t\t\t\treturn \n\t\t\t\t# NORMALISE TO 1.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tRADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tAUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM\n\t\t\t\t# CREATE CUMULATIVE SUM ARRAY\n\t\t\t\tTEMP[1]=RADR[KGAS][LGAS][ISHELL][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]\n\t\t\t\tTEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]\n\t\t\t\tfor J in range(1,16):\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tTEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]\n\t\t\t\t# FIND FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tif(R1 < TEMP[I]) :\n\t\t\t\t\t\t# STORE PHOTON ENERGY AND ANGLE : UPDATE NOCC\n\t\t\t\t\t\tIFLSUM[NVAC]=IFLSUM[NVAC]+1\n\t\t\t\t\t\tEPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]\n\t\t\t\t\t\tELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\tR4=DRAND48(RDUM) \n\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\tDRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\tDRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\tDRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\treturn \n\t\t\t\t\t\t# endif\n\t\t\t\t\t\tGOTO2()\n\t\t\t\t\t# endif \n\t\t\tGOTO2() ## calling the internal function first time \n\t\t\tcounter116=1\n\t\t\twhile(counter116):\n\t\t\t\tcounter116=0\n\t\t\t\tR2=R1-TEMP[17]\n\t\t\t\tfor J in range(1,17):\n\t\t\t\t\tif(counter116):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tif(R2 < TEMP1[I+((J-1)*17)]) :\n\t\t\t\t\t\t\t# AUGER OR COSTER KRONIG \n\t\t\t\t\t\t\t# STORE EJECTED ELECTRON AND UPDATE NOCC\n\t\t\t\t\t\t\tETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I][IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5\n\t\t\t\t\t\t\tif(ETEMP < 0.0):\n\t\t\t\t\t\t\t\t# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS\n\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\twhile(counter117):\n\t\t\t\t\t\t\t\t\tcounter117=0\n\t\t\t\t\t\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\t\t\t\t\t\tif(R1 < TEMP[17]):\n\t\t\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\tcounter116=1\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\t\t\t\tprint(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])\n\t\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ETEMP\n\t\t\t\t\t\t\tELEFT=ELEFT-abs(ETEMP)\n\t\t\t\t\t\t\tif(ELEFT < 
0.0):\n\t\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1\n\t\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tGOTO4()\n\t\t\t\t\t\t# endif\n\t\tGOTO4()\n\t\tprint(' ERROR IN CASCADE B1') \n\t\tsys.exit() \n\tGOTO100()\n\t# end\ndef CALCB2(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):\n\t# IMPLICIT #real*8(A-H,O-Z)\n\t# IMPLICIT #integer*8(I-N)\n\t#CHARACTER*6 \n\t# SCR=\"\"#(17)\n\t# SCR1=\"\"#(17)\n\t#COMMON/GENCAS/\n\tglobal ELEV#[17,79]\n\tglobal NSDEG#(17)\n\tglobal AA#[17]\n\tglobal BB#[17]\n\tglobal SCR,SCR1\n\t#COMMON/MIXC/\n\tglobal PRSH#(6,3,17,17)\n\tglobal ESH#(6,3,17)\n\tglobal AUG#(6,3,17,17,17)\n\tglobal RAD#[6,3,17,17]\n\tglobal PRSHBT#(6,3,17)\n\tglobal IZ#[6,3]\n\tglobal INIOCC#(6,3,17)\n\tglobal ISHLMX#(6,3)\n\tglobal AMZ#[6,3]\n\t#COMMON/UPD/\n\tglobal NOCC#(6,3,17)\n\tglobal AUGR#(6,3,17,17,17)\n\tglobal RADR#(6,3,17,17)\n\t#COMMON/CALCAS1B/\n\tglobal IONSUM0#(10)\n\tglobal IFLSUM0#(10)\n\tglobal ESTORE0#(10,28)\n\tglobal EPHOTON0#(10,28)\n\tglobal DRXE0#(10,28)\n\tglobal DRYE0#(10,28)\n\tglobal DRZE0#(10,28)\n\tglobal DRX0#(10,28)\n\tglobal DRY0#(10,28)\n\tglobal DRZ0#(10,28)\n\t#COMMON/CALCAS2B/\n\tglobal IONSUM#(10)\n\tglobal IFLSUM#(10)\n\tglobal ESTORE#(10,28)\n\tglobal EPHOTON#(10,28)\n\tglobal DRXE#(10,28)\n\tglobal DRYE#(10,28)\n\tglobal DRZE#(10,28)\n\tglobal DRX#(10,28)\n\tglobal DRY#(10,28)\n\tglobal DRZ#[10,28]\n\t#DIMENSION\n\tTEMP=[0 for x in range(17)]\n\tTEMP1=[0 for x in range(289)]\n\t#\n\t# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS\n\t# WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL\n\t#\n\tISTART=IONSUM[NVAC]\n\tISTARTF=IFLSUM[NVAC]\n\tISHELLST=ISHELL\n\tAPI=numpy.arccos(-1.00)\n\tTWOPI=2.00*API\n\tdef GOTO100():\n\t\tELEFT=ELECEN\n\t\tISHELL=ISHELLST\n\t\tINIT=1\n\t\t# SET STARTING ARRAY NOCC EQUAL TO INIOCC\n\t\tfor I in range(1,17):\n\t\t\tNOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]\n\t\tIONSUM[NVAC]=ISTART+1\n\t\tIFLSUM[NVAC]=ISTARTF\n\t\t# STORE INITIAL PHOTELECTRON AND ANGLE\n\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]\n\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\tELEFT=ELEFT-ELECN\n\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1 \n\t\t# USE PHOTOELECTRON ANGULAR DISTRIBUTION\n\t\tAPE=AA[ISHELL]\n\t\tBPE=BB[ISHELL]\n\t\tANGGEN(APE,BPE,THET)\n\t\tif(THET < 0.0):\n\t\t THET=THET+API\n\t\tR3=DRAND48(RDUM)\n\t\tPHI=TWOPI*R3\n\t\tDRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)\n\t\tDRXE[NVAC][IONSUM[NVAC]]=DRXX\n\t\tDRYE[NVAC][IONSUM[NVAC]]=DRYY\n\t\tDRZE[NVAC][IONSUM[NVAC]]=DRZZ\n\t\t# LOOP AROUND CASCADE\n\t\tdef GOTO4():\n\t\t\t# CHECK FOR ELECTRON SHAKEOFF\n\t\t\tIDUM=1\n\t\t\tif(INIT > 1):\n\t\t\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\t\tINSUM=IONSUM[NVAC]\n\t\t\tSHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)\n\t\t\t# CALCULATE ENERGY OF ELECTRON\n\t\t\tif(JVAC == 
0):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# ELECTRON + SHAKEOFF\n\t\t\t\tELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECN\n\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t# MAXIMUM ION CHARGE STATE =28\n\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\tprint(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC]) \n\t\t\t\t\tsys.exit()\n\t\t\t\t# endif\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ESHK\n\t\t\t\tELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\tGOTO100()\n\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\tPHI=TWOPI*R4\n\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\tdef GOTO2():\n\t\t\t\tUPDATE(KGAS,LGAS,ISHELL)\n\t\t\t\tINIT=2\n\t\t\t\t# CHOOSE FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tTSUM=0.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tTSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tTSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]\n\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\tif(TSUM == 0.0):\n\t\t\t\t\treturn \n\t\t\t\t# NORMALISE TO 1.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tRADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tAUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM\n\t\t\t\t# CREATE CUMULATIVE SUM ARRAY\n\t\t\t\tTEMP[1]=RADR[KGAS][LGAS][ISHELL][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]\n\t\t\t\tTEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]\n\t\t\t\tfor J in range(1,16):\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tTEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]\n\t\t\t\t# FIND FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tif(R1 < TEMP[I]) :\n\t\t\t\t\t\t# STORE PHOTON ENERGY AND UPDATE NOCC\n\t\t\t\t\t\tIFLSUM[NVAC]=IFLSUM[NVAC]+1\n\t\t\t\t\t\tEPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]\n\t\t\t\t\t\tELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\tDRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\tDRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\tDRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\treturn \n\t\t\t\t\t\t# endif\n\t\t\t\t\t\tGOTO2()\n\t\t\t\t\t# endif \n\t\t\t\t#16 CONTINUE\n\t\t\tGOTO2()\n\t\t\tcounter116=1\n\t\t\twhile(counter116):\n\t\t\t\tcounter116=0\n\t\t\t\tR2=R1-TEMP[17]\n\t\t\t\tfor J in range(1,17):\n\t\t\t\t\tif(counter116):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tif(R2 < TEMP1[I+((J-1)*17)]) :\n\t\t\t\t\t\t\t# AUGER OR COSTER KRONIG \n\t\t\t\t\t\t\t# STORE EJECTED ELECTRON AND UPDATE 
NOCC\n\t\t\t\t\t\t\tETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I][IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5\n\t\t\t\t\t\t\tif(ETEMP < 0.0):\n\t\t\t\t\t\t\t\t# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS\n\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\twhile(counter117):\n\t\t\t\t\t\t\t\t\tcounter117=0\n\t\t\t\t\t\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\t\t\t\t\t\tif(R1 < TEMP[17]):\n\t\t\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\tcounter116=1\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\t\t\t\tprint(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])\n\t\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ETEMP\n\t\t\t\t\t\t\tELEFT=ELEFT-abs(ETEMP)\n\t\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1\n\t\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\n\t\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tGOTO4()\n\t\t\t\t\t\t# endif\n\t\tGOTO4()\n\t\tprint(' ERROR IN CASCADE B2') \n\t\tsys.exit() \n\tGOTO100()\n\t# end\ndef CALCB3(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):\n\t# IMPLICIT #real*8(A-H,O-Z)\n\t# IMPLICIT #integer*8(I-N)\n\t#CHARACTER*6 \n\t# SCR=\"\"#(17)\n\t# SCR1=\"\"#(17)\n\t#COMMON/GENCAS/\n\tglobal ELEV#[17,79]\n\tglobal NSDEG#(17)\n\tglobal AA#[17]\n\tglobal BB#[17]\n\tglobal SCR,SCR1\n\t#COMMON/MIXC/\n\tglobal PRSH#(6,3,17,17)\n\tglobal ESH#(6,3,17)\n\tglobal AUG#(6,3,17,17,17)\n\tglobal RAD#[6,3,17,17]\n\tglobal PRSHBT#(6,3,17)\n\tglobal IZ#[6,3]\n\tglobal INIOCC#(6,3,17)\n\tglobal ISHLMX#(6,3)\n\tglobal AMZ#[6,3]\n\t#COMMON/UPD/\n\tglobal NOCC#(6,3,17)\n\tglobal AUGR#(6,3,17,17,17)\n\tglobal RADR#(6,3,17,17)\n\t#COMMON/CALCAS2B/\n\tglobal IONSUM0#(10)\n\tglobal IFLSUM0#(10)\n\tglobal ESTORE0#(10,28)\n\tglobal EPHOTON0#(10,28)\n\tglobal DRXE0#(10,28)\n\tglobal DRYE0#(10,28)\n\tglobal DRZE0#(10,28)\n\tglobal DRX0#(10,28)\n\tglobal DRY0#(10,28)\n\tglobal DRZ0#(10,28)\n\t#COMMON/CALCAS3B/\n\tglobal IONSUM#(10)\n\tglobal IFLSUM#(10)\n\tglobal ESTORE#(10,28)\n\tglobal EPHOTON#(10,28)\n\tglobal DRXE#(10,28)\n\tglobal DRYE#(10,28)\n\tglobal DRZE#(10,28)\n\tglobal DRX#(10,28)\n\tglobal DRY#(10,28)\n\tglobal DRZ#[10,28]\n\t#DIMENSION\n\tTEMP=[0 for x in range(17)]\n\tTEMP1=[0 for x in range(289)]\n\t#\n\t# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS\n\t# WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL\n\t#\n\tISTART=IONSUM[NVAC]\n\tISTARTF=IFLSUM[NVAC]\n\tISHELLST=ISHELL\n\tAPI=numpy.arccos(-1.00)\n\tTWOPI=2.00*API\n\tdef GOTO100():\n\t\tELEFT=ELECEN\n\t\tISHELL=ISHELLST\n\t\tINIT=1\n\t\t# SET STARTING ARRAY NOCC EQUAL TO INIOCC\n\t\tfor I in range(1,17):\n\t\t\tNOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]\n\t\tIONSUM[NVAC]=ISTART+1\n\t\tIFLSUM[NVAC]=ISTARTF\n\t\t# STORE PHOTOELECTRON ENERGY 
AND ANGLE\n\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]\n\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\tELEFT=ELEFT-ELECN\n\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1 \n\t\t# USE PHOTOELECTRON ANGULAR DISTRIBUTION\n\t\tAPE=AA[ISHELL]\n\t\tBPE=BB[ISHELL]\n\t\tANGGEN(APE,BPE,THET)\n\t\tif(THET < 0.0):\n\t\t THET=THET+API\n\t\tR3=DRAND48(RDUM)\n\t\tPHI=TWOPI*R3\n\t\tDRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)\n\t\tDRXE[NVAC][IONSUM[NVAC]]=DRXX\n\t\tDRYE[NVAC][IONSUM[NVAC]]=DRYY\n\t\tDRZE[NVAC][IONSUM[NVAC]]=DRZZ\n\t\t# LOOP AROUND CASCADE\n\t\tdef GOTO4():\n\t\t\t# CHECK FOR ELECTRON SHAKEOFF\n\t\t\tIDUM=1\n\t\t\tif(INIT > 1):\n\t\t\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\t\tINSUM=IONSUM[NVAC]\n\t\t\tSHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)\n\t\t\t# CALCULATE ENERGY OF ELECTRON\n\t\t\tif(JVAC == 0):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# ELECTRON + SHAKEOFF\n\t\t\t\tELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECN\n\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t# MAXIMUM ION CHARGE STATE =28\n\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\tprint(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC]) \n\t\t\t\t\tsys.exit()\n\t\t\t\t# endif\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ESHK\n\t\t\t\tELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\tGOTO100()\n\t\t\t\t# RANDOM EMISSION ANGLE\n\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\tPHI=TWOPI*R4\n\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\tdef GOTO2():\n\t\t\t\tUPDATE(KGAS,LGAS,ISHELL)\n\t\t\t\tINIT=2\n\t\t\t\t# CHOOSE FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tTSUM=0.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tTSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tTSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]\n\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\tif(TSUM == 0.0):\n\t\t\t\t\treturn \n\t\t\t\t# NORMALISE TO 1.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tRADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tAUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM\n\t\t\t\t# CREATE CUMULATIVE SUM ARRAY\n\t\t\t\tTEMP[1]=RADR[KGAS][LGAS][ISHELL][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]\n\t\t\t\tTEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]\n\t\t\t\tfor J in range(1,16):\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tTEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]\n\t\t\t\t# FIND FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tif(R1 < TEMP[I]) :\n\t\t\t\t\t\t# STORE PHOTON ENERGY AND UPDATE NOCC\n\t\t\t\t\t\tIFLSUM[NVAC]=IFLSUM[NVAC]+1\n\t\t\t\t\t\tEPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]\n\t\t\t\t\t\tELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t# RANDOM EMISSION 
DIRECTION\n\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\tDRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\tDRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\tDRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\treturn \n\t\t\t\t\t\t# endif\n\t\t\t\t\t\tGOTO2() \n\t\t\t\t\t# endif \n\t\t\tGOTO2()\n\t\t\tcounter116=1\n\t\t\twhile(counter116):\n\t\t\t\tcounter116=0\n\t\t\t\tR2=R1-TEMP[17]\n\t\t\t\tfor J in range(1,17):\n\t\t\t\t\tif(counter116):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tif(R2 < TEMP1[I+((J-1)*17)]) :\n\t\t\t\t\t\t\t# AUGER OR COSTER KRONIG \n\t\t\t\t\t\t\t# STORE EJECTED ELECTRON AND UPDATE NOCC\n\t\t\t\t\t\t\tETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5\n\t\t\t\t\t\t\tif(ETEMP < 0.0):\n\t\t\t\t\t\t\t\t# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS\n\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\twhile(counter117):\n\t\t\t\t\t\t\t\t\tcounter117=0\n\t\t\t\t\t\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\t\t\t\t\t\tif(R1 < TEMP[17]):\n\t\t\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\tcounter116=1\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\t\t\t\tprint(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])\n\t\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ETEMP\n\t\t\t\t\t\t\tELEFT=ELEFT-abs(ETEMP)\n\t\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1\n\t\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tGOTO4()\n\t\t\t\t\t\t# endif\n\t\tGOTO4()\t\t\t\t\n\t\tprint(' ERROR IN CASCADE B3') \n\t\tsys.exit() \n\tGOTO100()\t\n\t#end\n\t\ndef CALCB4(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):\n\t# IMPLICIT #real*8(A-H,O-Z)\n\t# IMPLICIT #integer*8(I-N)\n\t#CHARACTER*6 \n\t# SCR=\"\"#(17)\n\t# SCR1=\"\"#(17)\n\t#COMMON/GENCAS/\n\tglobal ELEV#[17,79]\n\tglobal NSDEG#(17)\n\tglobal AA#[17]\n\tglobal BB#[17]\n\tglobal SCR,SCR1\n\t#COMMON/MIXC/\n\tglobal PRSH#(6,3,17,17)\n\tglobal ESH#(6,3,17)\n\tglobal AUG#(6,3,17,17,17)\n\tglobal RAD#[6,3,17,17]\n\tglobal PRSHBT#(6,3,17)\n\tglobal IZ#[6,3]\n\tglobal INIOCC#(6,3,17)\n\tglobal ISHLMX#(6,3)\n\tglobal AMZ#[6,3]\n\t#COMMON/UPD/\n\tglobal NOCC#(6,3,17)\n\tglobal AUGR#(6,3,17,17,17)\n\tglobal RADR#(6,3,17,17)\n\t#COMMON/CALCAS3B/\n\tglobal IONSUM0#(10)\n\tglobal IFLSUM0#(10)\n\tglobal 
ESTORE0#(10,28)\n\tglobal EPHOTON0#(10,28)\n\tglobal DRXE0#(10,28)\n\tglobal DRYE0#(10,28)\n\tglobal DRZE0#(10,28)\n\tglobal DRX0#(10,28)\n\tglobal DRY0#(10,28)\n\tglobal DRZ0#(10,28)\n\t#COMMON/CALCAS4B/\n\tglobal IONSUM#(10)\n\tglobal IFLSUM#(10)\n\tglobal ESTORE#(10,28)\n\tglobal EPHOTON#(10,28)\n\tglobal DRXE#(10,28)\n\tglobal DRYE#(10,28)\n\tglobal DRZE#(10,28)\n\tglobal DRX#(10,28)\n\tglobal DRY#(10,28)\n\tglobal DRZ#[10,28]\n\t#DIMENSION\n\tTEMP=[0 for x in range(17)]\n\tTEMP1=[0 for x in range(289)]\n\t#\n\t# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS\n\t# WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL\n\t#\n\tISTART=IONSUM[NVAC]\n\tISTARTF=IFLSUM[NVAC]\n\tISHELLST=ISHELL\n\tAPI=numpy.arccos(-1.00)\n\tTWOPI=2.00*API\n\tdef GOTO100():\n\t\tELEFT=ELECEN\n\t\tISHELL=ISHELLST\n\t\tINIT=1\n\t\t# SET STARTING ARRAY NOCC EQUAL TO INIOCC\n\t\tfor I in range(1,17):\n\t\t\tNOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]\n\t\tIONSUM[NVAC]=ISTART+1\n\t\tIFLSUM[NVAC]=ISTARTF\n\t\t# STORE PHOTOELECTRON ENERGY AND ANGLE\n\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]\n\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\tELEFT=ELEFT-ELECN\n\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1 \n\t\t# USE PHOTOELECTRON ANGULAR DISTRIBUTION\n\t\tAPE=AA[ISHELL]\n\t\tBPE=BB[ISHELL]\n\t\tANGGEN(APE,BPE,THET)\n\t\tif(THET < 0.0):\n\t\t THET=THET+API\n\t\tR3=DRAND48(RDUM)\n\t\tPHI=TWOPI*R3\n\t\tDRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)\n\t\tDRXE[NVAC][IONSUM[NVAC]]=DRXX\n\t\tDRYE[NVAC][IONSUM[NVAC]]=DRYY\n\t\tDRZE[NVAC][IONSUM[NVAC]]=DRZZ\n\t\t# LOOP AROUND CASCADE\n\t\tdef GOTO4():\n\t\t\t# CHECK FOR ELECTRON SHAKEOFF\n\t\t\tIDUM=1\n\t\t\tif(INIT > 1):\n\t\t\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\t\tINSUM=IONSUM[NVAC]\n\t\t\tSHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)\n\t\t\t# CALCULATE ENERGY OF ELECTRON\n\t\t\tif(JVAC == 0):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# ELECTRON + SHAKEOFF\n\t\t\t\tELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECN\n\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t# MAXIMUM ION CHARGE STATE =28\n\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\tprint(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC]) \n\t\t\t\t\tsys.exit()\n\t\t\t\t# endif\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ESHK\n\t\t\t\tELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\tGOTO100()\n\t\t\t\t# RANDOM EMISSION ANGLE\n\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\tPHI=TWOPI*R4\n\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\tdef GOTO2():\n\t\t\t\tUPDATE(KGAS,LGAS,ISHELL)\n\t\t\t\tINIT=2\n\t\t\t\t# CHOOSE FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tTSUM=0.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tTSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tTSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]\n\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\tif(TSUM == 0.0):\n\t\t\t\t\treturn \n\t\t\t\t# NORMALISE TO 1.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tRADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tAUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM\n\t\t\t\t# CREATE CUMULATIVE SUM ARRAY\n\t\t\t\tTEMP[1]=RADR[KGAS][LGAS][ISHELL][1]\n\t\t\t\tfor I in 
range(2,17):\n\t\t\t\t\tTEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]\n\t\t\t\tTEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]\n\t\t\t\tfor J in range(1,16):\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tTEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]\n\t\t\t\t# FIND FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tif(R1 < TEMP[I]) :\n\t\t\t\t\t\t# STORE PHOTON ENERGY AND UPDATE NOCC\n\t\t\t\t\t\tIFLSUM[NVAC]=IFLSUM[NVAC]+1\n\t\t\t\t\t\tEPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]\n\t\t\t\t\t\tELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\tDRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\tDRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\tDRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\treturn \n\t\t\t\t\t\t# endif\n\t\t\t\t\t\tGOTO2()\n\t\t\t\t\t# endif \n\t\t\tGOTO2()\n\t\t\tcounter116=1\n\t\t\twhile(counter116):\n\t\t\t\tcounter116=0\n\t\t\t\tR2=R1-TEMP[17]\n\t\t\t\tfor J in range(1,17):\n\t\t\t\t\tif(counter116):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tif(R2 < TEMP1[I+((J-1)*17)]) :\n\t\t\t\t\t\t\t# AUGER OR COSTER KRONIG \n\t\t\t\t\t\t\t# STORE EJECTED ELECTRON AND UPDATE NOCC\n\t\t\t\t\t\t\tETEMP=ELEV[ISHELL,IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I,IZ[KGAS][LGAS]+1])*0.5-(ELEV[J,IZ[KGAS][LGAS]]+ELEV[J,IZ[KGAS][LGAS]+1])*0.5\n\t\t\t\t\t\t\tif(ETEMP < 0.0):\n\t\t\t\t\t\t\t\t# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS\n\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\twhile(counter117):\n\t\t\t\t\t\t\t\t\tcounter117=0\n\t\t\t\t\t\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\t\t\t\t\t\tif(R1 < TEMP[17]):\n\t\t\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\tcounter116=1\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\t\t\t\tprint(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])\n\t\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ETEMP\n\t\t\t\t\t\t\tELEFT=ELEFT-abs(ETEMP)\n\t\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1\n\t\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tGOTO4() 
\n\t\t\t\t\t\t# endif\n\t\tGOTO4()\n\t\tprint(' ERROR IN CASCADE B4') \n\t\tsys.exit() \n\tGOTO100()\n\t# end\n\ndef CALCB5(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):\n\t# IMPLICIT #real*8(A-H,O-Z)\n\t# IMPLICIT #integer*8(I-N)\n\t# SCR=\"\"\n\t# SCR1=\"\"#(17)\n\t#COMMON/GENCAS/\n\tglobal ELEV#[17,79]\n\tglobal NSDEG#(17)\n\tglobal AA#[17]\n\tglobal BB#[17]\n\tglobal SCR,SCR1\n\t#COMMON/MIXC/\n\tglobal PRSH#(6,3,17,17)\n\tglobal ESH#(6,3,17)\n\tglobal AUG#(6,3,17,17,17)\n\tglobal RAD#[6,3,17,17]\n\tglobal PRSHBT#(6,3,17)\n\tglobal IZ#[6,3]\n\tglobal INIOCC#(6,3,17)\n\tglobal ISHLMX#(6,3)\n\tglobal AMZ#[6,3]\n\t#COMMON/UPD/\n\tglobal NOCC#(6,3,17)\n\tglobal AUGR#(6,3,17,17,17)\n\tglobal RADR#(6,3,17,17)\n\t#COMMON/CALCAS4B/\n\tglobal IONSUM0#(10)\n\tglobal IFLSUM0#(10)\n\tglobal ESTORE0#(10,28)\n\tglobal EPHOTON0#(10,28)\n\tglobal DRXE0#(10,28)\n\tglobal DRYE0#(10,28)\n\tglobal DRZE0#(10,28)\n\tglobal DRX0#(10,28)\n\tglobal DRY0#(10,28)\n\tglobal DRZ0#(10,28)\n\t#COMMON/CALCAS5B/\n\tglobal IONSUM#(10)\n\tglobal IFLSUM#(10)\n\tglobal ESTORE#(10,28)\n\tglobal EPHOTON#(10,28)\n\tglobal DRXE#(10,28)\n\tglobal DRYE#(10,28)\n\tglobal DRZE#(10,28)\n\tglobal DRX#(10,28)\n\tglobal DRY#(10,28)\n\tglobal DRZ#[10,28]\n\t#DIMENSION \n\tTEMP=[0 for x in range(17)]\n\tTEMP1=[0 for x in range(289)]\n\t#\n\t# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS\n\t# WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL\n\t#\n\tISTART=IONSUM[NVAC]\n\tISTARTF=IFLSUM[NVAC]\n\tISHELLST=ISHELL\n\tAPI=numpy.arccos(-1.00)\n\tTWOPI=2.00*API\n\tdef GOTO100():\n\t\tELEFT=ELECEN\n\t\tISHELL=ISHELLST\n\t\tINIT=1\n\t\t# SET STARTING ARRAY NOCC EQUAL TO INIOCC\n\t\tfor I in range(1,17):\n\t\t\tNOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]\n\t\tIONSUM[NVAC]=ISTART+1\n\t\tIFLSUM[NVAC]=ISTARTF\n\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL][IZ[KGAS][LGAS]]\n\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\tELEFT=ELEFT-ELECN\n\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1 \n\t\t# USE PHOTOELECTRON ANGULAR DISTRIBUTION\n\t\tAPE=AA[ISHELL]\n\t\tBPE=BB[ISHELL]\n\t\tANGGEN(APE,BPE,THET)\n\t\tif(THET < 0.0):\n\t\t\tTHET=THET+API\n\t\tR3=DRAND48(RDUM)\n\t\tPHI=TWOPI*R3\n\t\tDRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)\n\t\tDRXE[NVAC][IONSUM[NVAC]]=DRXX\n\t\tDRYE[NVAC][IONSUM[NVAC]]=DRYY\n\t\tDRZE[NVAC][IONSUM[NVAC]]=DRZZ\n\t\t# LOOP AROUND CASCADE\n\t\tdef GOTO4():\n\t\t\t# CHECK FOR ELECTRON SHAKEOFF\n\t\t\tIDUM=1\n\t\t\tif(INIT > 1):\n\t\t\t\tELECN=ESTORE[NVAC][IONSUM[NVAC]]\n\t\t\tINSUM=IONSUM[NVAC]\n\t\t\tSHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)\n\t\t\t# CALCULATE ENERGY OF ELECTRON\n\t\t\tif(JVAC == 0):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# ELECTRON + SHAKEOFF\n\t\t\t\tELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ELECN\n\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t# MAXIMUM ION CHARGE STATE =28\n\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\tprint(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])\n\t\t\t\t\tsys.exit() \n\t\t\t\t# endif\n\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ESHK\n\t\t\t\tELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]\n\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\tGOTO100()\n\t\t\t\t# RANDOM EMISSION ANGLE\n\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\tPHI=TWOPI*R4\n\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\tdef 
GOTO2():\n\t\t\t\tUPDATE(KGAS,LGAS,ISHELL)\n\t\t\t\tINIT=2\n\t\t\t\t# CHOOSE FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tTSUM=0.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tTSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tTSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]\n\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\tif(TSUM == 0.0):\n\t\t\t\t\treturn \n\t\t\t\t# NORMALISE TO 1.0\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tRADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM\n\t\t\t\t\tfor J in range(1,17):\n\t\t\t\t\t\tAUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM\n\t\t\t\t# CREATE CUMULATIVE SUM ARRAY\n\t\t\t\tTEMP[1]=RADR[KGAS][LGAS][ISHELL][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]\n\t\t\t\tTEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]\n\t\t\t\tfor I in range(2,17):\n\t\t\t\t\tTEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]\n\t\t\t\tfor J in range(1,16):\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tTEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]\n\t\t\t\t# FIND FLUORESCENCE OR AUGER TRANSITION\n\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\tfor I in range(1,17):\n\t\t\t\t\tif(R1 < TEMP[I]) :\n\t\t\t\t\t\t# STORE PHOTON ENERGY AND UPDATE NOCC\n\t\t\t\t\t\tIFLSUM[NVAC]=IFLSUM[NVAC]+1\n\t\t\t\t\t\tEPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL][IZ[KGAS][LGAS]]-ELEV[I][IZ[KGAS][LGAS]]\n\t\t\t\t\t\tELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])\n\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t# RANDOM EMISSION DIRECTION\n\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\tDRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\tDRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\tDRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\treturn \n\t\t\t\t\t\t# endif\n\t\t\t\t\t\tGOTO2()\n\t\t\t\t\t# endif \n\t\t\tGOTO2()\n\t\t\tcounter116=1\n\t\t\twhile(counter116):\n\t\t\t\tcounter116=0\n\t\t\t\tR2=R1-TEMP[17]\n\t\t\t\tfor J in range(1,17):\n\t\t\t\t\tif(counter116):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tfor I in range(1,17):\n\t\t\t\t\t\tif(R2 < TEMP1(I+((J-1)*17))) :\n\t\t\t\t\t\t\t# AUGER OR COSTER KRONIG \n\t\t\t\t\t\t\t# STORE EJECTED ELECTRON AND UPDATE NOCC\n\t\t\t\t\t\t\tETEMP=ELEV[ISHELL,IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I,IZ[KGAS][LGAS]+1])*0.5-(ELEV[J,IZ[KGAS][LGAS]]+ELEV[J,IZ[KGAS][LGAS]+1])*0.5\n\t\t\t\t\t\t\tif(ETEMP < 0.0):\n\t\t\t\t\t\t\t\t# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS\n\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\twhile(counter117):\n\t\t\t\t\t\t\t\t\tR1=DRAND48(RDUM)\n\t\t\t\t\t\t\t\t\tif(R1 < TEMP[17]):\n\t\t\t\t\t\t\t\t\t\tcounter117=1\n\t\t\t\t\t\t\t\tcounter116=1\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tIONSUM[NVAC]=IONSUM[NVAC]+1\n\t\t\t\t\t\t\tif(IONSUM[NVAC]> 28) :\n\t\t\t\t\t\t\t\tprint(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])\n\t\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tESTORE[NVAC][IONSUM[NVAC]]=ETEMP\n\t\t\t\t\t\t\tELEFT=ELEFT-abs(ETEMP)\n\t\t\t\t\t\t\tif(ELEFT < 0.0):\n\t\t\t\t\t\t\t\tGOTO100()\n\t\t\t\t\t\t\t# RANDOM EMISSION 
DIRECTION\n\t\t\t\t\t\t\tR3=DRAND48(RDUM)\n\t\t\t\t\t\t\tTHET=numpy.arccos(1.0-2.0*R3)\n\t\t\t\t\t\t\tR4=DRAND48(RDUM)\n\t\t\t\t\t\t\tPHI=TWOPI*R4\n\t\t\t\t\t\t\tDRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)\n\t\t\t\t\t\t\tDRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)\n\t\t\t\t\t\t\tDRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1\n\t\t\t\t\t\t\tNOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1\n\t\t\t\t\t\t\t# FIND LOWEST VACANCY\n\t\t\t\t\t\t\tVACANCY(KGAS,LGAS,ISHELL,ILAST)\n\t\t\t\t\t\t\tif(ILAST == 1):\n\t\t\t\t\t\t\t\t# NO MORE TRANSITIONS POSSIBLE\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t# endif\n\t\t\t\t\t\t\tGOTO4()\n\t\t\t\t\t\t# endif\n\t\tGOTO4()\n\t\tprint(' ERROR IN CASCADE B5') \n\t\tsys.exit() \n\tGOTO100()\n\t# end",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from logupload import *
log = LogUpload()
log.uploadLogs(4)
|
normal
|
{
"blob_id": "421837698b7fc188c84a3221271f11a40d1625d9",
"index": 7280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlog.uploadLogs(4)\n",
"step-3": "<mask token>\nlog = LogUpload()\nlog.uploadLogs(4)\n",
"step-4": "from logupload import *\nlog = LogUpload()\nlog.uploadLogs(4)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print(
'¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas'
)
<|reserved_special_token_0|>
print('\nmucho gusto', n1, ',empecemos')
<|reserved_special_token_0|>
print(
'me puedes decir con que comando en linux puedo listar la informacion de un directorio?'
)
print('a)cd')
print('b) ls')
print('c) cat')
print('d) mv')
print('e) rm')
<|reserved_special_token_0|>
while respuesta_1 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_1 = input('debes volver a ingresar tu respuesta:')
if respuesta_1 == 'b':
puntaje += 10
print('Muy bien', n1, '!')
else:
puntaje -= 5
print('Incorrecto', n1, '!')
print("""
siguiente pregunta""")
print("""
cual de estos comandos sirve para mover un archivo en termux""")
print('a) cd')
print('b) cp')
print('c) mv')
print('d) cat')
print('e) chmod')
<|reserved_special_token_0|>
while respuesta_2 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_2 = input('debes volver a ingresar tu respuesta:')
if respuesta_2 == 'b':
puntaje -= 5
print('incorrecto', n1, '!')
elif respuesta_2 == 'a':
puntaje -= 5
print('mal', n1, ', incorreto')
elif respuesta_2 == 'd':
puntaje -= 5
print('no', n1, '! incorrecto')
elif respuesta_2 == 'e':
puntaje -= 5
print('mal', n1, '! incorrecto')
else:
puntaje += 10
print('correcto', n1, '!!!!')
print("""
siguiente pregunta""")
print("""
que comando puede dar permisos?""")
print('a) chmod')
print('b) cal')
print('c) rm')
print('d) mkdir')
print('e) ls -l')
<|reserved_special_token_0|>
while respuesta_3 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_3 = input('debes volver a ingresar tu respuesta:')
if respuesta_3 == 'a':
puntaje += 10
print('Muy bien', n1, '!')
else:
puntaje -= 5
print('Incorrecto', n1, '!')
print("""
siguiente pregunta""")
print("""
cual de estos comandos puede crear un directorio?""")
print('a) rm')
print('b) mv')
print('c) cp')
print('d) mkdir')
print('e) exit')
<|reserved_special_token_0|>
while respuesta_4 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_4 = input('debes volver a ingresar tu respuesta:')
if respuesta_4 == 'd':
puntaje += 10
print('Muy bien', n1, '!')
else:
puntaje -= 5
print('Incorrecto', n1, '!')
print("""
siguiente pregunta""")
print("""
con que comando puedo dar permisos e almacenaminto a termux?""")
print('a) pwd')
print('b) ls -a')
print('c) lstree')
print('d) temux setup-storage')
print('e) rm -rf')
<|reserved_special_token_0|>
while respuesta_5 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_5 = input('debes volver a ingresar tu respuesta:')
if respuesta_5 == 'd':
puntaje += 10
print('Muy bien', n1, '!')
else:
puntaje -= 5
print('Incorrecto', n1, '!')
print("""
gracias por jugar""", n1, '!')
print("""
este es tu puntaje:""")
print('tienes', puntaje, 'puntos')
print("""
chao, chuidate xd""")
<|reserved_special_token_1|>
print(
'¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas'
)
n1 = input("""
por favor dime como te llamas:""")
print('\nmucho gusto', n1, ',empecemos')
puntaje = 0
print(
'me puedes decir con que comando en linux puedo listar la informacion de un directorio?'
)
print('a)cd')
print('b) ls')
print('c) cat')
print('d) mv')
print('e) rm')
respuesta_1 = input('\n tu respuesta: ')
while respuesta_1 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_1 = input('debes volver a ingresar tu respuesta:')
if respuesta_1 == 'b':
puntaje += 10
print('Muy bien', n1, '!')
else:
puntaje -= 5
print('Incorrecto', n1, '!')
print("""
siguiente pregunta""")
print("""
cual de estos comandos sirve para mover un archivo en termux""")
print('a) cd')
print('b) cp')
print('c) mv')
print('d) cat')
print('e) chmod')
respuesta_2 = input('tu respuesta: ')
while respuesta_2 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_2 = input('debes volver a ingresar tu respuesta:')
if respuesta_2 == 'b':
puntaje -= 5
print('incorrecto', n1, '!')
elif respuesta_2 == 'a':
puntaje -= 5
print('mal', n1, ', incorreto')
elif respuesta_2 == 'd':
puntaje -= 5
print('no', n1, '! incorrecto')
elif respuesta_2 == 'e':
puntaje -= 5
print('mal', n1, '! incorrecto')
else:
puntaje += 10
print('correcto', n1, '!!!!')
print("""
siguiente pregunta""")
print("""
que comando puede dar permisos?""")
print('a) chmod')
print('b) cal')
print('c) rm')
print('d) mkdir')
print('e) ls -l')
respuesta_3 = input('\n tu respuesta: ')
while respuesta_3 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_3 = input('debes volver a ingresar tu respuesta:')
if respuesta_3 == 'a':
puntaje += 10
print('Muy bien', n1, '!')
else:
puntaje -= 5
print('Incorrecto', n1, '!')
print("""
siguiente pregunta""")
print("""
cual de estos comandos puede crear un directorio?""")
print('a) rm')
print('b) mv')
print('c) cp')
print('d) mkdir')
print('e) exit')
respuesta_4 = input('\n tu respuesta: ')
while respuesta_4 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_4 = input('debes volver a ingresar tu respuesta:')
if respuesta_4 == 'd':
puntaje += 10
print('Muy bien', n1, '!')
else:
puntaje -= 5
print('Incorrecto', n1, '!')
print("""
siguiente pregunta""")
print("""
con que comando puedo dar permisos e almacenaminto a termux?""")
print('a) pwd')
print('b) ls -a')
print('c) lstree')
print('d) temux setup-storage')
print('e) rm -rf')
respuesta_5 = input('\n tu respuesta: ')
while respuesta_5 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_5 = input('debes volver a ingresar tu respuesta:')
if respuesta_5 == 'd':
puntaje += 10
print('Muy bien', n1, '!')
else:
puntaje -= 5
print('Incorrecto', n1, '!')
print("""
gracias por jugar""", n1, '!')
print("""
este es tu puntaje:""")
print('tienes', puntaje, 'puntos')
print("""
chao, chuidate xd""")
<|reserved_special_token_1|>
#juego trivia hecho por mayu xD
print('¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas')
n1 = input('\n por favor dime como te llamas:')
print('\nmucho gusto', n1, ',empecemos')
puntaje = 0
print('me puedes decir con que comando en linux puedo listar la informacion de un directorio?')
print('a)cd')
print('b) ls')
print('c) cat')
print('d) mv')
print('e) rm')
respuesta_1 = input('\n tu respuesta: ')
while respuesta_1 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_1 = input("debes volver a ingresar tu respuesta:")
if respuesta_1 == "b":
puntaje += 10
print("Muy bien", n1, "!")
else:
puntaje -= 5
print("Incorrecto", n1, "!")
print('\nsiguiente pregunta')
print('\ncual de estos comandos sirve para mover un archivo en termux')
print('a) cd')
print('b) cp')
print('c) mv')
print('d) cat')
print('e) chmod')
respuesta_2 = input('tu respuesta: ')
while respuesta_2 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_2 = input("debes volver a ingresar tu respuesta:")
if respuesta_2 == "b":
puntaje -= 5
print('incorrecto', n1, '!')
elif respuesta_2 == "a":
puntaje -= 5
print('mal', n1, ', incorreto')
elif respuesta_2 == "d":
puntaje -= 5
print('no', n1, '! incorrecto')
elif respuesta_2 == "e":
puntaje -= 5
print('mal', n1, '! incorrecto')
else:
puntaje += 10
print('correcto', n1, '!!!!')
print('\nsiguiente pregunta')
print('\nque comando puede dar permisos?')
print('a) chmod')
print('b) cal')
print('c) rm')
print('d) mkdir')
print('e) ls -l')
respuesta_3 = input('\n tu respuesta: ')
while respuesta_3 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_3 = input("debes volver a ingresar tu respuesta:")
if respuesta_3 == "a":
puntaje += 10
print("Muy bien", n1, "!")
else:
puntaje -= 5
print("Incorrecto", n1, "!")
print('\nsiguiente pregunta')
print('\ncual de estos comandos puede crear un directorio?')
print('a) rm')
print('b) mv')
print('c) cp')
print('d) mkdir')
print('e) exit')
respuesta_4 = input('\n tu respuesta: ')
while respuesta_4 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_4 = input("debes volver a ingresar tu respuesta:")
if respuesta_4 == "d":
puntaje += 10
print("Muy bien", n1, "!")
else:
puntaje -= 5
print("Incorrecto", n1, "!")
print('\nsiguiente pregunta')
print('\ncon que comando puedo dar permisos e almacenaminto a termux?')
print('a) pwd')
print('b) ls -a')
print('c) lstree')
print('d) temux setup-storage')
print('e) rm -rf')
respuesta_5 = input('\n tu respuesta: ')
while respuesta_5 not in ('a', 'b', 'c', 'd', 'e'):
respuesta_5 = input("debes volver a ingresar tu respuesta:")
if respuesta_5 == "d":
puntaje += 10
print("Muy bien", n1, "!")
else:
puntaje -= 5
print("Incorrecto", n1, "!")
print('\ngracias por jugar', n1, '!')
print('\neste es tu puntaje:')
print('tienes', puntaje , 'puntos')
print('\nchao, chuidate xd')
|
flexible
|
{
"blob_id": "0c297e6f79682896e98c7a2933a4da6d9af7d7fe",
"index": 9060,
"step-1": "<mask token>\n",
"step-2": "print(\n '¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas'\n )\n<mask token>\nprint('\\nmucho gusto', n1, ',empecemos')\n<mask token>\nprint(\n 'me puedes decir con que comando en linux puedo listar la informacion de un directorio?'\n )\nprint('a)cd')\nprint('b) ls')\nprint('c) cat')\nprint('d) mv')\nprint('e) rm')\n<mask token>\nwhile respuesta_1 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_1 = input('debes volver a ingresar tu respuesta:')\nif respuesta_1 == 'b':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncual de estos comandos sirve para mover un archivo en termux\"\"\")\nprint('a) cd')\nprint('b) cp')\nprint('c) mv')\nprint('d) cat')\nprint('e) chmod')\n<mask token>\nwhile respuesta_2 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_2 = input('debes volver a ingresar tu respuesta:')\nif respuesta_2 == 'b':\n puntaje -= 5\n print('incorrecto', n1, '!')\nelif respuesta_2 == 'a':\n puntaje -= 5\n print('mal', n1, ', incorreto')\nelif respuesta_2 == 'd':\n puntaje -= 5\n print('no', n1, '! incorrecto')\nelif respuesta_2 == 'e':\n puntaje -= 5\n print('mal', n1, '! incorrecto')\nelse:\n puntaje += 10\n print('correcto', n1, '!!!!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\nque comando puede dar permisos?\"\"\")\nprint('a) chmod')\nprint('b) cal')\nprint('c) rm')\nprint('d) mkdir')\nprint('e) ls -l')\n<mask token>\nwhile respuesta_3 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_3 = input('debes volver a ingresar tu respuesta:')\nif respuesta_3 == 'a':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncual de estos comandos puede crear un directorio?\"\"\")\nprint('a) rm')\nprint('b) mv')\nprint('c) cp')\nprint('d) mkdir')\nprint('e) exit')\n<mask token>\nwhile respuesta_4 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_4 = input('debes volver a ingresar tu respuesta:')\nif respuesta_4 == 'd':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncon que comando puedo dar permisos e almacenaminto a termux?\"\"\")\nprint('a) pwd')\nprint('b) ls -a')\nprint('c) lstree')\nprint('d) temux setup-storage')\nprint('e) rm -rf')\n<mask token>\nwhile respuesta_5 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_5 = input('debes volver a ingresar tu respuesta:')\nif respuesta_5 == 'd':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\nprint('Incorrecto', n1, '!')\nprint(\"\"\"\ngracias por jugar\"\"\", n1, '!')\nprint(\"\"\"\neste es tu puntaje:\"\"\")\nprint('tienes', puntaje, 'puntos')\nprint(\"\"\"\nchao, chuidate xd\"\"\")\n",
"step-3": "print(\n '¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas'\n )\nn1 = input(\"\"\"\n por favor dime como te llamas:\"\"\")\nprint('\\nmucho gusto', n1, ',empecemos')\npuntaje = 0\nprint(\n 'me puedes decir con que comando en linux puedo listar la informacion de un directorio?'\n )\nprint('a)cd')\nprint('b) ls')\nprint('c) cat')\nprint('d) mv')\nprint('e) rm')\nrespuesta_1 = input('\\n tu respuesta: ')\nwhile respuesta_1 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_1 = input('debes volver a ingresar tu respuesta:')\nif respuesta_1 == 'b':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncual de estos comandos sirve para mover un archivo en termux\"\"\")\nprint('a) cd')\nprint('b) cp')\nprint('c) mv')\nprint('d) cat')\nprint('e) chmod')\nrespuesta_2 = input('tu respuesta: ')\nwhile respuesta_2 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_2 = input('debes volver a ingresar tu respuesta:')\nif respuesta_2 == 'b':\n puntaje -= 5\n print('incorrecto', n1, '!')\nelif respuesta_2 == 'a':\n puntaje -= 5\n print('mal', n1, ', incorreto')\nelif respuesta_2 == 'd':\n puntaje -= 5\n print('no', n1, '! incorrecto')\nelif respuesta_2 == 'e':\n puntaje -= 5\n print('mal', n1, '! incorrecto')\nelse:\n puntaje += 10\n print('correcto', n1, '!!!!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\nque comando puede dar permisos?\"\"\")\nprint('a) chmod')\nprint('b) cal')\nprint('c) rm')\nprint('d) mkdir')\nprint('e) ls -l')\nrespuesta_3 = input('\\n tu respuesta: ')\nwhile respuesta_3 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_3 = input('debes volver a ingresar tu respuesta:')\nif respuesta_3 == 'a':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncual de estos comandos puede crear un directorio?\"\"\")\nprint('a) rm')\nprint('b) mv')\nprint('c) cp')\nprint('d) mkdir')\nprint('e) exit')\nrespuesta_4 = input('\\n tu respuesta: ')\nwhile respuesta_4 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_4 = input('debes volver a ingresar tu respuesta:')\nif respuesta_4 == 'd':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncon que comando puedo dar permisos e almacenaminto a termux?\"\"\")\nprint('a) pwd')\nprint('b) ls -a')\nprint('c) lstree')\nprint('d) temux setup-storage')\nprint('e) rm -rf')\nrespuesta_5 = input('\\n tu respuesta: ')\nwhile respuesta_5 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_5 = input('debes volver a ingresar tu respuesta:')\nif respuesta_5 == 'd':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\nprint('Incorrecto', n1, '!')\nprint(\"\"\"\ngracias por jugar\"\"\", n1, '!')\nprint(\"\"\"\neste es tu puntaje:\"\"\")\nprint('tienes', puntaje, 'puntos')\nprint(\"\"\"\nchao, chuidate xd\"\"\")\n",
"step-4": "#juego trivia hecho por mayu xD\r\nprint('¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas')\r\nn1 = input('\\n por favor dime como te llamas:')\r\nprint('\\nmucho gusto', n1, ',empecemos')\r\npuntaje = 0\r\nprint('me puedes decir con que comando en linux puedo listar la informacion de un directorio?')\r\nprint('a)cd')\r\nprint('b) ls')\r\nprint('c) cat')\r\nprint('d) mv')\r\nprint('e) rm')\r\nrespuesta_1 = input('\\n tu respuesta: ')\r\nwhile respuesta_1 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_1 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_1 == \"b\":\r\n puntaje += 10\r\n print(\"Muy bien\", n1, \"!\")\r\nelse:\r\n puntaje -= 5\r\n print(\"Incorrecto\", n1, \"!\")\r\n \r\nprint('\\nsiguiente pregunta')\r\nprint('\\ncual de estos comandos sirve para mover un archivo en termux')\r\nprint('a) cd')\r\nprint('b) cp')\r\nprint('c) mv')\r\nprint('d) cat')\r\nprint('e) chmod')\r\nrespuesta_2 = input('tu respuesta: ')\r\nwhile respuesta_2 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_2 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_2 == \"b\":\r\n puntaje -= 5\r\n print('incorrecto', n1, '!')\r\nelif respuesta_2 == \"a\":\r\n puntaje -= 5\r\n print('mal', n1, ', incorreto')\r\nelif respuesta_2 == \"d\":\r\n puntaje -= 5\r\n print('no', n1, '! incorrecto')\r\nelif respuesta_2 == \"e\":\r\n puntaje -= 5\r\n print('mal', n1, '! incorrecto')\r\nelse:\r\n puntaje += 10\r\n print('correcto', n1, '!!!!')\r\n\r\n\r\n \r\nprint('\\nsiguiente pregunta')\r\nprint('\\nque comando puede dar permisos?')\r\nprint('a) chmod')\r\nprint('b) cal')\r\nprint('c) rm')\r\nprint('d) mkdir')\r\nprint('e) ls -l')\r\nrespuesta_3 = input('\\n tu respuesta: ')\r\nwhile respuesta_3 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_3 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_3 == \"a\":\r\n puntaje += 10\r\n print(\"Muy bien\", n1, \"!\")\r\nelse:\r\n puntaje -= 5\r\n print(\"Incorrecto\", n1, \"!\")\r\n\r\nprint('\\nsiguiente pregunta')\r\nprint('\\ncual de estos comandos puede crear un directorio?')\r\nprint('a) rm')\r\nprint('b) mv')\r\nprint('c) cp')\r\nprint('d) mkdir')\r\nprint('e) exit')\r\n\r\nrespuesta_4 = input('\\n tu respuesta: ')\r\nwhile respuesta_4 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_4 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_4 == \"d\":\r\n puntaje += 10\r\n print(\"Muy bien\", n1, \"!\")\r\nelse:\r\n puntaje -= 5\r\n print(\"Incorrecto\", n1, \"!\")\r\n\r\nprint('\\nsiguiente pregunta')\r\nprint('\\ncon que comando puedo dar permisos e almacenaminto a termux?')\r\nprint('a) pwd')\r\nprint('b) ls -a')\r\nprint('c) lstree')\r\nprint('d) temux setup-storage')\r\nprint('e) rm -rf')\r\n\r\nrespuesta_5 = input('\\n tu respuesta: ')\r\nwhile respuesta_5 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_5 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_5 == \"d\":\r\n puntaje += 10\r\n print(\"Muy bien\", n1, \"!\")\r\nelse:\r\n puntaje -= 5\r\n\r\nprint(\"Incorrecto\", n1, \"!\")\r\nprint('\\ngracias por jugar', n1, '!')\r\nprint('\\neste es tu puntaje:')\r\nprint('tienes', puntaje , 'puntos')\r\nprint('\\nchao, chuidate xd')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^class/([^/]+)/?$', views.puppet_class, name='puppet-class'),
url(r'^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host, name='edit-host'),
url(r'^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host, name='add-host'),
url(r'^delete/([^/]+)/?$', views.delete_host, name='delete-host'),
url(r'^user/(?P<loginid>[^/]+)/?$', views.edit_user, name='edit-user'),
# url(r'^add-host', views.add_host, name='add-host'),
url(r'^', views.index, name='index'),
]
|
normal
|
{
"blob_id": "add56d52f3c88f814a166d12c3bc5a5906268864",
"index": 484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^class/([^/]+)/?$', views.puppet_class, name=\n 'puppet-class'), url('^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host,\n name='edit-host'), url('^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host,\n name='add-host'), url('^delete/([^/]+)/?$', views.delete_host, name=\n 'delete-host'), url('^user/(?P<loginid>[^/]+)/?$', views.edit_user,\n name='edit-user'), url('^', views.index, name='index')]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\nurlpatterns = [url('^class/([^/]+)/?$', views.puppet_class, name=\n 'puppet-class'), url('^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host,\n name='edit-host'), url('^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host,\n name='add-host'), url('^delete/([^/]+)/?$', views.delete_host, name=\n 'delete-host'), url('^user/(?P<loginid>[^/]+)/?$', views.edit_user,\n name='edit-user'), url('^', views.index, name='index')]\n",
"step-4": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^class/([^/]+)/?$', views.puppet_class, name='puppet-class'),\n url(r'^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host, name='edit-host'),\n url(r'^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host, name='add-host'),\n url(r'^delete/([^/]+)/?$', views.delete_host, name='delete-host'),\n url(r'^user/(?P<loginid>[^/]+)/?$', views.edit_user, name='edit-user'),\n # url(r'^add-host', views.add_host, name='add-host'),\n url(r'^', views.index, name='index'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
# Developed by Hector Cobos
import sys
import csv
import datetime
def mapper():
# Using a reader in order to read the whole file
reader = csv.reader(sys.stdin, delimiter='\t')
# Jump to the next line. We want to avoid the line with the name of the fields
reader.next()
# loop
for line in reader:
# Checking no. of fields are correct
if len(line) == 19:
author_id=line[3]
date=line[8]
time = date.strip().split(" ")
hour = time[1].strip().split(":")
print "{0}\t{1}".format(author_id, hour[0])
mapper()
|
normal
|
{
"blob_id": "d959ed49a83fb63e0bce31b5c81c013f0986706b",
"index": 4314,
"step-1": "#!/usr/bin/python\n\n# Developed by Hector Cobos\n\nimport sys\nimport csv\nimport datetime\n\ndef mapper():\n\t# Using a reader in order to read the whole file\n\treader = csv.reader(sys.stdin, delimiter='\\t')\n\t# Jump to the next line. We want to avoid the line with the name of the fields\n\treader.next()\n\t# loop\n\tfor line in reader:\n\t\t# Checking no. of fields are correct\n \t\tif len(line) == 19:\n\t\t\tauthor_id=line[3]\n\t\t\tdate=line[8]\n\t\t\ttime = date.strip().split(\" \")\n\t\t\thour = time[1].strip().split(\":\")\n \t\tprint \"{0}\\t{1}\".format(author_id, hour[0])\n\n\nmapper()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class task_NER:
def __init__(self):
self.name = 'NER_task_bio'
self.controller_size = 128
self.controller_layers = 1
self.num_read_heads = 1
self.num_write_heads = 1
self.num_inputs = 200
self.num_outputs = 7
self.memory_N = 128
self.memory_M = 128
self.num_batches = -1
self.save_batch = 5
self.batch_size = 10
self.num_epoch = 4
self.adam_lr = 0.0001
self.adam_betas = 0.9, 0.999
self.adam_eps = 1e-08
self.machine = None
self.loss = None
self.optimizer = None
self.labelDict = None
self.reverseDict = None
self.concept_path_train = '../medical_data/train_data/concept'
self.text_path_train = '../medical_data/train_data/txt'
self.concept_path_test = '../medical_data/test_data/concept'
self.text_path_test = '../medical_data/test_data/txt'
self.save_path = '../medical_data/cleaned_files'
self.embed_dic_path = (
'../medical_data/embeddings/bio_embedding_dictionary.dat')
self.random_vec = '../medical_data/embeddings/random_vec.dat'
self.model_path = '../saved_models/'
self.padding_symbol = np.full(self.num_inputs, 0.01)
def get_task_name(self):
return self.name
<|reserved_special_token_0|>
def init_loss(self):
self.loss = nn.CrossEntropyLoss(reduction='mean')
<|reserved_special_token_0|>
def calc_loss(self, Y_pred, Y):
loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)
for i in range(Y_pred.shape[0]):
loss_vec[i] = self.loss(Y_pred[i], Y[i])
return torch.mean(loss_vec)
<|reserved_special_token_0|>
def print_word(self, token_class):
word = self.reverseDict[token_class]
print(word + '\n')
<|reserved_special_token_0|>
def initialize_labels(self):
self.labelDict = {}
self.reverseDict = {}
self.labelDict['b-problem'] = 0
self.labelDict['i-problem'] = 1
self.labelDict['b-test'] = 2
self.labelDict['i-test'] = 3
self.labelDict['b-treatment'] = 4
self.labelDict['i-treatment'] = 5
self.labelDict['o'] = 6
for k in self.labelDict.keys():
self.reverseDict[self.labelDict[k]] = k
self.save_data([self.labelDict, self.reverseDict], os.path.join(
self.save_path, 'label_dicts_bio.dat'))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def modify_labels(self, conceptList, tags):
for e in conceptList:
if e['start_line'] == e['end_line']:
tags[e['start_line'] - 1][e['start_word_no']:e[
'end_word_no'] + 1] = e['label_index'][:]
else:
start = e['start_line']
end = e['end_line']
beg = 0
for i in range(start, end + 1):
if i == start:
tags[i - 1][e['start_word_no']:] = e['label_index'][
0:len(tags[i - 1]) - e['start_word_no']]
beg = len(tags[i - 1]) - e['start_word_no']
elif i == end:
tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][
beg:]
else:
tags[i - 1][:] = e['label_index'][beg:beg + len(
tags[i - 1])]
beg = beg + len(tags[i - 1])
return tags
<|reserved_special_token_0|>
def save_data(self, obj_list, s_path):
pickle.dump(tuple(obj_list), open(s_path, 'wb'))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_data(self, task='train'):
line_list, tag_list = self.prepare_data(task)
story_idx = list(range(0, len(line_list)))
random.shuffle(story_idx)
num_batch = int(len(story_idx) / self.batch_size)
self.num_batches = num_batch
x_out = []
y_out = []
counter = 1
for i in story_idx:
if num_batch <= 0:
break
x_out.append(line_list[i])
y_out.append(tag_list[i])
if counter % self.batch_size == 0:
counter = 0
x_out_pad, y_out_pad = self.padding(x_out, y_out)
x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=
torch.float32)
y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=
torch.long)
x_out = []
y_out = []
num_batch -= 1
yield self.num_batches - num_batch, x_out_array, y_out_array
counter += 1
def train_model(self):
loss_list = []
seq_length = []
last_batch = 0
for j in range(self.num_epoch):
for batch_num, X, Y in self.get_data(task='train'):
self.optimizer.zero_grad()
self.machine.initialization(self.batch_size)
Y_out = torch.empty((X.shape[0], X.shape[1], self.
num_outputs), dtype=torch.float32)
embeddings = self.machine.backward_prediction(X)
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[
temp_size - i - 1])
loss = self.calc_loss(Y_out, Y)
loss.backward()
self.clip_grads()
self.optimizer.step()
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor'] + class_bag['test_cor'
] + class_bag['treatment_cor']
tot = class_bag['total']
loss_list += [loss.item()]
seq_length += [Y.shape[0]]
if batch_num % self.save_batch == 0:
self.save_model(j, batch_num)
last_batch = batch_num
print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +
', Batch: ' + str(batch_num) + '/' + str(self.
num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +
'Batch Accuracy (Entity Prediction): {0:.2f} %, '.
format(float(corr) / float(tot) * 100.0) +
'Batch Accuracy (Word Prediction): {0:.2f} %'.format(
class_bag['word_pred_acc']))
self.save_model(j, last_batch)
def test_model(self):
correct = 0
total = 0
result_dict = {}
result_dict['total_problem'] = 0
result_dict['total_test'] = 0
result_dict['total_treatment'] = 0
result_dict['correct_problem'] = 0
result_dict['correct_test'] = 0
result_dict['correct_treatment'] = 0
result_dict['false_positive_problem'] = 0
result_dict['false_positive_test'] = 0
result_dict['false_positive_treatment'] = 0
print('\n')
for batch_num, X, Y in self.get_data(task='test'):
self.machine.initialization(self.batch_size)
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),
dtype=torch.float32)
embeddings = self.machine.backward_prediction(X)
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -
i - 1])
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor'] + class_bag['test_cor'
] + class_bag['treatment_cor']
tot = class_bag['total']
result_dict['total_problem'] = result_dict['total_problem'
] + class_bag['problem']
result_dict['total_test'] = result_dict['total_test'] + class_bag[
'test']
result_dict['total_treatment'] = result_dict['total_treatment'
] + class_bag['treatment']
result_dict['correct_problem'] = result_dict['correct_problem'
] + class_bag['problem_cor']
result_dict['correct_test'] = result_dict['correct_test'
] + class_bag['test_cor']
result_dict['correct_treatment'] = result_dict['correct_treatment'
] + class_bag['treatment_cor']
result_dict['false_positive_problem'] = result_dict[
'false_positive_problem'] + class_bag['problem_fp']
result_dict['false_positive_test'] = result_dict[
'false_positive_test'] + class_bag['test_fp']
result_dict['false_positive_treatment'] = result_dict[
'false_positive_treatment'] + class_bag['treatment_fp']
correct += corr
total += tot
print('Test Example ' + str(batch_num) + '/' + str(self.
num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.
format(float(corr) / float(tot) * 100.0) +
'Batch Accuracy (Word Prediction): {0:.2f} %'.format(
class_bag['word_pred_acc']))
result_dict['accuracy'] = float(correct) / float(total) * 100.0
result_dict = self.calc_metrics(result_dict)
print('\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(
result_dict['accuracy']))
return result_dict
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class task_NER:
def __init__(self):
self.name = 'NER_task_bio'
self.controller_size = 128
self.controller_layers = 1
self.num_read_heads = 1
self.num_write_heads = 1
self.num_inputs = 200
self.num_outputs = 7
self.memory_N = 128
self.memory_M = 128
self.num_batches = -1
self.save_batch = 5
self.batch_size = 10
self.num_epoch = 4
self.adam_lr = 0.0001
self.adam_betas = 0.9, 0.999
self.adam_eps = 1e-08
self.machine = None
self.loss = None
self.optimizer = None
self.labelDict = None
self.reverseDict = None
self.concept_path_train = '../medical_data/train_data/concept'
self.text_path_train = '../medical_data/train_data/txt'
self.concept_path_test = '../medical_data/test_data/concept'
self.text_path_test = '../medical_data/test_data/txt'
self.save_path = '../medical_data/cleaned_files'
self.embed_dic_path = (
'../medical_data/embeddings/bio_embedding_dictionary.dat')
self.random_vec = '../medical_data/embeddings/random_vec.dat'
self.model_path = '../saved_models/'
self.padding_symbol = np.full(self.num_inputs, 0.01)
def get_task_name(self):
return self.name
<|reserved_special_token_0|>
def init_loss(self):
self.loss = nn.CrossEntropyLoss(reduction='mean')
def init_optimizer(self):
self.optimizer = optim.Adam(self.machine.parameters(), lr=self.
adam_lr, betas=self.adam_betas, eps=self.adam_eps)
def calc_loss(self, Y_pred, Y):
loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)
for i in range(Y_pred.shape[0]):
loss_vec[i] = self.loss(Y_pred[i], Y[i])
return torch.mean(loss_vec)
<|reserved_special_token_0|>
def print_word(self, token_class):
word = self.reverseDict[token_class]
print(word + '\n')
def clip_grads(self):
"""Gradient clipping to the range [10, 10]."""
parameters = list(filter(lambda p: p.grad is not None, self.machine
.parameters()))
for p in parameters:
p.grad.data.clamp_(-10, 10)
def initialize_labels(self):
self.labelDict = {}
self.reverseDict = {}
self.labelDict['b-problem'] = 0
self.labelDict['i-problem'] = 1
self.labelDict['b-test'] = 2
self.labelDict['i-test'] = 3
self.labelDict['b-treatment'] = 4
self.labelDict['i-treatment'] = 5
self.labelDict['o'] = 6
for k in self.labelDict.keys():
self.reverseDict[self.labelDict[k]] = k
self.save_data([self.labelDict, self.reverseDict], os.path.join(
self.save_path, 'label_dicts_bio.dat'))
def parse_concepts(self, file_path):
conceptList = []
f = open(file_path)
content = f.readlines()
f.close()
for x in content:
dic = {}
x = re.sub('\n', ' ', x)
x = re.sub('\\ +', ' ', x)
x = x.strip().split('||')
temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]
temp1[0] = temp1[0][3:]
temp1[-3] = temp1[-3][0:-1]
entity = temp1[0:-2]
if len(entity) >= 1:
lab = ['i'] * len(entity)
lab[0] = 'b'
lab = [(l + '-' + label) for l in lab]
else:
print('Data in File: ' + file_path +
', not in expected format..')
exit()
noLab = [self.labelDict[l] for l in lab]
sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split
(':')[1])
eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split
(':')[1])
"""
# Printing the information
print("------------------------------------------------------------")
print("Entity: " + str(entity))
print("Entity Label: " + label)
print("Labels - BIO form: " + str(lab))
print("Labels Index: " + str(noLab))
print("Start Line: " + str(sLine) + ", Start Column: " + str(sCol))
print("End Line: " + str(eLine) + ", End Column: " + str(eCol))
print("------------------------------------------------------------")
"""
dic['entity'] = entity
dic['label'] = label
dic['BIO_labels'] = lab
dic['label_index'] = noLab
dic['start_line'] = sLine
dic['start_word_no'] = sCol
dic['end_line'] = eLine
dic['end_word_no'] = eCol
conceptList.append(dic)
return conceptList
def parse_summary(self, file_path):
file_lines = []
tags = []
default_label = len(self.labelDict) - 1
f = open(file_path)
content = f.readlines()
f.close()
for x in content:
x = re.sub('\n', ' ', x)
x = re.sub('\\ +', ' ', x)
file_lines.append(x.strip().split(' '))
tags.append([default_label] * len(file_lines[-1]))
"""
# Printing the information
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(file_lines[-1])
print("
Corresponding labels:")
print(tags[-1])
print("------------------------------------------------------------")
counter += 1
"""
assert len(tags[-1]) == len(file_lines[-1]
), 'Line length is not matching labels length...'
return file_lines, tags
def modify_labels(self, conceptList, tags):
for e in conceptList:
if e['start_line'] == e['end_line']:
tags[e['start_line'] - 1][e['start_word_no']:e[
'end_word_no'] + 1] = e['label_index'][:]
else:
start = e['start_line']
end = e['end_line']
beg = 0
for i in range(start, end + 1):
if i == start:
tags[i - 1][e['start_word_no']:] = e['label_index'][
0:len(tags[i - 1]) - e['start_word_no']]
beg = len(tags[i - 1]) - e['start_word_no']
elif i == end:
tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][
beg:]
else:
tags[i - 1][:] = e['label_index'][beg:beg + len(
tags[i - 1])]
beg = beg + len(tags[i - 1])
return tags
def print_data(self, file, file_lines, tags):
counter = 1
print('\n************ Printing details of the file: ' + file +
' ************\n')
for x in file_lines:
print(
'------------------------------------------------------------')
print('File Lines No: ' + str(counter))
print(x)
print('\nCorresponding labels:')
print([self.reverseDict[i] for i in tags[counter - 1]])
print('\nCorresponding Label Indices:')
print(tags[counter - 1])
print(
'------------------------------------------------------------')
counter += 1
def save_data(self, obj_list, s_path):
pickle.dump(tuple(obj_list), open(s_path, 'wb'))
def acquire_data(self, task):
data = {}
if task == 'train':
t_path = self.text_path_train
c_path = self.concept_path_train
else:
t_path = self.text_path_test
c_path = self.concept_path_test
for f in os.listdir(t_path):
f1 = f.split('.')[0] + '.con'
if os.path.isfile(os.path.join(c_path, f1)):
conceptList = self.parse_concepts(os.path.join(c_path, f1))
file_lines, tags = self.parse_summary(os.path.join(t_path, f))
tags = self.modify_labels(conceptList, tags)
data[f1] = [conceptList, file_lines, tags]
return data
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def prepare_data(self, task='train'):
line_list, tag_list = None, None
"""
line_list is the list of rows, where each row is a list of all the words in a medical summary
Similar is the case for tag_list, except, it stores labels for each words
"""
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
if not os.path.exists(os.path.join(self.save_path,
'label_dicts_bio.dat')):
self.initialize_labels()
else:
self.labelDict, self.reverseDict = pickle.load(open(os.path.
join(self.save_path, 'label_dicts_bio.dat'), 'rb'))
if not os.path.exists(os.path.join(self.save_path,
'object_dict_bio_' + str(task) + '.dat')):
data_dict = self.acquire_data(task)
line_list, tag_list = self.structure_data(data_dict)
line_list = self.embed_input(line_list)
self.save_data([line_list, tag_list], os.path.join(self.
save_path, 'object_dict_bio_' + str(task) + '.dat'))
else:
line_list, tag_list = pickle.load(open(os.path.join(self.
save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))
return line_list, tag_list
def get_data(self, task='train'):
line_list, tag_list = self.prepare_data(task)
story_idx = list(range(0, len(line_list)))
random.shuffle(story_idx)
num_batch = int(len(story_idx) / self.batch_size)
self.num_batches = num_batch
x_out = []
y_out = []
counter = 1
for i in story_idx:
if num_batch <= 0:
break
x_out.append(line_list[i])
y_out.append(tag_list[i])
if counter % self.batch_size == 0:
counter = 0
x_out_pad, y_out_pad = self.padding(x_out, y_out)
x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=
torch.float32)
y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=
torch.long)
x_out = []
y_out = []
num_batch -= 1
yield self.num_batches - num_batch, x_out_array, y_out_array
counter += 1
def train_model(self):
loss_list = []
seq_length = []
last_batch = 0
for j in range(self.num_epoch):
for batch_num, X, Y in self.get_data(task='train'):
self.optimizer.zero_grad()
self.machine.initialization(self.batch_size)
Y_out = torch.empty((X.shape[0], X.shape[1], self.
num_outputs), dtype=torch.float32)
embeddings = self.machine.backward_prediction(X)
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[
temp_size - i - 1])
loss = self.calc_loss(Y_out, Y)
loss.backward()
self.clip_grads()
self.optimizer.step()
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor'] + class_bag['test_cor'
] + class_bag['treatment_cor']
tot = class_bag['total']
loss_list += [loss.item()]
seq_length += [Y.shape[0]]
if batch_num % self.save_batch == 0:
self.save_model(j, batch_num)
last_batch = batch_num
print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +
', Batch: ' + str(batch_num) + '/' + str(self.
num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +
'Batch Accuracy (Entity Prediction): {0:.2f} %, '.
format(float(corr) / float(tot) * 100.0) +
'Batch Accuracy (Word Prediction): {0:.2f} %'.format(
class_bag['word_pred_acc']))
self.save_model(j, last_batch)
def test_model(self):
correct = 0
total = 0
result_dict = {}
result_dict['total_problem'] = 0
result_dict['total_test'] = 0
result_dict['total_treatment'] = 0
result_dict['correct_problem'] = 0
result_dict['correct_test'] = 0
result_dict['correct_treatment'] = 0
result_dict['false_positive_problem'] = 0
result_dict['false_positive_test'] = 0
result_dict['false_positive_treatment'] = 0
print('\n')
for batch_num, X, Y in self.get_data(task='test'):
self.machine.initialization(self.batch_size)
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),
dtype=torch.float32)
embeddings = self.machine.backward_prediction(X)
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -
i - 1])
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor'] + class_bag['test_cor'
] + class_bag['treatment_cor']
tot = class_bag['total']
result_dict['total_problem'] = result_dict['total_problem'
] + class_bag['problem']
result_dict['total_test'] = result_dict['total_test'] + class_bag[
'test']
result_dict['total_treatment'] = result_dict['total_treatment'
] + class_bag['treatment']
result_dict['correct_problem'] = result_dict['correct_problem'
] + class_bag['problem_cor']
result_dict['correct_test'] = result_dict['correct_test'
] + class_bag['test_cor']
result_dict['correct_treatment'] = result_dict['correct_treatment'
] + class_bag['treatment_cor']
result_dict['false_positive_problem'] = result_dict[
'false_positive_problem'] + class_bag['problem_fp']
result_dict['false_positive_test'] = result_dict[
'false_positive_test'] + class_bag['test_fp']
result_dict['false_positive_treatment'] = result_dict[
'false_positive_treatment'] + class_bag['treatment_fp']
correct += corr
total += tot
print('Test Example ' + str(batch_num) + '/' + str(self.
num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.
format(float(corr) / float(tot) * 100.0) +
'Batch Accuracy (Word Prediction): {0:.2f} %'.format(
class_bag['word_pred_acc']))
result_dict['accuracy'] = float(correct) / float(total) * 100.0
result_dict = self.calc_metrics(result_dict)
print('\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(
result_dict['accuracy']))
return result_dict
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def load_model(self, option, epoch, batch):
path = self.model_path + self.name + '/' + self.name + '_' + str(epoch
) + '_' + str(batch) + '_saved_model.pth.tar'
if option == 1:
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_dic'])
else:
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.machine.eval()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class task_NER:
def __init__(self):
self.name = 'NER_task_bio'
self.controller_size = 128
self.controller_layers = 1
self.num_read_heads = 1
self.num_write_heads = 1
self.num_inputs = 200
self.num_outputs = 7
self.memory_N = 128
self.memory_M = 128
self.num_batches = -1
self.save_batch = 5
self.batch_size = 10
self.num_epoch = 4
self.adam_lr = 0.0001
self.adam_betas = 0.9, 0.999
self.adam_eps = 1e-08
self.machine = None
self.loss = None
self.optimizer = None
self.labelDict = None
self.reverseDict = None
self.concept_path_train = '../medical_data/train_data/concept'
self.text_path_train = '../medical_data/train_data/txt'
self.concept_path_test = '../medical_data/test_data/concept'
self.text_path_test = '../medical_data/test_data/txt'
self.save_path = '../medical_data/cleaned_files'
self.embed_dic_path = (
'../medical_data/embeddings/bio_embedding_dictionary.dat')
self.random_vec = '../medical_data/embeddings/random_vec.dat'
self.model_path = '../saved_models/'
self.padding_symbol = np.full(self.num_inputs, 0.01)
def get_task_name(self):
return self.name
def init_dnc(self):
self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.
controller_size, self.controller_layers, self.num_read_heads,
self.num_write_heads, self.memory_N, self.memory_M)
def init_loss(self):
self.loss = nn.CrossEntropyLoss(reduction='mean')
def init_optimizer(self):
self.optimizer = optim.Adam(self.machine.parameters(), lr=self.
adam_lr, betas=self.adam_betas, eps=self.adam_eps)
def calc_loss(self, Y_pred, Y):
loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)
for i in range(Y_pred.shape[0]):
loss_vec[i] = self.loss(Y_pred[i], Y[i])
return torch.mean(loss_vec)
def calc_cost(self, Y_pred, Y):
"""
Note:
            1). For a prediction to count as a True Positive, it must match the labeled entity completely (not partially); otherwise it is a False Negative.
            2). For a prediction to count as a False Positive, it must be a full entity (BIII) that does not completely match the labeled entity.
"""
class_bag = {}
class_bag['problem'] = 0
class_bag['test'] = 0
class_bag['treatment'] = 0
class_bag['problem_cor'] = 0
class_bag['test_cor'] = 0
class_bag['treatment_cor'] = 0
class_bag['problem_fp'] = 0
class_bag['test_fp'] = 0
class_bag['treatment_fp'] = 0
pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()
).reshape(-1)
Y = np.transpose(Y.numpy()).reshape(-1)
        cor_pred = (Y == pred_class).astype(int)  # builtin int; the np.int alias is removed in newer NumPy
class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size
) * 100.0
beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])
target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1
if target[0] == -1:
target = target[1:]
end_idx = list(target[np.where(Y[target] != 6)[0]])
if Y[-1] != 6:
end_idx.append(Y.size - 1)
assert len(beg_idx) == len(end_idx)
class_bag['total'] = len(beg_idx)
sum_vec = np.cumsum(cor_pred)
for b, e in zip(beg_idx, end_idx):
idx_range = e - b + 1
sum_range = sum_vec[e] - sum_vec[b] + 1
lab = self.reverseDict[Y[b]][2:]
class_bag[lab] = class_bag[lab] + 1
if sum_range == idx_range:
class_bag[lab + '_cor'] = class_bag[lab + '_cor'] + 1
beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])
for b in beg_idx_p:
if cor_pred[b] == 0:
lab = self.reverseDict[pred_class[b]][2:]
class_bag[lab + '_fp'] = class_bag[lab + '_fp'] + 1
return class_bag
def print_word(self, token_class):
word = self.reverseDict[token_class]
print(word + '\n')
def clip_grads(self):
"""Gradient clipping to the range [10, 10]."""
parameters = list(filter(lambda p: p.grad is not None, self.machine
.parameters()))
for p in parameters:
p.grad.data.clamp_(-10, 10)
def initialize_labels(self):
self.labelDict = {}
self.reverseDict = {}
self.labelDict['b-problem'] = 0
self.labelDict['i-problem'] = 1
self.labelDict['b-test'] = 2
self.labelDict['i-test'] = 3
self.labelDict['b-treatment'] = 4
self.labelDict['i-treatment'] = 5
self.labelDict['o'] = 6
for k in self.labelDict.keys():
self.reverseDict[self.labelDict[k]] = k
self.save_data([self.labelDict, self.reverseDict], os.path.join(
self.save_path, 'label_dicts_bio.dat'))
def parse_concepts(self, file_path):
conceptList = []
f = open(file_path)
content = f.readlines()
f.close()
for x in content:
dic = {}
x = re.sub('\n', ' ', x)
x = re.sub('\\ +', ' ', x)
x = x.strip().split('||')
temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]
temp1[0] = temp1[0][3:]
temp1[-3] = temp1[-3][0:-1]
entity = temp1[0:-2]
if len(entity) >= 1:
lab = ['i'] * len(entity)
lab[0] = 'b'
lab = [(l + '-' + label) for l in lab]
else:
print('Data in File: ' + file_path +
', not in expected format..')
exit()
noLab = [self.labelDict[l] for l in lab]
sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split
(':')[1])
eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split
(':')[1])
"""
# Printing the information
print("------------------------------------------------------------")
print("Entity: " + str(entity))
print("Entity Label: " + label)
print("Labels - BIO form: " + str(lab))
print("Labels Index: " + str(noLab))
print("Start Line: " + str(sLine) + ", Start Column: " + str(sCol))
print("End Line: " + str(eLine) + ", End Column: " + str(eCol))
print("------------------------------------------------------------")
"""
dic['entity'] = entity
dic['label'] = label
dic['BIO_labels'] = lab
dic['label_index'] = noLab
dic['start_line'] = sLine
dic['start_word_no'] = sCol
dic['end_line'] = eLine
dic['end_word_no'] = eCol
conceptList.append(dic)
return conceptList
def parse_summary(self, file_path):
file_lines = []
tags = []
default_label = len(self.labelDict) - 1
f = open(file_path)
content = f.readlines()
f.close()
for x in content:
x = re.sub('\n', ' ', x)
x = re.sub('\\ +', ' ', x)
file_lines.append(x.strip().split(' '))
tags.append([default_label] * len(file_lines[-1]))
"""
# Printing the information
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(file_lines[-1])
print("
Corresponding labels:")
print(tags[-1])
print("------------------------------------------------------------")
counter += 1
"""
assert len(tags[-1]) == len(file_lines[-1]
), 'Line length is not matching labels length...'
return file_lines, tags
def modify_labels(self, conceptList, tags):
for e in conceptList:
if e['start_line'] == e['end_line']:
tags[e['start_line'] - 1][e['start_word_no']:e[
'end_word_no'] + 1] = e['label_index'][:]
else:
start = e['start_line']
end = e['end_line']
beg = 0
for i in range(start, end + 1):
if i == start:
tags[i - 1][e['start_word_no']:] = e['label_index'][
0:len(tags[i - 1]) - e['start_word_no']]
beg = len(tags[i - 1]) - e['start_word_no']
elif i == end:
tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][
beg:]
else:
tags[i - 1][:] = e['label_index'][beg:beg + len(
tags[i - 1])]
beg = beg + len(tags[i - 1])
return tags
def print_data(self, file, file_lines, tags):
counter = 1
print('\n************ Printing details of the file: ' + file +
' ************\n')
for x in file_lines:
print(
'------------------------------------------------------------')
print('File Lines No: ' + str(counter))
print(x)
print('\nCorresponding labels:')
print([self.reverseDict[i] for i in tags[counter - 1]])
print('\nCorresponding Label Indices:')
print(tags[counter - 1])
print(
'------------------------------------------------------------')
counter += 1
def save_data(self, obj_list, s_path):
pickle.dump(tuple(obj_list), open(s_path, 'wb'))
def acquire_data(self, task):
data = {}
if task == 'train':
t_path = self.text_path_train
c_path = self.concept_path_train
else:
t_path = self.text_path_test
c_path = self.concept_path_test
for f in os.listdir(t_path):
f1 = f.split('.')[0] + '.con'
if os.path.isfile(os.path.join(c_path, f1)):
conceptList = self.parse_concepts(os.path.join(c_path, f1))
file_lines, tags = self.parse_summary(os.path.join(t_path, f))
tags = self.modify_labels(conceptList, tags)
data[f1] = [conceptList, file_lines, tags]
return data
def structure_data(self, data_dict):
final_line_list = []
final_tag_list = []
for k in data_dict.keys():
file_lines = data_dict[k][1]
tags = data_dict[k][2]
temp1 = []
temp2 = []
for i in range(len(file_lines)):
temp1.extend(file_lines[i])
temp2.extend(tags[i])
assert len(temp1) == len(temp2
), 'Word length not matching Label length for story in ' + str(
k)
final_line_list.append(temp1)
final_tag_list.append(temp2)
assert len(final_line_list) == len(final_tag_list
), 'Number of stories not matching number of labels list'
return final_line_list, final_tag_list
def padding(self, line_list, tag_list):
diff = 0
max_len = 0
outside_class = len(self.labelDict) - 1
for i in range(len(line_list)):
if len(line_list[i]) > max_len:
max_len = len(line_list[i])
for i in range(len(line_list)):
diff = max_len - len(line_list[i])
line_list[i].extend([self.padding_symbol] * diff)
tag_list[i].extend([outside_class] * diff)
assert len(line_list[i]) == max_len and len(line_list[i]) == len(
tag_list[i]), 'Padding unsuccessful'
return np.asarray(line_list), np.asarray(tag_list)
<|reserved_special_token_0|>
def prepare_data(self, task='train'):
line_list, tag_list = None, None
"""
line_list is the list of rows, where each row is a list of all the words in a medical summary
Similar is the case for tag_list, except, it stores labels for each words
"""
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
if not os.path.exists(os.path.join(self.save_path,
'label_dicts_bio.dat')):
self.initialize_labels()
else:
self.labelDict, self.reverseDict = pickle.load(open(os.path.
join(self.save_path, 'label_dicts_bio.dat'), 'rb'))
if not os.path.exists(os.path.join(self.save_path,
'object_dict_bio_' + str(task) + '.dat')):
data_dict = self.acquire_data(task)
line_list, tag_list = self.structure_data(data_dict)
line_list = self.embed_input(line_list)
self.save_data([line_list, tag_list], os.path.join(self.
save_path, 'object_dict_bio_' + str(task) + '.dat'))
else:
line_list, tag_list = pickle.load(open(os.path.join(self.
save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))
return line_list, tag_list
def get_data(self, task='train'):
line_list, tag_list = self.prepare_data(task)
story_idx = list(range(0, len(line_list)))
random.shuffle(story_idx)
num_batch = int(len(story_idx) / self.batch_size)
self.num_batches = num_batch
x_out = []
y_out = []
counter = 1
for i in story_idx:
if num_batch <= 0:
break
x_out.append(line_list[i])
y_out.append(tag_list[i])
if counter % self.batch_size == 0:
counter = 0
x_out_pad, y_out_pad = self.padding(x_out, y_out)
x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=
torch.float32)
y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=
torch.long)
x_out = []
y_out = []
num_batch -= 1
yield self.num_batches - num_batch, x_out_array, y_out_array
counter += 1
def train_model(self):
loss_list = []
seq_length = []
last_batch = 0
for j in range(self.num_epoch):
for batch_num, X, Y in self.get_data(task='train'):
self.optimizer.zero_grad()
self.machine.initialization(self.batch_size)
Y_out = torch.empty((X.shape[0], X.shape[1], self.
num_outputs), dtype=torch.float32)
embeddings = self.machine.backward_prediction(X)
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[
temp_size - i - 1])
loss = self.calc_loss(Y_out, Y)
loss.backward()
self.clip_grads()
self.optimizer.step()
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor'] + class_bag['test_cor'
] + class_bag['treatment_cor']
tot = class_bag['total']
loss_list += [loss.item()]
seq_length += [Y.shape[0]]
if batch_num % self.save_batch == 0:
self.save_model(j, batch_num)
last_batch = batch_num
print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +
', Batch: ' + str(batch_num) + '/' + str(self.
num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +
'Batch Accuracy (Entity Prediction): {0:.2f} %, '.
format(float(corr) / float(tot) * 100.0) +
'Batch Accuracy (Word Prediction): {0:.2f} %'.format(
class_bag['word_pred_acc']))
self.save_model(j, last_batch)
def test_model(self):
correct = 0
total = 0
result_dict = {}
result_dict['total_problem'] = 0
result_dict['total_test'] = 0
result_dict['total_treatment'] = 0
result_dict['correct_problem'] = 0
result_dict['correct_test'] = 0
result_dict['correct_treatment'] = 0
result_dict['false_positive_problem'] = 0
result_dict['false_positive_test'] = 0
result_dict['false_positive_treatment'] = 0
print('\n')
for batch_num, X, Y in self.get_data(task='test'):
self.machine.initialization(self.batch_size)
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),
dtype=torch.float32)
embeddings = self.machine.backward_prediction(X)
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -
i - 1])
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor'] + class_bag['test_cor'
] + class_bag['treatment_cor']
tot = class_bag['total']
result_dict['total_problem'] = result_dict['total_problem'
] + class_bag['problem']
result_dict['total_test'] = result_dict['total_test'] + class_bag[
'test']
result_dict['total_treatment'] = result_dict['total_treatment'
] + class_bag['treatment']
result_dict['correct_problem'] = result_dict['correct_problem'
] + class_bag['problem_cor']
result_dict['correct_test'] = result_dict['correct_test'
] + class_bag['test_cor']
result_dict['correct_treatment'] = result_dict['correct_treatment'
] + class_bag['treatment_cor']
result_dict['false_positive_problem'] = result_dict[
'false_positive_problem'] + class_bag['problem_fp']
result_dict['false_positive_test'] = result_dict[
'false_positive_test'] + class_bag['test_fp']
result_dict['false_positive_treatment'] = result_dict[
'false_positive_treatment'] + class_bag['treatment_fp']
correct += corr
total += tot
print('Test Example ' + str(batch_num) + '/' + str(self.
num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.
format(float(corr) / float(tot) * 100.0) +
'Batch Accuracy (Word Prediction): {0:.2f} %'.format(
class_bag['word_pred_acc']))
result_dict['accuracy'] = float(correct) / float(total) * 100.0
result_dict = self.calc_metrics(result_dict)
print('\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(
result_dict['accuracy']))
return result_dict
def calc_metrics(self, result_dict):
precision_p = float(result_dict['correct_problem']) / float(
result_dict['correct_problem'] + result_dict[
'false_positive_problem'])
recall_p = float(result_dict['correct_problem']) / float(result_dict
['total_problem'])
precision_ts = float(result_dict['correct_test']) / float(
result_dict['correct_test'] + result_dict['false_positive_test'])
recall_ts = float(result_dict['correct_test']) / float(result_dict[
'total_test'])
precision_tr = float(result_dict['correct_treatment']) / float(
result_dict['correct_treatment'] + result_dict[
'false_positive_treatment'])
recall_tr = float(result_dict['correct_treatment']) / float(result_dict
['total_treatment'])
f_score_p = 2 * precision_p * recall_p / (precision_p + recall_p)
f_score_ts = 2 * precision_ts * recall_ts / (precision_ts + recall_ts)
f_score_tr = 2 * precision_tr * recall_tr / (precision_tr + recall_tr)
result_dict['problem_precision'] = precision_p
result_dict['problem_recall'] = recall_p
result_dict['problem_f1'] = f_score_p
result_dict['test_precision'] = precision_ts
result_dict['test_recall'] = recall_ts
result_dict['test_f1'] = f_score_ts
result_dict['treatment_precision'] = precision_tr
result_dict['treatment_recall'] = recall_tr
result_dict['treatment_f1'] = f_score_tr
result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr
) / 3.0
correct_sum = result_dict['correct_problem'] + result_dict[
'correct_test'] + result_dict['correct_treatment']
fp_sum = result_dict['false_positive_problem'] + result_dict[
'false_positive_test'] + result_dict['false_positive_treatment']
total_sum = result_dict['total_problem'] + result_dict['total_test'
] + result_dict['total_treatment']
precision_avg = float(correct_sum) / float(correct_sum + fp_sum)
recall_avg = float(correct_sum) / float(total_sum)
result_dict['micro_average_f1'] = 2 * precision_avg * recall_avg / (
precision_avg + recall_avg)
return result_dict
def save_model(self, curr_epoch, curr_batch):
if not os.path.exists(os.path.join(self.model_path, self.name)):
os.mkdir(os.path.join(self.model_path, self.name))
state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1,
'start_batch': curr_batch + 1, 'state_dict': self.machine.
state_dict(), 'optimizer_dic': self.optimizer.state_dict()}
filename = self.model_path + self.name + '/' + self.name + '_' + str(
curr_epoch) + '_' + str(curr_batch) + '_saved_model.pth.tar'
torch.save(state_dic, filename)
def load_model(self, option, epoch, batch):
path = self.model_path + self.name + '/' + self.name + '_' + str(epoch
) + '_' + str(batch) + '_saved_model.pth.tar'
if option == 1:
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_dic'])
else:
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.machine.eval()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class task_NER:
def __init__(self):
self.name = 'NER_task_bio'
self.controller_size = 128
self.controller_layers = 1
self.num_read_heads = 1
self.num_write_heads = 1
self.num_inputs = 200
self.num_outputs = 7
self.memory_N = 128
self.memory_M = 128
self.num_batches = -1
self.save_batch = 5
self.batch_size = 10
self.num_epoch = 4
self.adam_lr = 0.0001
self.adam_betas = 0.9, 0.999
self.adam_eps = 1e-08
self.machine = None
self.loss = None
self.optimizer = None
self.labelDict = None
self.reverseDict = None
self.concept_path_train = '../medical_data/train_data/concept'
self.text_path_train = '../medical_data/train_data/txt'
self.concept_path_test = '../medical_data/test_data/concept'
self.text_path_test = '../medical_data/test_data/txt'
self.save_path = '../medical_data/cleaned_files'
self.embed_dic_path = (
'../medical_data/embeddings/bio_embedding_dictionary.dat')
self.random_vec = '../medical_data/embeddings/random_vec.dat'
self.model_path = '../saved_models/'
self.padding_symbol = np.full(self.num_inputs, 0.01)
def get_task_name(self):
return self.name
def init_dnc(self):
self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.
controller_size, self.controller_layers, self.num_read_heads,
self.num_write_heads, self.memory_N, self.memory_M)
def init_loss(self):
self.loss = nn.CrossEntropyLoss(reduction='mean')
def init_optimizer(self):
self.optimizer = optim.Adam(self.machine.parameters(), lr=self.
adam_lr, betas=self.adam_betas, eps=self.adam_eps)
def calc_loss(self, Y_pred, Y):
loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)
for i in range(Y_pred.shape[0]):
loss_vec[i] = self.loss(Y_pred[i], Y[i])
return torch.mean(loss_vec)
def calc_cost(self, Y_pred, Y):
"""
Note:
            1). For a prediction to count as a True Positive, it must match the labeled entity completely (not partially); otherwise it is a False Negative.
            2). For a prediction to count as a False Positive, it must be a full entity (BIII) that does not completely match the labeled entity.
"""
class_bag = {}
class_bag['problem'] = 0
class_bag['test'] = 0
class_bag['treatment'] = 0
class_bag['problem_cor'] = 0
class_bag['test_cor'] = 0
class_bag['treatment_cor'] = 0
class_bag['problem_fp'] = 0
class_bag['test_fp'] = 0
class_bag['treatment_fp'] = 0
pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()
).reshape(-1)
Y = np.transpose(Y.numpy()).reshape(-1)
        cor_pred = (Y == pred_class).astype(int)  # builtin int; the np.int alias is removed in newer NumPy
class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size
) * 100.0
beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])
target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1
if target[0] == -1:
target = target[1:]
end_idx = list(target[np.where(Y[target] != 6)[0]])
if Y[-1] != 6:
end_idx.append(Y.size - 1)
assert len(beg_idx) == len(end_idx)
class_bag['total'] = len(beg_idx)
sum_vec = np.cumsum(cor_pred)
for b, e in zip(beg_idx, end_idx):
idx_range = e - b + 1
sum_range = sum_vec[e] - sum_vec[b] + 1
lab = self.reverseDict[Y[b]][2:]
class_bag[lab] = class_bag[lab] + 1
if sum_range == idx_range:
class_bag[lab + '_cor'] = class_bag[lab + '_cor'] + 1
beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])
for b in beg_idx_p:
if cor_pred[b] == 0:
lab = self.reverseDict[pred_class[b]][2:]
class_bag[lab + '_fp'] = class_bag[lab + '_fp'] + 1
return class_bag
def print_word(self, token_class):
word = self.reverseDict[token_class]
print(word + '\n')
def clip_grads(self):
"""Gradient clipping to the range [10, 10]."""
parameters = list(filter(lambda p: p.grad is not None, self.machine
.parameters()))
for p in parameters:
p.grad.data.clamp_(-10, 10)
def initialize_labels(self):
self.labelDict = {}
self.reverseDict = {}
self.labelDict['b-problem'] = 0
self.labelDict['i-problem'] = 1
self.labelDict['b-test'] = 2
self.labelDict['i-test'] = 3
self.labelDict['b-treatment'] = 4
self.labelDict['i-treatment'] = 5
self.labelDict['o'] = 6
for k in self.labelDict.keys():
self.reverseDict[self.labelDict[k]] = k
self.save_data([self.labelDict, self.reverseDict], os.path.join(
self.save_path, 'label_dicts_bio.dat'))
def parse_concepts(self, file_path):
conceptList = []
f = open(file_path)
content = f.readlines()
f.close()
for x in content:
dic = {}
x = re.sub('\n', ' ', x)
x = re.sub('\\ +', ' ', x)
x = x.strip().split('||')
temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]
temp1[0] = temp1[0][3:]
temp1[-3] = temp1[-3][0:-1]
entity = temp1[0:-2]
if len(entity) >= 1:
lab = ['i'] * len(entity)
lab[0] = 'b'
lab = [(l + '-' + label) for l in lab]
else:
print('Data in File: ' + file_path +
', not in expected format..')
exit()
noLab = [self.labelDict[l] for l in lab]
sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split
(':')[1])
eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split
(':')[1])
"""
# Printing the information
print("------------------------------------------------------------")
print("Entity: " + str(entity))
print("Entity Label: " + label)
print("Labels - BIO form: " + str(lab))
print("Labels Index: " + str(noLab))
print("Start Line: " + str(sLine) + ", Start Column: " + str(sCol))
print("End Line: " + str(eLine) + ", End Column: " + str(eCol))
print("------------------------------------------------------------")
"""
dic['entity'] = entity
dic['label'] = label
dic['BIO_labels'] = lab
dic['label_index'] = noLab
dic['start_line'] = sLine
dic['start_word_no'] = sCol
dic['end_line'] = eLine
dic['end_word_no'] = eCol
conceptList.append(dic)
return conceptList
def parse_summary(self, file_path):
file_lines = []
tags = []
default_label = len(self.labelDict) - 1
f = open(file_path)
content = f.readlines()
f.close()
for x in content:
x = re.sub('\n', ' ', x)
x = re.sub('\\ +', ' ', x)
file_lines.append(x.strip().split(' '))
tags.append([default_label] * len(file_lines[-1]))
"""
# Printing the information
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(file_lines[-1])
print("
Corresponding labels:")
print(tags[-1])
print("------------------------------------------------------------")
counter += 1
"""
assert len(tags[-1]) == len(file_lines[-1]
), 'Line length is not matching labels length...'
return file_lines, tags
def modify_labels(self, conceptList, tags):
for e in conceptList:
if e['start_line'] == e['end_line']:
tags[e['start_line'] - 1][e['start_word_no']:e[
'end_word_no'] + 1] = e['label_index'][:]
else:
start = e['start_line']
end = e['end_line']
beg = 0
for i in range(start, end + 1):
if i == start:
tags[i - 1][e['start_word_no']:] = e['label_index'][
0:len(tags[i - 1]) - e['start_word_no']]
beg = len(tags[i - 1]) - e['start_word_no']
elif i == end:
tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][
beg:]
else:
tags[i - 1][:] = e['label_index'][beg:beg + len(
tags[i - 1])]
beg = beg + len(tags[i - 1])
return tags
def print_data(self, file, file_lines, tags):
counter = 1
print('\n************ Printing details of the file: ' + file +
' ************\n')
for x in file_lines:
print(
'------------------------------------------------------------')
print('File Lines No: ' + str(counter))
print(x)
print('\nCorresponding labels:')
print([self.reverseDict[i] for i in tags[counter - 1]])
print('\nCorresponding Label Indices:')
print(tags[counter - 1])
print(
'------------------------------------------------------------')
counter += 1
def save_data(self, obj_list, s_path):
pickle.dump(tuple(obj_list), open(s_path, 'wb'))
def acquire_data(self, task):
data = {}
if task == 'train':
t_path = self.text_path_train
c_path = self.concept_path_train
else:
t_path = self.text_path_test
c_path = self.concept_path_test
for f in os.listdir(t_path):
f1 = f.split('.')[0] + '.con'
if os.path.isfile(os.path.join(c_path, f1)):
conceptList = self.parse_concepts(os.path.join(c_path, f1))
file_lines, tags = self.parse_summary(os.path.join(t_path, f))
tags = self.modify_labels(conceptList, tags)
data[f1] = [conceptList, file_lines, tags]
return data
def structure_data(self, data_dict):
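        # Flattens each document's per-line word lists (and matching label lists)
        # into one long sequence per summary, so that one "story" corresponds to
        # one complete medical record.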
final_line_list = []
final_tag_list = []
for k in data_dict.keys():
file_lines = data_dict[k][1]
tags = data_dict[k][2]
temp1 = []
temp2 = []
for i in range(len(file_lines)):
temp1.extend(file_lines[i])
temp2.extend(tags[i])
assert len(temp1) == len(temp2
), 'Word length not matching Label length for story in ' + str(
k)
final_line_list.append(temp1)
final_tag_list.append(temp2)
assert len(final_line_list) == len(final_tag_list
), 'Number of stories not matching number of labels list'
return final_line_list, final_tag_list
def padding(self, line_list, tag_list):
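        # Pads every summary in the batch to the length of the longest one, using
        # the padding_symbol embedding for inputs and the 'o'/Outside class index
        # for the corresponding labels.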
diff = 0
max_len = 0
outside_class = len(self.labelDict) - 1
for i in range(len(line_list)):
if len(line_list[i]) > max_len:
max_len = len(line_list[i])
for i in range(len(line_list)):
diff = max_len - len(line_list[i])
line_list[i].extend([self.padding_symbol] * diff)
tag_list[i].extend([outside_class] * diff)
assert len(line_list[i]) == max_len and len(line_list[i]) == len(
tag_list[i]), 'Padding unsuccessful'
return np.asarray(line_list), np.asarray(tag_list)
def embed_input(self, line_list):
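        # Replaces every word with its pre-trained embedding vector; out-of-vocabulary
        # words receive a freshly shuffled copy of the stored random vector, so every
        # token still maps to a fixed-length (num_inputs) vector.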
final_list = []
summary = None
word = None
temp = None
embed_dic = pickle.load(open(self.embed_dic_path, 'rb'))
r_embed = pickle.load(open(self.random_vec, 'rb'))
for i in range(len(line_list)):
summary = line_list[i]
final_list.append([])
for j in range(len(summary)):
word = summary[j].lower()
if word in embed_dic:
final_list[-1].append(embed_dic[word])
else:
temp = r_embed[:]
random.shuffle(temp)
temp = np.asarray(temp, dtype=np.float32)
final_list[-1].append(temp)
return final_list
def prepare_data(self, task='train'):
line_list, tag_list = None, None
"""
line_list is the list of rows, where each row is a list of all the words in a medical summary
Similar is the case for tag_list, except, it stores labels for each words
"""
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
if not os.path.exists(os.path.join(self.save_path,
'label_dicts_bio.dat')):
self.initialize_labels()
else:
self.labelDict, self.reverseDict = pickle.load(open(os.path.
join(self.save_path, 'label_dicts_bio.dat'), 'rb'))
if not os.path.exists(os.path.join(self.save_path,
'object_dict_bio_' + str(task) + '.dat')):
data_dict = self.acquire_data(task)
line_list, tag_list = self.structure_data(data_dict)
line_list = self.embed_input(line_list)
self.save_data([line_list, tag_list], os.path.join(self.
save_path, 'object_dict_bio_' + str(task) + '.dat'))
else:
line_list, tag_list = pickle.load(open(os.path.join(self.
save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))
return line_list, tag_list
def get_data(self, task='train'):
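        # Yields (batch_index, X, Y) where, after padding and swapaxes,
        #   X has shape (max_seq_len, batch_size, num_inputs) and
        #   Y has shape (max_seq_len, batch_size).
        # Stories are reshuffled on every call and any remainder smaller than a
        # full batch is dropped.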
line_list, tag_list = self.prepare_data(task)
story_idx = list(range(0, len(line_list)))
random.shuffle(story_idx)
num_batch = int(len(story_idx) / self.batch_size)
self.num_batches = num_batch
x_out = []
y_out = []
counter = 1
for i in story_idx:
if num_batch <= 0:
break
x_out.append(line_list[i])
y_out.append(tag_list[i])
if counter % self.batch_size == 0:
counter = 0
x_out_pad, y_out_pad = self.padding(x_out, y_out)
x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=
torch.float32)
y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=
torch.long)
x_out = []
y_out = []
num_batch -= 1
yield self.num_batches - num_batch, x_out_array, y_out_array
counter += 1
def train_model(self):
loss_list = []
seq_length = []
last_batch = 0
for j in range(self.num_epoch):
for batch_num, X, Y in self.get_data(task='train'):
self.optimizer.zero_grad()
self.machine.initialization(self.batch_size)
Y_out = torch.empty((X.shape[0], X.shape[1], self.
num_outputs), dtype=torch.float32)
embeddings = self.machine.backward_prediction(X)
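                # backward_prediction is assumed to process the sequence in reverse and
                # return one representation per time step; indexing it with
                # temp_size - i - 1 below aligns the reversed output with forward
                # position i, giving the controller bidirectional context.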
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[
temp_size - i - 1])
loss = self.calc_loss(Y_out, Y)
loss.backward()
self.clip_grads()
self.optimizer.step()
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor'] + class_bag['test_cor'
] + class_bag['treatment_cor']
tot = class_bag['total']
loss_list += [loss.item()]
seq_length += [Y.shape[0]]
if batch_num % self.save_batch == 0:
self.save_model(j, batch_num)
last_batch = batch_num
print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +
', Batch: ' + str(batch_num) + '/' + str(self.
num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +
'Batch Accuracy (Entity Prediction): {0:.2f} %, '.
format(float(corr) / float(tot) * 100.0) +
'Batch Accuracy (Word Prediction): {0:.2f} %'.format(
class_bag['word_pred_acc']))
self.save_model(j, last_batch)
def test_model(self):
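        # Runs the trained machine over every test batch, accumulating entity-level
        # counts per class, and finally derives precision/recall/F1 via calc_metrics.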
correct = 0
total = 0
result_dict = {}
result_dict['total_problem'] = 0
result_dict['total_test'] = 0
result_dict['total_treatment'] = 0
result_dict['correct_problem'] = 0
result_dict['correct_test'] = 0
result_dict['correct_treatment'] = 0
result_dict['false_positive_problem'] = 0
result_dict['false_positive_test'] = 0
result_dict['false_positive_treatment'] = 0
print('\n')
for batch_num, X, Y in self.get_data(task='test'):
self.machine.initialization(self.batch_size)
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),
dtype=torch.float32)
embeddings = self.machine.backward_prediction(X)
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -
i - 1])
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor'] + class_bag['test_cor'
] + class_bag['treatment_cor']
tot = class_bag['total']
result_dict['total_problem'] = result_dict['total_problem'
] + class_bag['problem']
result_dict['total_test'] = result_dict['total_test'] + class_bag[
'test']
result_dict['total_treatment'] = result_dict['total_treatment'
] + class_bag['treatment']
result_dict['correct_problem'] = result_dict['correct_problem'
] + class_bag['problem_cor']
result_dict['correct_test'] = result_dict['correct_test'
] + class_bag['test_cor']
result_dict['correct_treatment'] = result_dict['correct_treatment'
] + class_bag['treatment_cor']
result_dict['false_positive_problem'] = result_dict[
'false_positive_problem'] + class_bag['problem_fp']
result_dict['false_positive_test'] = result_dict[
'false_positive_test'] + class_bag['test_fp']
result_dict['false_positive_treatment'] = result_dict[
'false_positive_treatment'] + class_bag['treatment_fp']
correct += corr
total += tot
print('Test Example ' + str(batch_num) + '/' + str(self.
num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.
format(float(corr) / float(tot) * 100.0) +
'Batch Accuracy (Word Prediction): {0:.2f} %'.format(
class_bag['word_pred_acc']))
result_dict['accuracy'] = float(correct) / float(total) * 100.0
result_dict = self.calc_metrics(result_dict)
print('\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(
result_dict['accuracy']))
return result_dict
def calc_metrics(self, result_dict):
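        # Per entity class: precision = correct / (correct + false_positive),
        # recall = correct / total gold entities, F1 = 2*P*R / (P + R).
        # macro_average_f1 averages the three per-class F1 scores, while
        # micro_average_f1 recomputes P and R from the pooled counts first.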
precision_p = float(result_dict['correct_problem']) / float(
result_dict['correct_problem'] + result_dict[
'false_positive_problem'])
recall_p = float(result_dict['correct_problem']) / float(result_dict
['total_problem'])
precision_ts = float(result_dict['correct_test']) / float(
result_dict['correct_test'] + result_dict['false_positive_test'])
recall_ts = float(result_dict['correct_test']) / float(result_dict[
'total_test'])
precision_tr = float(result_dict['correct_treatment']) / float(
result_dict['correct_treatment'] + result_dict[
'false_positive_treatment'])
recall_tr = float(result_dict['correct_treatment']) / float(result_dict
['total_treatment'])
f_score_p = 2 * precision_p * recall_p / (precision_p + recall_p)
f_score_ts = 2 * precision_ts * recall_ts / (precision_ts + recall_ts)
f_score_tr = 2 * precision_tr * recall_tr / (precision_tr + recall_tr)
result_dict['problem_precision'] = precision_p
result_dict['problem_recall'] = recall_p
result_dict['problem_f1'] = f_score_p
result_dict['test_precision'] = precision_ts
result_dict['test_recall'] = recall_ts
result_dict['test_f1'] = f_score_ts
result_dict['treatment_precision'] = precision_tr
result_dict['treatment_recall'] = recall_tr
result_dict['treatment_f1'] = f_score_tr
result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr
) / 3.0
correct_sum = result_dict['correct_problem'] + result_dict[
'correct_test'] + result_dict['correct_treatment']
fp_sum = result_dict['false_positive_problem'] + result_dict[
'false_positive_test'] + result_dict['false_positive_treatment']
total_sum = result_dict['total_problem'] + result_dict['total_test'
] + result_dict['total_treatment']
precision_avg = float(correct_sum) / float(correct_sum + fp_sum)
recall_avg = float(correct_sum) / float(total_sum)
result_dict['micro_average_f1'] = 2 * precision_avg * recall_avg / (
precision_avg + recall_avg)
return result_dict
def save_model(self, curr_epoch, curr_batch):
if not os.path.exists(os.path.join(self.model_path, self.name)):
os.mkdir(os.path.join(self.model_path, self.name))
state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1,
'start_batch': curr_batch + 1, 'state_dict': self.machine.
state_dict(), 'optimizer_dic': self.optimizer.state_dict()}
filename = self.model_path + self.name + '/' + self.name + '_' + str(
curr_epoch) + '_' + str(curr_batch) + '_saved_model.pth.tar'
torch.save(state_dic, filename)
def load_model(self, option, epoch, batch):
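        # option == 1 restores both model and optimizer state (to resume training);
        # any other value loads only the model weights and switches to eval mode.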
path = self.model_path + self.name + '/' + self.name + '_' + str(epoch
) + '_' + str(batch) + '_saved_model.pth.tar'
if option == 1:
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_dic'])
else:
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.machine.eval()
<|reserved_special_token_1|>
# Named Entity Recognition on Medical Data (BIO Tagging)
# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec
import os
import re
import torch
import pickle
from torch import nn
from torch import optim
import torch.nn.functional as F
import numpy as np
import random
from DNC.dnc import DNC_Module # Importing DNC Implementation
class task_NER():
def __init__(self):
self.name = "NER_task_bio"
# Controller Params
self.controller_size = 128
self.controller_layers = 1
# Head Params
self.num_read_heads = 1
self.num_write_heads = 1
# Processor Params
self.num_inputs = 200 # Length of Embeddings
self.num_outputs = 7 # Class size
# Memory Params
self.memory_N = 128
self.memory_M = 128
# Training Params
self.num_batches = -1
self.save_batch = 5 # Saving model after every save_batch number of batches
self.batch_size = 10
self.num_epoch = 4
# Optimizer Params
self.adam_lr = 1e-4
self.adam_betas = (0.9, 0.999)
self.adam_eps = 1e-8
# Handles
self.machine = None
self.loss = None
self.optimizer = None
# Class Dictionaries
self.labelDict = None # Label Dictionary - Labels to Index
self.reverseDict = None # Inverse Label Dictionary - Index to Labels
# File Paths
self.concept_path_train = "../medical_data/train_data/concept" # Path to train concept files
self.text_path_train = "../medical_data/train_data/txt" # Path to train text summaries
self.concept_path_test = "../medical_data/test_data/concept" # Path to test concept files
self.text_path_test = "../medical_data/test_data/txt" # Path to test text summaries
self.save_path = "../medical_data/cleaned_files" # Save path
self.embed_dic_path = "../medical_data/embeddings/bio_embedding_dictionary.dat" # Word2Vec embeddings Dictionary path
self.random_vec = "../medical_data/embeddings/random_vec.dat" # Path to random embedding (Used to create new vectors)
self.model_path = "../saved_models/" # Stores Trained Models
# Miscellaneous
self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding
def get_task_name(self):
return self.name
def init_dnc(self):
self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)
def init_loss(self):
self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss
def init_optimizer(self):
self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)
def calc_loss(self, Y_pred, Y):
# Y: dim -> (sequence_len x batch_size)
# Y_pred: dim -> (sequence_len x batch_size x num_outputs)
loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)
for i in range(Y_pred.shape[0]):
loss_vec[i] = self.loss(Y_pred[i], Y[i])
return torch.mean(loss_vec)
def calc_cost(self, Y_pred, Y): # Calculates % Cost
# Y: dim -> (sequence_len x batch_size)
# Y_pred: dim -> (sequence_len x batch_size x sequence_width)
'''
Note:
            1). For a prediction to count as a True Positive, it must match the labeled entity completely (not partially); otherwise it is a False Negative.
            2). For a prediction to count as a False Positive, it must be a full entity (BIII) that does not completely match the labeled entity.
'''
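        # Illustrative example (hypothetical tags, not taken from the dataset):
        #   Y          = [b-problem, i-problem, o, b-test, i-test]           -> [0, 1, 6, 2, 3]
        #   pred_class = [b-problem, i-problem, o, b-treatment, i-treatment] -> [0, 1, 6, 4, 5]
        # The gold "problem" entity (indices 0-1) is fully matched, so problem_cor += 1;
        # the gold "test" entity (indices 3-4) is not matched, so it only adds to class_bag['test'];
        # the predicted "treatment" entity starting at index 3 matches no gold entity,
        # so treatment_fp += 1. Word-level accuracy here would be 3/5 = 60 %.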
# Stores correct class labels for each entity type
class_bag = {}
class_bag['problem'] = 0 # Total labels
class_bag['test'] = 0 # Total labels
class_bag['treatment'] = 0 # Total labels
class_bag['problem_cor'] = 0 # Correctly classified labels
class_bag['test_cor'] = 0 # Correctly classified labels
class_bag['treatment_cor'] = 0 # Correctly classified labels
class_bag['problem_fp'] = 0 # False positive classified labels
class_bag['test_fp'] = 0 # False positive classified labels
class_bag['treatment_fp'] = 0 # False positive classified labels
pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. dim -> (sequence_len*batch_size)
Y = np.transpose(Y.numpy()).reshape(-1) # Converting to NumPy Array and linearizing
        cor_pred = (Y == pred_class).astype(int) # Comparing Prediction and Labels to find correct predictions (builtin int; the np.int alias is removed in newer NumPy)
class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0 # % Accuracy of Correctly Predicted Words (Not Entities)
# Getting the beginning index of all the entities
beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])
        # Getting the end index of all the entities (indices immediately before an 'Other'/'Begin' tag, excluding positions that are themselves 'Other')
target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1
if target[0] == -1:
target = target[1:]
end_idx = list(target[np.where(Y[target] != 6)[0]])
if Y[-1] != 6:
end_idx.append(Y.size-1)
assert len(beg_idx) == len(end_idx) # Sanity Check
class_bag['total'] = len(beg_idx) # Total number of Entities
# Counting Entities
        sum_vec = np.cumsum(cor_pred) # Cumulative sum of the correct-prediction indicator vector
for b, e in zip(beg_idx, end_idx):
idx_range = e-b+1 # Entity span
sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly
lab = self.reverseDict[Y[b]][2:] # Extracting entity type (Problem, Test or Treatment)
            class_bag[lab] = class_bag[lab]+1 # Getting the count of each entity type
if sum_range == idx_range: # +1 if entity is classified correctly
class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1
# Detecting False Positives
# Getting the beginning index of all the entities in Predicted Results
beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])
for b in beg_idx_p:
if cor_pred[b] == 0:
lab = self.reverseDict[pred_class[b]][2:]
class_bag[lab+'_fp'] = class_bag[lab+'_fp']+1
return class_bag
def print_word(self, token_class): # Prints the Class name from Class number
word = self.reverseDict[token_class]
print(word + "\n")
def clip_grads(self): # Clipping gradients for stability
"""Gradient clipping to the range [10, 10]."""
parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))
for p in parameters:
p.grad.data.clamp_(-10, 10)
def initialize_labels(self): # Initializing label dictionaries for Labels->IDX and IDX->Labels
self.labelDict = {} # Label Dictionary - Labels to Index
self.reverseDict = {} # Inverse Label Dictionary - Index to Labels
        # Using BIO labelling scheme
self.labelDict['b-problem'] = 0 # Problem - Beginning
self.labelDict['i-problem'] = 1 # Problem - Inside
self.labelDict['b-test'] = 2 # Test - Beginning
self.labelDict['i-test'] = 3 # Test - Inside
self.labelDict['b-treatment'] = 4 # Treatment - Beginning
self.labelDict['i-treatment'] = 5 # Treatment - Inside
self.labelDict['o'] = 6 # Outside Token
# Making Inverse Label Dictionary
for k in self.labelDict.keys():
self.reverseDict[self.labelDict[k]] = k
        # Saving the dictionaries into a file
self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, "label_dicts_bio.dat"))
def parse_concepts(self, file_path): # Parses the concept file to extract concepts and labels
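        # The parsing below assumes i2b2-style concept lines; an illustrative
        # (made-up) example consistent with this code would be:
        #   c="chest pain" 12:3 12:4||t="problem"
        # i.e. the quoted entity text, its start line:word and end line:word
        # positions, then '||' followed by the quoted entity label.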
conceptList = [] # Stores all the Concept in the File
f = open(file_path) # Opening and reading a concept file
content = f.readlines() # Reading all the lines in the concept file
f.close() # Closing the concept file
for x in content: # Reading each line in the concept file
dic = {}
# Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries
x = re.sub('\n', ' ', x)
x = re.sub(r'\ +', ' ', x)
x = x.strip().split('||')
temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]
temp1[0] = temp1[0][3:]
temp1[-3] = temp1[-3][0:-1]
entity = temp1[0:-2]
if len(entity) >= 1:
lab = ['i']*len(entity)
lab[0] = 'b'
lab = [l+"-"+label for l in lab]
else:
print("Data in File: " + file_path + ", not in expected format..")
exit()
noLab = [self.labelDict[l] for l in lab]
sLine, sCol = int(temp1[-2].split(":")[0]), int(temp1[-2].split(":")[1])
eLine, eCol = int(temp1[-1].split(":")[0]), int(temp1[-1].split(":")[1])
'''
# Printing the information
print("------------------------------------------------------------")
print("Entity: " + str(entity))
print("Entity Label: " + label)
print("Labels - BIO form: " + str(lab))
print("Labels Index: " + str(noLab))
print("Start Line: " + str(sLine) + ", Start Column: " + str(sCol))
print("End Line: " + str(eLine) + ", End Column: " + str(eCol))
print("------------------------------------------------------------")
'''
# Storing the information as a dictionary
dic['entity'] = entity # Entity Name (In the form of list of words)
dic['label'] = label # Common Label
dic['BIO_labels'] = lab # List of BIO labels for each word
dic['label_index'] = noLab # Labels in the index form
dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries
dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line
dic['end_line'] = eLine # End line of the concept in the corresponding text summaries
dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line
# Appending the concept dictionary to the list
conceptList.append(dic)
        return conceptList # Returning all the concepts in the current file as a list of dictionaries
def parse_summary(self, file_path): # Parses the Text summaries
        file_lines = [] # Stores the lines of the file in list form
tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])
        default_label = len(self.labelDict)-1 # default_label is "6" (corresponding to the 'o' [Outside] label)
# counter = 1 # Temporary variable used during print
        f = open(file_path) # Opening the text summary file
        content = f.readlines() # Reading all the lines in the summary file
f.close()
for x in content:
x = re.sub('\n', ' ', x)
x = re.sub(r'\ +', ' ', x)
            file_lines.append(x.strip().split(" ")) # Splitting the line into a word list and appending it to the file list
            tags.append([default_label]*len(file_lines[-1])) # Assigning the default_label to all the words in the line
'''
# Printing the information
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(file_lines[-1])
print("\nCorresponding labels:")
print(tags[-1])
print("------------------------------------------------------------")
counter += 1
'''
assert len(tags[-1]) == len(file_lines[-1]), "Line length is not matching labels length..." # Sanity Check
return file_lines, tags
def modify_labels(self, conceptList, tags): # Modifies the default labels of each word in text files with the true labels from the concept files
for e in conceptList: # Iterating over all the dictionary elements in the Concept List
if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single line or multiple line in the summary
tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]
else:
start = e['start_line']
end = e['end_line']
beg = 0
for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries
if i == start:
tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]
beg = len(tags[i-1])-e['start_word_no']
elif i == end:
tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]
else:
tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]
beg = beg+len(tags[i-1])
return tags
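    # Worked example (made-up numbers): for a concept with start_line=3, start_word_no=8,
    # end_line=4, end_word_no=1 and label_index=[0, 1, 1, 1], if line 3 holds 10 words then
    #   tags[2][8:]  <- [0, 1]   (the part of the entity on line 3)
    #   tags[3][0:2] <- [1, 1]   (the remainder on line 4)
    # and every other position keeps the default 'o' index.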
def print_data(self, file, file_lines, tags): # Prints the given data
counter = 1
print("\n************ Printing details of the file: " + file + " ************\n")
for x in file_lines:
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(x)
print("\nCorresponding labels:")
print([self.reverseDict[i] for i in tags[counter-1]])
print("\nCorresponding Label Indices:")
print(tags[counter-1])
print("------------------------------------------------------------")
counter += 1
def save_data(self, obj_list, s_path): # Saves the file into the binary file using Pickle
# Note: The 'obj_list' must be a list and none other than that
pickle.dump(tuple(obj_list), open(s_path,'wb'))
    def acquire_data(self, task): # Read all the concept files to get concepts and labels, process them and save them
data = {} # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name
if task == 'train': # Determining the task type to assign the data path accordingly
t_path = self.text_path_train
c_path = self.concept_path_train
else:
t_path = self.text_path_test
c_path = self.concept_path_test
for f in os.listdir(t_path):
f1 = f.split('.')[0] + ".con"
if os.path.isfile(os.path.join(c_path, f1)):
conceptList = self.parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file
file_lines, tags = self.parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes
                tags = self.modify_labels(conceptList, tags) # Modifies the default labels of each word with the true labels from the concept files
data[f1] = [conceptList, file_lines, tags] # Storing each object in dictionary
# self.print_data(f, file_lines, tags) # Printing the details
return data
def structure_data(self, data_dict): # Structures the data in proper trainable form
final_line_list = [] # Stores words of all the files in separate sub-lists
final_tag_list = [] # Stores tags of all the files in separate sub-lists
for k in data_dict.keys(): # Extracting data from each pre-processed file in dictionary
file_lines = data_dict[k][1] # Extracting story
tags = data_dict[k][2] # Extracting corresponding labels
# Creating empty lists
temp1 = []
temp2 = []
# Merging all the lines in file into a single list. Same for corresponding labels
for i in range(len(file_lines)):
temp1.extend(file_lines[i])
temp2.extend(tags[i])
assert len(temp1) == len(temp2), "Word length not matching Label length for story in " + str(k) # Sanity Check
final_line_list.append(temp1)
final_tag_list.append(temp2)
assert len(final_line_list) == len(final_tag_list), "Number of stories not matching number of labels list" # Sanity Check
return final_line_list, final_tag_list
def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length
diff = 0
max_len = 0
outside_class = len(self.labelDict)-1 # Classifying padding symbol as "outside" term
# Calculating Max Summary Length
for i in range(len(line_list)):
if len(line_list[i])>max_len:
max_len = len(line_list[i])
for i in range(len(line_list)):
diff = max_len - len(line_list[i])
line_list[i].extend([self.padding_symbol]*diff)
tag_list[i].extend([outside_class]*diff)
assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), "Padding unsuccessful" # Sanity check
return np.asarray(line_list), np.asarray(tag_list) # Making NumPy array of size (batch_size x story_length x word size) and (batch_size x story_length x 1) respectively
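    # Note on the padding convention used above: padded positions receive the constant 0.01-valued
    # vector defined in __init__ as input and the 'o' (outside) index as label, so they are treated
    # like ordinary outside tokens by the loss rather than being masked out.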
def embed_input(self, line_list): # Converts words to vector embeddings
final_list = [] # Stores embedded words
summary = None # Temp variable
word = None # Temp variable
temp = None # Temp variable
embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle
r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding
for i in range(len(line_list)): # Iterating over all the summaries
summary = line_list[i]
            final_list.append([]) # Reserving space for current summary
for j in range(len(summary)):
word = summary[j].lower()
if word in embed_dic: # Checking for existence of word in dictionary
final_list[-1].append(embed_dic[word])
else:
temp = r_embed[:] # Copying the values of the list
random.shuffle(temp) # Randomly shuffling the word embedding to make it unique
temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array
final_list[-1].append(temp)
return final_list
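    # Note on out-of-vocabulary handling above: every unknown word receives an independently
    # shuffled copy of the base random vector loaded from self.random_vec, so repeated occurrences
    # of the same unknown word do not share an embedding.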
def prepare_data(self, task='train'): # Preparing all the data necessary
line_list, tag_list = None, None
'''
line_list is the list of rows, where each row is a list of all the words in a medical summary
Similar is the case for tag_list, except, it stores labels for each words
'''
if not os.path.exists(self.save_path):
os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data
if not os.path.exists(os.path.join(self.save_path, "label_dicts_bio.dat")):
self.initialize_labels() # Initialize label to index dictionaries
else:
self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, "label_dicts_bio.dat"), 'rb')) # Loading Label dictionaries
if not os.path.exists(os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat")):
data_dict = self.acquire_data(task) # Read data from file
line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form
line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings
self.save_data([line_list, tag_list], os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat"))
else:
line_list, tag_list = pickle.load(open(os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat"), 'rb')) # Loading Data dictionary
return line_list, tag_list
def get_data(self, task='train'):
line_list, tag_list = self.prepare_data(task)
# Shuffling stories
story_idx = list(range(0, len(line_list)))
random.shuffle(story_idx)
num_batch = int(len(story_idx)/self.batch_size)
self.num_batches = num_batch
# Out Data
x_out = []
y_out = []
counter = 1
for i in story_idx:
if num_batch<=0:
break
x_out.append(line_list[i])
y_out.append(tag_list[i])
if counter % self.batch_size == 0:
counter = 0
                # Padding stories and labels to a common length (labels stay as class indices)
x_out_pad, y_out_pad = self.padding(x_out, y_out)
x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)
y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length x 1) to (story_length x batch_size x 1)
x_out = []
y_out = []
num_batch -= 1
yield (self.num_batches - num_batch), x_out_array, y_out_array
counter += 1
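    # Usage sketch (hypothetical driver snippet, assuming the data paths configured in __init__ exist):
    #   task = task_NER()
    #   for batch_num, X, Y in task.get_data(task='train'):
    #       print(batch_num, X.shape, Y.shape)   # (seq_len, batch, 200) and (seq_len, batch)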
def train_model(self):
# Here, the model is optimized using Cross Entropy Loss.
loss_list = []
seq_length = []
last_batch = 0
# self.load_model(1, 99, 13) # Loading Pre-Trained model to train further
for j in range(self.num_epoch):
for batch_num, X, Y in self.get_data(task='train'):
self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones
self.machine.initialization(self.batch_size) # Initializing states
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)
                # The DNC reads the input sequence one time-step at a time; at each step it is
                # also given the backward embedding of the mirrored position (computed below),
                # and its per-step outputs are trained to match the word labels
embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation
temp_size = X.shape[0]
for i in range(temp_size):
                    Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing the mirrored backward embedding along with the current input
loss = self.calc_loss(Y_out, Y)
loss.backward()
self.clip_grads()
self.optimizer.step()
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']
tot = class_bag['total']
loss_list += [loss.item()]
seq_length += [Y.shape[0]]
if (batch_num % self.save_batch) == 0:
self.save_model(j, batch_num)
last_batch = batch_num
print("Epoch: " + str(j) + "/" + str(self.num_epoch) + ", Batch: " + str(batch_num) + "/" + str(self.num_batches) + ", Loss: {0:.2f}, ".format(loss.item()) + \
"Batch Accuracy (Entity Prediction): {0:.2f} %, ".format((float(corr)/float(tot))*100.0) + "Batch Accuracy (Word Prediction): {0:.2f} %".format(class_bag['word_pred_acc']))
self.save_model(j, last_batch)
def test_model(self): # Testing the model
correct = 0
total = 0
result_dict = {}
result_dict['total_problem'] = 0 # Total labels in data
result_dict['total_test'] = 0 # Total labels in data
result_dict['total_treatment'] = 0 # Total labels in data
result_dict['correct_problem'] = 0 # Correctly classified labels
result_dict['correct_test'] = 0 # Correctly classified labels
result_dict['correct_treatment'] = 0 # Correctly classified labels
result_dict['false_positive_problem'] = 0 # False Positive labels
result_dict['false_positive_test'] = 0 # False Positive labels
result_dict['false_positive_treatment'] = 0 # False Positive labels
print("\n")
for batch_num, X, Y in self.get_data(task='test'):
self.machine.initialization(self.batch_size) # Initializing states
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)
            # The DNC reads the input sequence step by step together with the backward
            # embeddings of the mirrored positions; predictions are taken from every step
            # and compared against the word labels
embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1])
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']
tot = class_bag['total']
result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']
result_dict['total_test'] = result_dict['total_test'] + class_bag['test']
result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']
result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']
result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']
result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']
result_dict['false_positive_problem'] = result_dict['false_positive_problem'] + class_bag['problem_fp']
result_dict['false_positive_test'] = result_dict['false_positive_test'] + class_bag['test_fp']
result_dict['false_positive_treatment'] = result_dict['false_positive_treatment'] + class_bag['treatment_fp']
correct += corr
total += tot
print("Test Example " + str(batch_num) + "/" + str(self.num_batches) + " processed, Batch Accuracy: {0:.2f} %, ".format((float(corr)/float(tot))*100.0) + "Batch Accuracy (Word Prediction): {0:.2f} %".format(class_bag['word_pred_acc']))
result_dict['accuracy'] = (float(correct)/float(total))*100.0
result_dict = self.calc_metrics(result_dict)
print("\nOverall Entity Prediction Accuracy: {0:.2f} %".format(result_dict['accuracy']))
return result_dict
def calc_metrics(self, result_dict): # Calculates Certain Metrices
precision_p = float(result_dict['correct_problem'])/float(result_dict['correct_problem'] + result_dict['false_positive_problem']) # Problem Precision
recall_p = float(result_dict['correct_problem'])/float(result_dict['total_problem']) # Problem Recall
precision_ts = float(result_dict['correct_test'])/float(result_dict['correct_test'] + result_dict['false_positive_test']) # Test Precision
recall_ts = float(result_dict['correct_test'])/float(result_dict['total_test']) # Test Recall
precision_tr = float(result_dict['correct_treatment'])/float(result_dict['correct_treatment'] + result_dict['false_positive_treatment']) # Treatment Precision
recall_tr = float(result_dict['correct_treatment'])/float(result_dict['total_treatment']) # Treatment Recall
f_score_p = 2*precision_p*recall_p/(precision_p+recall_p) # Problem F1 Score
f_score_ts = 2*precision_ts*recall_ts/(precision_ts+recall_ts) # Test F1 Score
f_score_tr = 2*precision_tr*recall_tr/(precision_tr+recall_tr) # Treatment F1 Score
result_dict['problem_precision'] = precision_p
result_dict['problem_recall'] = recall_p
result_dict['problem_f1'] = f_score_p
result_dict['test_precision'] = precision_ts
result_dict['test_recall'] = recall_ts
result_dict['test_f1'] = f_score_ts
result_dict['treatment_precision'] = precision_tr
result_dict['treatment_recall'] = recall_tr
result_dict['treatment_f1'] = f_score_tr
result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr)/3.0 # Macro Average F1 Score
# Micro Average F1 Score
correct_sum = result_dict['correct_problem'] + result_dict['correct_test'] + result_dict['correct_treatment']
fp_sum = result_dict['false_positive_problem'] + result_dict['false_positive_test'] + result_dict['false_positive_treatment']
total_sum = result_dict['total_problem'] + result_dict['total_test'] + result_dict['total_treatment']
precision_avg = float(correct_sum)/float(correct_sum + fp_sum)
recall_avg = float(correct_sum)/float(total_sum)
result_dict['micro_average_f1'] = 2*precision_avg*recall_avg/(precision_avg+recall_avg)
return result_dict
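    # Worked example (made-up counts): if every class had 40 correct predictions, 10 false
    # positives and 60 gold entities, then each precision = 40/50 = 0.80, each recall = 40/60
    # ~ 0.67 and each F1 = 2*0.80*0.67/(0.80+0.67) ~ 0.73; with identical counts per class the
    # macro and micro averages coincide at ~0.73.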
def save_model(self, curr_epoch, curr_batch):
        # 'start_epoch' and 'start_batch' below record the epoch and batch from which training should resume after the model is next loaded
        # Note: it is recommended to resume from 'start_epoch' alone (not 'start_epoch' + 'start_batch'), because batches are formed randomly
if not os.path.exists(os.path.join(self.model_path, self.name)):
os.mkdir(os.path.join(self.model_path, self.name))
state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}
filename = self.model_path + self.name + "/" + self.name + "_" + str(curr_epoch) + "_" + str(curr_batch) + "_saved_model.pth.tar"
torch.save(state_dic, filename)
def load_model(self, option, epoch, batch):
path = self.model_path + self.name + "/" + self.name + "_" + str(epoch) + "_" + str(batch) + "_saved_model.pth.tar"
if option == 1: # Loading for training
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_dic'])
else: # Loading for testing
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.machine.eval()
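# Minimal end-to-end driver sketch (an assumption about how the methods above chain together;
# it presumes DNC_Module and the data/model directories referenced in __init__ are available):
#   if __name__ == '__main__':
#       task = task_NER()
#       task.init_dnc()
#       task.init_loss()
#       task.init_optimizer()
#       task.train_model()
#       results = task.test_model()
#       print(results['macro_average_f1'], results['micro_average_f1'])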
|
flexible
|
{
"blob_id": "eb99def75404bc3b674bcb633714009149f2d50d",
"index": 5097,
"step-1": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n <mask token>\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n <mask token>\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n <mask token>\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n <mask token>\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n <mask token>\n <mask token>\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n <mask token>\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n 
torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy 
(Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n <mask token>\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n <mask token>\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + 
str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, file_path):\n file_lines = []\n tags = []\n default_label = len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 1\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n <mask token>\n <mask token>\n <mask token>\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 
'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 
1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n <mask token>\n <mask token>\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-3": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.\n controller_size, self.controller_layers, self.num_read_heads,\n self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y):\n \"\"\"\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.\n 2). 
For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n \"\"\"\n class_bag = {}\n class_bag['problem'] = 0\n class_bag['test'] = 0\n class_bag['treatment'] = 0\n class_bag['problem_cor'] = 0\n class_bag['test_cor'] = 0\n class_bag['treatment_cor'] = 0\n class_bag['problem_fp'] = 0\n class_bag['test_fp'] = 0\n class_bag['treatment_fp'] = 0\n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()\n ).reshape(-1)\n Y = np.transpose(Y.numpy()).reshape(-1)\n cor_pred = (Y == pred_class).astype(np.int)\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size\n ) * 100.0\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size - 1)\n assert len(beg_idx) == len(end_idx)\n class_bag['total'] = len(beg_idx)\n sum_vec = np.cumsum(cor_pred)\n for b, e in zip(beg_idx, end_idx):\n idx_range = e - b + 1\n sum_range = sum_vec[e] - sum_vec[b] + 1\n lab = self.reverseDict[Y[b]][2:]\n class_bag[lab] = class_bag[lab] + 1\n if sum_range == idx_range:\n class_bag[lab + '_cor'] = class_bag[lab + '_cor'] + 1\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab + '_fp'] = class_bag[lab + '_fp'] + 1\n return class_bag\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n 
print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, file_path):\n file_lines = []\n tags = []\n default_label = len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 1\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n\n def structure_data(self, data_dict):\n final_line_list = []\n final_tag_list = []\n for k in data_dict.keys():\n file_lines = data_dict[k][1]\n tags = data_dict[k][2]\n temp1 = []\n temp2 = []\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n assert len(temp1) == len(temp2\n ), 'Word length not matching Label length for story in ' + str(\n k)\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n assert len(final_line_list) == len(final_tag_list\n ), 'Number of stories not 
matching number of labels list'\n return final_line_list, final_tag_list\n\n def padding(self, line_list, tag_list):\n diff = 0\n max_len = 0\n outside_class = len(self.labelDict) - 1\n for i in range(len(line_list)):\n if len(line_list[i]) > max_len:\n max_len = len(line_list[i])\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol] * diff)\n tag_list[i].extend([outside_class] * diff)\n assert len(line_list[i]) == max_len and len(line_list[i]) == len(\n tag_list[i]), 'Padding unsuccessful'\n return np.asarray(line_list), np.asarray(tag_list)\n <mask token>\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 
'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict):\n precision_p = float(result_dict['correct_problem']) / float(\n result_dict['correct_problem'] + result_dict[\n 'false_positive_problem'])\n recall_p = float(result_dict['correct_problem']) / float(result_dict\n ['total_problem'])\n precision_ts = float(result_dict['correct_test']) / float(\n result_dict['correct_test'] + result_dict['false_positive_test'])\n recall_ts = float(result_dict['correct_test']) / float(result_dict[\n 'total_test'])\n precision_tr = float(result_dict['correct_treatment']) / float(\n result_dict['correct_treatment'] + result_dict[\n 'false_positive_treatment'])\n recall_tr = float(result_dict['correct_treatment']) / float(result_dict\n ['total_treatment'])\n f_score_p = 2 * precision_p * recall_p / (precision_p + recall_p)\n f_score_ts = 2 * precision_ts * recall_ts / (precision_ts + recall_ts)\n f_score_tr = 2 * precision_tr * recall_tr / (precision_tr + recall_tr)\n result_dict['problem_precision'] = precision_p\n 
result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr\n ) / 3.0\n correct_sum = result_dict['correct_problem'] + result_dict[\n 'correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict[\n 'false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'\n ] + result_dict['total_treatment']\n precision_avg = float(correct_sum) / float(correct_sum + fp_sum)\n recall_avg = float(correct_sum) / float(total_sum)\n result_dict['micro_average_f1'] = 2 * precision_avg * recall_avg / (\n precision_avg + recall_avg)\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1,\n 'start_batch': curr_batch + 1, 'state_dict': self.machine.\n state_dict(), 'optimizer_dic': self.optimizer.state_dict()}\n filename = self.model_path + self.name + '/' + self.name + '_' + str(\n curr_epoch) + '_' + str(curr_batch) + '_saved_model.pth.tar'\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-4": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.\n controller_size, self.controller_layers, self.num_read_heads,\n self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y):\n \"\"\"\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.\n 2). 
For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n \"\"\"\n class_bag = {}\n class_bag['problem'] = 0\n class_bag['test'] = 0\n class_bag['treatment'] = 0\n class_bag['problem_cor'] = 0\n class_bag['test_cor'] = 0\n class_bag['treatment_cor'] = 0\n class_bag['problem_fp'] = 0\n class_bag['test_fp'] = 0\n class_bag['treatment_fp'] = 0\n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()\n ).reshape(-1)\n Y = np.transpose(Y.numpy()).reshape(-1)\n cor_pred = (Y == pred_class).astype(np.int)\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size\n ) * 100.0\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size - 1)\n assert len(beg_idx) == len(end_idx)\n class_bag['total'] = len(beg_idx)\n sum_vec = np.cumsum(cor_pred)\n for b, e in zip(beg_idx, end_idx):\n idx_range = e - b + 1\n sum_range = sum_vec[e] - sum_vec[b] + 1\n lab = self.reverseDict[Y[b]][2:]\n class_bag[lab] = class_bag[lab] + 1\n if sum_range == idx_range:\n class_bag[lab + '_cor'] = class_bag[lab + '_cor'] + 1\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab + '_fp'] = class_bag[lab + '_fp'] + 1\n return class_bag\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n 
print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, file_path):\n file_lines = []\n tags = []\n default_label = len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 1\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n\n def structure_data(self, data_dict):\n final_line_list = []\n final_tag_list = []\n for k in data_dict.keys():\n file_lines = data_dict[k][1]\n tags = data_dict[k][2]\n temp1 = []\n temp2 = []\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n assert len(temp1) == len(temp2\n ), 'Word length not matching Label length for story in ' + str(\n k)\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n assert len(final_line_list) == len(final_tag_list\n ), 'Number of stories not 
matching number of labels list'\n return final_line_list, final_tag_list\n\n def padding(self, line_list, tag_list):\n diff = 0\n max_len = 0\n outside_class = len(self.labelDict) - 1\n for i in range(len(line_list)):\n if len(line_list[i]) > max_len:\n max_len = len(line_list[i])\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol] * diff)\n tag_list[i].extend([outside_class] * diff)\n assert len(line_list[i]) == max_len and len(line_list[i]) == len(\n tag_list[i]), 'Padding unsuccessful'\n return np.asarray(line_list), np.asarray(tag_list)\n\n def embed_input(self, line_list):\n final_list = []\n summary = None\n word = None\n temp = None\n embed_dic = pickle.load(open(self.embed_dic_path, 'rb'))\n r_embed = pickle.load(open(self.random_vec, 'rb'))\n for i in range(len(line_list)):\n summary = line_list[i]\n final_list.append([])\n for j in range(len(summary)):\n word = summary[j].lower()\n if word in embed_dic:\n final_list[-1].append(embed_dic[word])\n else:\n temp = r_embed[:]\n random.shuffle(temp)\n temp = np.asarray(temp, dtype=np.float32)\n final_list[-1].append(temp)\n return final_list\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = 
self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict):\n precision_p = float(result_dict['correct_problem']) / float(\n result_dict['correct_problem'] + result_dict[\n 'false_positive_problem'])\n recall_p = float(result_dict['correct_problem']) / float(result_dict\n ['total_problem'])\n precision_ts = float(result_dict['correct_test']) / float(\n result_dict['correct_test'] + result_dict['false_positive_test'])\n recall_ts = 
float(result_dict['correct_test']) / float(result_dict[\n 'total_test'])\n precision_tr = float(result_dict['correct_treatment']) / float(\n result_dict['correct_treatment'] + result_dict[\n 'false_positive_treatment'])\n recall_tr = float(result_dict['correct_treatment']) / float(result_dict\n ['total_treatment'])\n f_score_p = 2 * precision_p * recall_p / (precision_p + recall_p)\n f_score_ts = 2 * precision_ts * recall_ts / (precision_ts + recall_ts)\n f_score_tr = 2 * precision_tr * recall_tr / (precision_tr + recall_tr)\n result_dict['problem_precision'] = precision_p\n result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr\n ) / 3.0\n correct_sum = result_dict['correct_problem'] + result_dict[\n 'correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict[\n 'false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'\n ] + result_dict['total_treatment']\n precision_avg = float(correct_sum) / float(correct_sum + fp_sum)\n recall_avg = float(correct_sum) / float(total_sum)\n result_dict['micro_average_f1'] = 2 * precision_avg * recall_avg / (\n precision_avg + recall_avg)\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1,\n 'start_batch': curr_batch + 1, 'state_dict': self.machine.\n state_dict(), 'optimizer_dic': self.optimizer.state_dict()}\n filename = self.model_path + self.name + '/' + self.name + '_' + str(\n curr_epoch) + '_' + str(curr_batch) + '_saved_model.pth.tar'\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-5": "# Named Entity Recognition on Medical Data (BIO Tagging)\n# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec\n\nimport os\nimport re\nimport torch\nimport pickle\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport random\n\nfrom DNC.dnc import DNC_Module # Importing DNC Implementation\n\nclass task_NER():\n\n def __init__(self):\n self.name = \"NER_task_bio\"\n\n # Controller Params\n self.controller_size = 128\n self.controller_layers = 1\n\n # Head Params\n self.num_read_heads = 1\n self.num_write_heads = 1\n\n # Processor Params\n self.num_inputs = 200 # Length of Embeddings\n self.num_outputs = 7 # Class size\n\n # Memory Params\n self.memory_N = 128\n self.memory_M = 128\n\n # Training Params\n self.num_batches = -1\n self.save_batch = 5 # Saving model after every save_batch number of batches\n self.batch_size = 10\n self.num_epoch = 4\n\n # Optimizer Params\n self.adam_lr = 1e-4\n self.adam_betas = (0.9, 0.999)\n self.adam_eps = 1e-8\n\n # Handles\n self.machine = None\n self.loss = None\n self.optimizer = None\n\n # Class Dictionaries\n self.labelDict = None # Label Dictionary - Labels to Index\n self.reverseDict = None # Inverse Label Dictionary - Index to Labels\n\n # File Paths\n self.concept_path_train = \"../medical_data/train_data/concept\" # Path to train concept files\n self.text_path_train = \"../medical_data/train_data/txt\" # Path to train text summaries\n self.concept_path_test = \"../medical_data/test_data/concept\" # Path to test concept files\n self.text_path_test = \"../medical_data/test_data/txt\" # Path to test text summaries\n self.save_path = \"../medical_data/cleaned_files\" # Save path\n self.embed_dic_path = \"../medical_data/embeddings/bio_embedding_dictionary.dat\" # Word2Vec embeddings Dictionary path\n self.random_vec = \"../medical_data/embeddings/random_vec.dat\" # Path to random embedding (Used to create new vectors)\n self.model_path = \"../saved_models/\" # Stores Trained Models\n\n # Miscellaneous\n self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x num_outputs)\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y): # Calculates % Cost\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x sequence_width)\n\n '''\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.\n 2). 
For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n '''\n\n # Stores correct class labels for each entity type\n class_bag = {}\n class_bag['problem'] = 0 # Total labels\n class_bag['test'] = 0 # Total labels\n class_bag['treatment'] = 0 # Total labels\n class_bag['problem_cor'] = 0 # Correctly classified labels\n class_bag['test_cor'] = 0 # Correctly classified labels\n class_bag['treatment_cor'] = 0 # Correctly classified labels\n class_bag['problem_fp'] = 0 # False positive classified labels\n class_bag['test_fp'] = 0 # False positive classified labels\n class_bag['treatment_fp'] = 0 # False positive classified labels\n \n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. dim -> (sequence_len*batch_size)\n Y = np.transpose(Y.numpy()).reshape(-1) # Converting to NumPy Array and linearizing\n cor_pred = (Y == pred_class).astype(np.int) # Comparing Prediction and Labels to find correct predictions\n\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0 # % Accuracy of Correctly Predicted Words (Not Entities)\n\n # Getting the beginning index of all the entities\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n\n # Getting the end index of all the entities (All the Index previous of 'Other'/'Begin' and not equal to 'Other')\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size-1)\n\n assert len(beg_idx) == len(end_idx) # Sanity Check\n class_bag['total'] = len(beg_idx) # Total number of Entities\n\n # Counting Entities\n sum_vec = np.cumsum(cor_pred) # Calculates cumulative summation of predicted vector\n for b, e in zip(beg_idx, end_idx):\n idx_range = e-b+1 # Entity span\n sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly\n\n lab = self.reverseDict[Y[b]][2:] # Extracting entity type (Problem, Test or Treatment)\n class_bag[lab] = class_bag[lab]+1 # Getting count of each entities\n \n if sum_range == idx_range: # +1 if entity is classified correctly\n class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1\n\n # Detecting False Positives\n # Getting the beginning index of all the entities in Predicted Results\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n \n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab+'_fp'] = class_bag[lab+'_fp']+1\n\n return class_bag\n \n def print_word(self, token_class): # Prints the Class name from Class number\n word = self.reverseDict[token_class]\n print(word + \"\\n\")\n\n def clip_grads(self): # Clipping gradients for stability\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self): # Initializing label dictionaries for Labels->IDX and IDX->Labels\n self.labelDict = {} # Label Dictionary - Labels to Index\n self.reverseDict = {} # Inverse Label Dictionary - Index to Labels\n\n # Using BIEOS labelling scheme\n self.labelDict['b-problem'] = 0 # Problem - Beginning \n self.labelDict['i-problem'] = 1 # Problem - Inside\n self.labelDict['b-test'] = 2 # Test - Beginning\n self.labelDict['i-test'] = 3 # Test - Inside\n self.labelDict['b-treatment'] = 4 # Treatment - Beginning\n 
self.labelDict['i-treatment'] = 5 # Treatment - Inside\n self.labelDict['o'] = 6 # Outside Token\n\n # Making Inverse Label Dictionary\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n\n # Saving the diictionaries into a file\n self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, \"label_dicts_bio.dat\"))\n\n def parse_concepts(self, file_path): # Parses the concept file to extract concepts and labels\n conceptList = [] # Stores all the Concept in the File\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close() # Closing the concept file\n\n for x in content: # Reading each line in the concept file\n dic = {}\n\n # Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries\n x = re.sub('\\n', ' ', x)\n x = re.sub(r'\\ +', ' ', x)\n x = x.strip().split('||')\n\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n\n if len(entity) >= 1:\n lab = ['i']*len(entity)\n lab[0] = 'b'\n lab = [l+\"-\"+label for l in lab]\n else:\n print(\"Data in File: \" + file_path + \", not in expected format..\")\n exit()\n\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(\":\")[0]), int(temp1[-2].split(\":\")[1])\n eLine, eCol = int(temp1[-1].split(\":\")[0]), int(temp1[-1].split(\":\")[1])\n \n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n '''\n\n # Storing the information as a dictionary\n dic['entity'] = entity # Entity Name (In the form of list of words)\n dic['label'] = label # Common Label\n dic['BIO_labels'] = lab # List of BIO labels for each word\n dic['label_index'] = noLab # Labels in the index form\n dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries\n dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line\n dic['end_line'] = eLine # End line of the concept in the corresponding text summaries\n dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line\n\n # Appending the concept dictionary to the list\n conceptList.append(dic)\n\n return conceptList # Returning the all the concepts in the current file in the form of dictionary list\n\n def parse_summary(self, file_path): # Parses the Text summaries\n file_lines = [] # Stores the lins of files in the list form\n tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])\n default_label = len(self.labelDict)-1 # default_label is \"7\" (Corresponding to 'Other' entity) \n # counter = 1 # Temporary variable used during print\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close()\n\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub(r'\\ +', ' ', x)\n file_lines.append(x.strip().split(\" \")) # Spliting the lines into word list and Appending each of them in the file list\n 
tags.append([default_label]*len(file_lines[-1])) # Assigining the default_label to all the words in a line\n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n '''\n assert len(tags[-1]) == len(file_lines[-1]), \"Line length is not matching labels length...\" # Sanity Check\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags): # Modifies the default labels of each word in text files with the true labels from the concept files\n for e in conceptList: # Iterating over all the dictionary elements in the Concept List\n if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single line or multiple line in the summary\n tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries\n if i == start:\n tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]\n beg = len(tags[i-1])-e['start_word_no']\n elif i == end:\n tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]\n else:\n tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]\n beg = beg+len(tags[i-1])\n return tags\n\n def print_data(self, file, file_lines, tags): # Prints the given data\n counter = 1\n\n print(\"\\n************ Printing details of the file: \" + file + \" ************\\n\")\n for x in file_lines:\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(x)\n print(\"\\nCorresponding labels:\")\n print([self.reverseDict[i] for i in tags[counter-1]])\n print(\"\\nCorresponding Label Indices:\")\n print(tags[counter-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n\n def save_data(self, obj_list, s_path): # Saves the file into the binary file using Pickle\n # Note: The 'obj_list' must be a list and none other than that\n pickle.dump(tuple(obj_list), open(s_path,'wb'))\n\n def acquire_data(self, task): # Read all the concept files to get concepts and labels, proces them and save them\n data = {} # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name\n\n if task == 'train': # Determining the task type to assign the data path accordingly\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + \".con\"\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file\n file_lines, tags = self.parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes\n tags = self.modify_labels(conceptList, tags) # Modifies he default labels to each word with the true labels from the concept files\n data[f1] = [conceptList, file_lines, tags] # Storing each object in dictionary\n # self.print_data(f, file_lines, tags) # Printing the details\n return data\n\n def structure_data(self, data_dict): # Structures the data in proper trainable form\n final_line_list = [] # Stores words 
of all the files in separate sub-lists\n final_tag_list = [] # Stores tags of all the files in separate sub-lists\n\n for k in data_dict.keys(): # Extracting data from each pre-processed file in dictionary\n file_lines = data_dict[k][1] # Extracting story\n tags = data_dict[k][2] # Extracting corresponding labels\n\n # Creating empty lists\n temp1 = []\n temp2 = []\n\n # Merging all the lines in file into a single list. Same for corresponding labels\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n \n assert len(temp1) == len(temp2), \"Word length not matching Label length for story in \" + str(k) # Sanity Check\n\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n \n assert len(final_line_list) == len(final_tag_list), \"Number of stories not matching number of labels list\" # Sanity Check\n return final_line_list, final_tag_list\n \n def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length \n diff = 0\n max_len = 0\n outside_class = len(self.labelDict)-1 # Classifying padding symbol as \"outside\" term\n\n # Calculating Max Summary Length\n for i in range(len(line_list)):\n if len(line_list[i])>max_len:\n max_len = len(line_list[i])\n\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol]*diff)\n tag_list[i].extend([outside_class]*diff)\n assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), \"Padding unsuccessful\" # Sanity check\n return np.asarray(line_list), np.asarray(tag_list) # Making NumPy array of size (batch_size x story_length x word size) and (batch_size x story_length x 1) respectively\n\n def embed_input(self, line_list): # Converts words to vector embeddings\n final_list = [] # Stores embedded words\n summary = None # Temp variable\n word = None # Temp variable\n temp = None # Temp variable\n\n embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle\n r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding\n\n for i in range(len(line_list)): # Iterating over all the summaries\n summary = line_list[i]\n final_list.append([]) # Reserving space for curent summary\n\n for j in range(len(summary)):\n word = summary[j].lower()\n if word in embed_dic: # Checking for existence of word in dictionary\n final_list[-1].append(embed_dic[word])\n else:\n temp = r_embed[:] # Copying the values of the list\n random.shuffle(temp) # Randomly shuffling the word embedding to make it unique\n temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array\n final_list[-1].append(temp)\n return final_list\n\n def prepare_data(self, task='train'): # Preparing all the data necessary\n line_list, tag_list = None, None\n\n '''\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n '''\n\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data\n \n if not os.path.exists(os.path.join(self.save_path, \"label_dicts_bio.dat\")):\n self.initialize_labels() # Initialize label to index dictionaries\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, \"label_dicts_bio.dat\"), 'rb')) # Loading Label dictionaries\n \n if not os.path.exists(os.path.join(self.save_path, 
\"object_dict_bio_\"+str(task)+\".dat\")):\n data_dict = self.acquire_data(task) # Read data from file\n line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form\n line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings\n self.save_data([line_list, tag_list], os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"), 'rb')) # Loading Data dictionary\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n\n # Shuffling stories\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n\n num_batch = int(len(story_idx)/self.batch_size)\n self.num_batches = num_batch\n\n # Out Data\n x_out = []\n y_out = []\n \n counter = 1\n\n for i in story_idx:\n if num_batch<=0:\n break\n\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n\n if counter % self.batch_size == 0:\n counter = 0\n \n # Padding and converting labels to one hot vectors\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length x 1) to (story_length x batch_size x 1)\n\n x_out = []\n y_out = []\n num_batch -= 1\n\n yield (self.num_batches - num_batch), x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n # Here, the model is optimized using Cross Entropy Loss.\n loss_list = []\n seq_length = []\n last_batch = 0\n\n # self.load_model(1, 99, 13) # Loading Pre-Trained model to train further\n\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing Embeddings from backwards\n\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n\n if (batch_num % self.save_batch) == 0:\n self.save_model(j, batch_num)\n\n last_batch = batch_num\n print(\"Epoch: \" + str(j) + \"/\" + str(self.num_epoch) + \", Batch: \" + str(batch_num) + \"/\" + str(self.num_batches) + \", Loss: {0:.2f}, \".format(loss.item()) + \\\n \"Batch Accuracy (Entity Prediction): {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self): # 
Testing the model\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0 # Total labels in data\n result_dict['total_test'] = 0 # Total labels in data\n result_dict['total_treatment'] = 0 # Total labels in data\n result_dict['correct_problem'] = 0 # Correctly classified labels\n result_dict['correct_test'] = 0 # Correctly classified labels\n result_dict['correct_treatment'] = 0 # Correctly classified labels\n result_dict['false_positive_problem'] = 0 # False Positive labels\n result_dict['false_positive_test'] = 0 # False Positive labels\n result_dict['false_positive_treatment'] = 0 # False Positive labels\n print(\"\\n\")\n\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1])\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag['test']\n result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict['false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict['false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict['false_positive_treatment'] + class_bag['treatment_fp']\n\n correct += corr\n total += tot\n print(\"Test Example \" + str(batch_num) + \"/\" + str(self.num_batches) + \" processed, Batch Accuracy: {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n \n result_dict['accuracy'] = (float(correct)/float(total))*100.0\n result_dict = self.calc_metrics(result_dict)\n print(\"\\nOverall Entity Prediction Accuracy: {0:.2f} %\".format(result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict): # Calculates Certain Metrices\n precision_p = float(result_dict['correct_problem'])/float(result_dict['correct_problem'] + result_dict['false_positive_problem']) # Problem Precision\n recall_p = float(result_dict['correct_problem'])/float(result_dict['total_problem']) # Problem Recall\n\n precision_ts = float(result_dict['correct_test'])/float(result_dict['correct_test'] + result_dict['false_positive_test']) # Test Precision\n recall_ts = float(result_dict['correct_test'])/float(result_dict['total_test']) # Test Recall\n\n precision_tr = float(result_dict['correct_treatment'])/float(result_dict['correct_treatment'] + 
result_dict['false_positive_treatment']) # Treatment Precision\n recall_tr = float(result_dict['correct_treatment'])/float(result_dict['total_treatment']) # Treatment Recall\n\n f_score_p = 2*precision_p*recall_p/(precision_p+recall_p) # Problem F1 Score\n f_score_ts = 2*precision_ts*recall_ts/(precision_ts+recall_ts) # Test F1 Score\n f_score_tr = 2*precision_tr*recall_tr/(precision_tr+recall_tr) # Treatment F1 Score\n\n result_dict['problem_precision'] = precision_p\n result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr)/3.0 # Macro Average F1 Score\n\n # Micro Average F1 Score\n correct_sum = result_dict['correct_problem'] + result_dict['correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict['false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'] + result_dict['total_treatment']\n \n precision_avg = float(correct_sum)/float(correct_sum + fp_sum)\n recall_avg = float(correct_sum)/float(total_sum)\n result_dict['micro_average_f1'] = 2*precision_avg*recall_avg/(precision_avg+recall_avg)\n\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n # Here 'start_epoch' and 'start_batch' params below are the 'epoch' and 'batch' number from which to start training after next model loading\n # Note: It is recommended to start from the 'start_epoch' and not 'start_epoch' + 'start_batch', because batches are formed randomly\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}\n filename = self.model_path + self.name + \"/\" + self.name + \"_\" + str(curr_epoch) + \"_\" + str(curr_batch) + \"_saved_model.pth.tar\"\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + \"/\" + self.name + \"_\" + str(epoch) + \"_\" + str(batch) + \"_saved_model.pth.tar\"\n if option == 1: # Loading for training\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else: # Loading for testing\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()",
"step-ids": [
12,
20,
26,
27,
29
]
}
|
[
12,
20,
26,
27,
29
] |
<mask token>
class Net(torch.nn.Module):
    <mask token>
    <mask token>
<|reserved_special_token_1|>
<mask token>
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device
=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print(
'Are you sure dropout_prob is supposed to be greater than 0.5?'
)
self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',
pretrained=True)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',
pretrained=True)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
    <mask token>
<|reserved_special_token_1|>
<mask token>
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device
=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print(
'Are you sure dropout_prob is supposed to be greater than 0.5?'
)
self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',
pretrained=True)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',
pretrained=True)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
def forward(self, inputs):
first_images = inputs['image1'].to(self.device)
first_text = inputs['text1']
first_length = inputs['length1'].to(self.device)
first_categories = inputs['categories1'].to(self.device)
first_days_posted = inputs['days_posted1'].to(self.device)
second_images = inputs['image2'].to(self.device)
second_text = inputs['text2']
second_length = inputs['length2'].to(self.device)
second_categories = inputs['categories2'].to(self.device)
second_days_posted = inputs['days_posted2'].to(self.device)
image_tensor_one = self.resnet.forward(first_images)
image_tensor_two = self.resnet.forward(second_images)
text_features1 = torch.Tensor()
text_features2 = torch.Tensor()
text_features1 = text_features1.to(self.device)
text_features2 = text_features2.to(self.device)
for text in first_text:
first_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(first_tokens)
feature_means = torch.mean(features, dim=1)
text_features1 = torch.cat([text_features1, feature_means])
for text in second_text:
second_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(second_tokens)
feature_means = torch.mean(features, dim=1)
text_features2 = torch.cat([text_features2, feature_means])
concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)
concat_tensor = torch.squeeze(concat_tensor)
concat_tensor = torch.cat((text_features1, text_features2,
concat_tensor), 1)
additional_features = torch.cat([torch.reshape(first_length, (-1, 1
)), torch.reshape(second_length, (-1, 1)), torch.reshape(
first_days_posted, (-1, 1)), torch.reshape(second_days_posted,
(-1, 1))], dim=1)
concat_tensor = torch.cat([concat_tensor, additional_features.float
(), first_categories.float(), second_categories.float()], dim=1)
x = concat_tensor
zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)
) if self.drops is None else zip(self.bns, self.fcs, self.drops)
for i, (bn, fc, drop) in enumerate(zipped_layers):
x = bn(x)
if drop is not None:
x = drop(x)
if i == len(self.bns) - 1:
x = fc(x)
else:
x = F.relu(fc(x))
return x
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device
=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print(
'Are you sure dropout_prob is supposed to be greater than 0.5?'
)
self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',
pretrained=True)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',
pretrained=True)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
def forward(self, inputs):
first_images = inputs['image1'].to(self.device)
first_text = inputs['text1']
first_length = inputs['length1'].to(self.device)
first_categories = inputs['categories1'].to(self.device)
first_days_posted = inputs['days_posted1'].to(self.device)
second_images = inputs['image2'].to(self.device)
second_text = inputs['text2']
second_length = inputs['length2'].to(self.device)
second_categories = inputs['categories2'].to(self.device)
second_days_posted = inputs['days_posted2'].to(self.device)
image_tensor_one = self.resnet.forward(first_images)
image_tensor_two = self.resnet.forward(second_images)
text_features1 = torch.Tensor()
text_features2 = torch.Tensor()
text_features1 = text_features1.to(self.device)
text_features2 = text_features2.to(self.device)
for text in first_text:
first_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(first_tokens)
feature_means = torch.mean(features, dim=1)
text_features1 = torch.cat([text_features1, feature_means])
for text in second_text:
second_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(second_tokens)
feature_means = torch.mean(features, dim=1)
text_features2 = torch.cat([text_features2, feature_means])
concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)
concat_tensor = torch.squeeze(concat_tensor)
concat_tensor = torch.cat((text_features1, text_features2,
concat_tensor), 1)
additional_features = torch.cat([torch.reshape(first_length, (-1, 1
)), torch.reshape(second_length, (-1, 1)), torch.reshape(
first_days_posted, (-1, 1)), torch.reshape(second_days_posted,
(-1, 1))], dim=1)
concat_tensor = torch.cat([concat_tensor, additional_features.float
(), first_categories.float(), second_categories.float()], dim=1)
x = concat_tensor
zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)
) if self.drops is None else zip(self.bns, self.fcs, self.drops)
for i, (bn, fc, drop) in enumerate(zipped_layers):
x = bn(x)
if drop is not None:
x = drop(x)
if i == len(self.bns) - 1:
x = fc(x)
else:
x = F.relu(fc(x))
return x
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print("Are you sure dropout_prob is supposed to be greater than 0.5?")
# Load Roberta
self.roberta = torch.hub.load(
"pytorch/fairseq", "roberta.base", pretrained=True
)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
# Load ResNet
resnet_full = torch.hub.load(
"pytorch/vision:v0.6.0", "resnet18", pretrained=True
)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
# for param in self.resnet.parameters():
# param.requires_grad = False
# self.resnet.eval()
# self.lstm = nn.LSTM(input_size=768, hidden_size=768 * 2)
# self.lstm.eval()
# Layers
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
def forward(self, inputs):
first_images = inputs["image1"].to(self.device)
first_text = inputs["text1"]
first_length = inputs["length1"].to(self.device)
first_categories = inputs["categories1"].to(self.device)
first_days_posted = inputs["days_posted1"].to(self.device)
second_images = inputs["image2"].to(self.device)
second_text = inputs["text2"]
second_length = inputs["length2"].to(self.device)
second_categories = inputs["categories2"].to(self.device)
second_days_posted = inputs["days_posted2"].to(self.device)
# Resnet
image_tensor_one = self.resnet.forward(first_images)
image_tensor_two = self.resnet.forward(second_images)
# Roberta
text_features1 = torch.Tensor()
text_features2 = torch.Tensor()
text_features1 = text_features1.to(self.device)
text_features2 = text_features2.to(self.device)
for text in first_text:
first_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(first_tokens)
feature_means = torch.mean(features, dim=1)
# features = torch.reshape(features, (-1, 1,768))
# output, (hn, cn) = self.lstm(features)
# cn = torch.reshape(cn, (1, 768 * 2))
text_features1 = torch.cat([text_features1, feature_means])
for text in second_text:
second_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(second_tokens)
# print("DIMENSION OF FEATURES ", features.shape)
feature_means = torch.mean(features, dim=1)
# features = torch.reshape(features, (-1, 1,768))
# output, (hn, cn) = self.lstm(features)
# cn = torch.reshape(cn, (1, 768 * 2))
# print("DIMENSION OF FEATURES ", features.shape)
text_features2 = torch.cat([text_features2, feature_means])
# Concatenated tensor
concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)
concat_tensor = torch.squeeze(concat_tensor)
concat_tensor = torch.cat((text_features1, text_features2, concat_tensor), 1)
additional_features = torch.cat(
[
torch.reshape(first_length, (-1, 1)),
torch.reshape(second_length, (-1, 1)),
torch.reshape(first_days_posted, (-1, 1)),
torch.reshape(second_days_posted, (-1, 1)),
],
dim=1,
)
concat_tensor = torch.cat(
[
concat_tensor,
additional_features.float(),
first_categories.float(),
second_categories.float(),
],
dim=1,
)
x = concat_tensor
zipped_layers = (
zip(self.bns, self.fcs, [None] * len(self.bns))
if self.drops is None
else zip(self.bns, self.fcs, self.drops)
)
for i, (bn, fc, drop) in enumerate(zipped_layers):
x = bn(x)
if drop is not None:
x = drop(x)
if i == len(self.bns) - 1:
x = fc(x)
else:
x = F.relu(fc(x))
return x
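# Hypothetical usage sketch (not part of the original file): a minimal,
# hedged illustration of the input dictionary that Net.forward() above
# expects. The dictionary keys are taken from forward(); the batch size,
# the 224x224 image resolution, and the 10-wide category vectors are
# assumptions inferred from the layer-size arithmetic
# (2*512 + 2*768 + 2*10 + 2*2 = 2584 input features). Instantiating Net
# downloads pretrained RoBERTa and ResNet-18 weights via torch.hub, and the
# sketch assumes the torch/fairseq versions this module was written against.
if __name__ == "__main__":
    device = torch.device("cpu")
    model = Net(layer_sizes=[256, 128, 2], dropout_prob=None, device=device)
    model.eval()  # inference-only sketch; BatchNorm layers run in eval mode

    batch_size = 4
    batch = {
        # item 1 and item 2 of each pair: image, free text, and metadata
        "image1": torch.randn(batch_size, 3, 224, 224),
        "image2": torch.randn(batch_size, 3, 224, 224),
        "text1": ["example text for the first item"] * batch_size,
        "text2": ["example text for the second item"] * batch_size,
        "length1": torch.randint(1, 500, (batch_size,)),
        "length2": torch.randint(1, 500, (batch_size,)),
        "categories1": torch.zeros(batch_size, 10),
        "categories2": torch.zeros(batch_size, 10),
        "days_posted1": torch.randint(0, 30, (batch_size,)),
        "days_posted2": torch.randint(0, 30, (batch_size,)),
    }

    with torch.no_grad():
        scores = model(batch)  # forward pass over the paired inputs
    print(scores.shape)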
|
flexible
|
{
"blob_id": "4711adcc7c95993ec13b9d06fa674aa064f79bfd",
"index": 314,
"step-1": "<mask token>\n\n\nclass Net(torch.nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs['image1'].to(self.device)\n first_text = inputs['text1']\n first_length = inputs['length1'].to(self.device)\n first_categories = inputs['categories1'].to(self.device)\n first_days_posted = inputs['days_posted1'].to(self.device)\n second_images = inputs['image2'].to(self.device)\n second_text = inputs['text2']\n second_length = inputs['length2'].to(self.device)\n second_categories = inputs['categories2'].to(self.device)\n second_days_posted = inputs['days_posted2'].to(self.device)\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features2 = torch.cat([text_features2, feature_means])\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2,\n concat_tensor), 1)\n additional_features = torch.cat([torch.reshape(first_length, (-1, 1\n )), torch.reshape(second_length, (-1, 1)), torch.reshape(\n first_days_posted, (-1, 1)), torch.reshape(second_days_posted,\n (-1, 1))], dim=1)\n concat_tensor = torch.cat([concat_tensor, additional_features.float\n (), first_categories.float(), second_categories.float()], dim=1)\n x = concat_tensor\n zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)\n ) if self.drops is None else zip(self.bns, self.fcs, self.drops)\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n return x\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs['image1'].to(self.device)\n first_text = inputs['text1']\n first_length = inputs['length1'].to(self.device)\n first_categories = inputs['categories1'].to(self.device)\n first_days_posted = inputs['days_posted1'].to(self.device)\n second_images = inputs['image2'].to(self.device)\n second_text = inputs['text2']\n second_length = inputs['length2'].to(self.device)\n second_categories = inputs['categories2'].to(self.device)\n second_days_posted = inputs['days_posted2'].to(self.device)\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features2 = torch.cat([text_features2, feature_means])\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2,\n concat_tensor), 1)\n additional_features = torch.cat([torch.reshape(first_length, (-1, 1\n )), torch.reshape(second_length, (-1, 1)), torch.reshape(\n first_days_posted, (-1, 1)), torch.reshape(second_days_posted,\n (-1, 1))], dim=1)\n concat_tensor = torch.cat([concat_tensor, additional_features.float\n (), first_categories.float(), second_categories.float()], dim=1)\n x = concat_tensor\n zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)\n ) if self.drops is None else zip(self.bns, self.fcs, self.drops)\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n return x\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(torch.nn.Module):\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device=None):\n super(Net, self).__init__()\n self.device = device\n\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\"Are you sure dropout_prob is supposed to be greater than 0.5?\")\n\n # Load Roberta\n self.roberta = torch.hub.load(\n \"pytorch/fairseq\", \"roberta.base\", pretrained=True\n )\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n\n # Load ResNet\n resnet_full = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True\n )\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n # for param in self.resnet.parameters():\n # param.requires_grad = False\n # self.resnet.eval()\n\n # self.lstm = nn.LSTM(input_size=768, hidden_size=768 * 2)\n # self.lstm.eval()\n\n # Layers\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs[\"image1\"].to(self.device)\n first_text = inputs[\"text1\"]\n first_length = inputs[\"length1\"].to(self.device)\n first_categories = inputs[\"categories1\"].to(self.device)\n first_days_posted = inputs[\"days_posted1\"].to(self.device)\n\n second_images = inputs[\"image2\"].to(self.device)\n second_text = inputs[\"text2\"]\n second_length = inputs[\"length2\"].to(self.device)\n second_categories = inputs[\"categories2\"].to(self.device)\n second_days_posted = inputs[\"days_posted2\"].to(self.device)\n\n # Resnet\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n # Roberta\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n # features = torch.reshape(features, (-1, 1,768))\n # output, (hn, cn) = self.lstm(features)\n # cn = torch.reshape(cn, (1, 768 * 2))\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n # print(\"DIMENSION OF FEATURES \", features.shape)\n feature_means = torch.mean(features, dim=1)\n # features = torch.reshape(features, (-1, 1,768))\n # output, (hn, cn) = self.lstm(features)\n # cn = torch.reshape(cn, (1, 768 * 2))\n # print(\"DIMENSION OF FEATURES \", features.shape)\n text_features2 = torch.cat([text_features2, feature_means])\n\n # Concatenated tensor\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2, concat_tensor), 1)\n additional_features = torch.cat(\n [\n torch.reshape(first_length, (-1, 1)),\n torch.reshape(second_length, (-1, 1)),\n torch.reshape(first_days_posted, (-1, 1)),\n torch.reshape(second_days_posted, (-1, 1)),\n ],\n dim=1,\n )\n 
concat_tensor = torch.cat(\n [\n concat_tensor,\n additional_features.float(),\n first_categories.float(),\n second_categories.float(),\n ],\n dim=1,\n )\n\n x = concat_tensor\n zipped_layers = (\n zip(self.bns, self.fcs, [None] * len(self.bns))\n if self.drops is None\n else zip(self.bns, self.fcs, self.drops)\n )\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n\n return x\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import ordenador
import pytest
import contatempo


class TestaOrdenador:

    @pytest.fixture
    def ordenad(self):
        return ordenador.Ordenador()

    @pytest.fixture
    def list_quase_ord(self):
        c = contatempo.ContaTempos()
        return c.lista_quase_ordenada(100)

    @pytest.fixture
    def list_aleatoria(self):
        c = contatempo.ContaTempos()
        return c.lista_aleatoria(100)

    def esta_ordenada(self, lista):
        for i in range(len(lista)-1):
            if lista[i] > lista[i+1]:
                return False
        return True

    def test_selecao_bolha_melhorada_aleatoria(self, ordenad, list_aleatoria):
        ordenad.selecao_bolha_melhorada(list_aleatoria)
        assert self.esta_ordenada(list_aleatoria)

    def test_selecao_direta_aleatoria(self, ordenad, list_aleatoria):
        ordenad.selecao_direta(list_aleatoria)
        assert self.esta_ordenada(list_aleatoria)

    def test_selecao_bolha_melhorada__quase_ord(self, ordenad, list_quase_ord):
        ordenad.selecao_bolha_melhorada(list_quase_ord)
        assert self.esta_ordenada(list_quase_ord)

    def test_selecao_direta_quase_ord(self, ordenad, list_quase_ord):
        ordenad.selecao_direta(list_quase_ord)
        assert self.esta_ordenada(list_quase_ord)
|
normal
|
{
"blob_id": "32bb6d5ad0a1398c9ab89190c087fe3916631878",
"index": 7750,
"step-1": "<mask token>\n\n\nclass TestaOrdenador:\n\n @pytest.fixture\n def ordenad(self):\n return ordenador.Ordenador()\n <mask token>\n <mask token>\n <mask token>\n\n def test_selecao_bolha_melhorada_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_bolha_melhorada(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_direta_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_direta(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_bolha_melhorada__quase_ord(self, ordenad, list_quase_ord):\n ordenad.selecao_bolha_melhorada(list_quase_ord)\n assert self.esta_ordenada(list_quase_ord)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestaOrdenador:\n\n @pytest.fixture\n def ordenad(self):\n return ordenador.Ordenador()\n\n @pytest.fixture\n def list_quase_ord(self):\n c = contatempo.ContaTempos()\n return c.lista_quase_ordenada(100)\n <mask token>\n <mask token>\n\n def test_selecao_bolha_melhorada_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_bolha_melhorada(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_direta_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_direta(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_bolha_melhorada__quase_ord(self, ordenad, list_quase_ord):\n ordenad.selecao_bolha_melhorada(list_quase_ord)\n assert self.esta_ordenada(list_quase_ord)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestaOrdenador:\n\n @pytest.fixture\n def ordenad(self):\n return ordenador.Ordenador()\n\n @pytest.fixture\n def list_quase_ord(self):\n c = contatempo.ContaTempos()\n return c.lista_quase_ordenada(100)\n <mask token>\n\n def esta_ordenada(self, lista):\n for i in range(len(lista) - 1):\n if lista[i] > lista[i + 1]:\n return False\n return True\n\n def test_selecao_bolha_melhorada_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_bolha_melhorada(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_direta_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_direta(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_bolha_melhorada__quase_ord(self, ordenad, list_quase_ord):\n ordenad.selecao_bolha_melhorada(list_quase_ord)\n assert self.esta_ordenada(list_quase_ord)\n\n def test_selecao_direta_quase_ord(self, ordenad, list_quase_ord):\n ordenad.selecao_direta(list_quase_ord)\n assert self.esta_ordenada(list_quase_ord)\n",
"step-4": "<mask token>\n\n\nclass TestaOrdenador:\n\n @pytest.fixture\n def ordenad(self):\n return ordenador.Ordenador()\n\n @pytest.fixture\n def list_quase_ord(self):\n c = contatempo.ContaTempos()\n return c.lista_quase_ordenada(100)\n\n @pytest.fixture\n def list_aleatoria(self):\n c = contatempo.ContaTempos()\n return c.lista_aleatoria(100)\n\n def esta_ordenada(self, lista):\n for i in range(len(lista) - 1):\n if lista[i] > lista[i + 1]:\n return False\n return True\n\n def test_selecao_bolha_melhorada_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_bolha_melhorada(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_direta_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_direta(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_bolha_melhorada__quase_ord(self, ordenad, list_quase_ord):\n ordenad.selecao_bolha_melhorada(list_quase_ord)\n assert self.esta_ordenada(list_quase_ord)\n\n def test_selecao_direta_quase_ord(self, ordenad, list_quase_ord):\n ordenad.selecao_direta(list_quase_ord)\n assert self.esta_ordenada(list_quase_ord)\n",
"step-5": "import ordenador\nimport pytest\nimport contatempo\n\nclass TestaOrdenador:\n\n @pytest.fixture\n def ordenad(self):\n return ordenador.Ordenador()\n\n @pytest.fixture\n def list_quase_ord(self):\n c = contatempo.ContaTempos()\n return c.lista_quase_ordenada(100)\n\n @pytest.fixture\n def list_aleatoria(self):\n c = contatempo.ContaTempos()\n return c.lista_aleatoria(100)\n\n def esta_ordenada(self, lista):\n for i in range(len(lista)-1):\n if lista[i] > lista[i+1]:\n return False\n return True\n \n def test_selecao_bolha_melhorada_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_bolha_melhorada(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_direta_aleatoria(self, ordenad, list_aleatoria):\n ordenad.selecao_direta(list_aleatoria)\n assert self.esta_ordenada(list_aleatoria)\n\n def test_selecao_bolha_melhorada__quase_ord(self, ordenad, list_quase_ord):\n ordenad.selecao_bolha_melhorada(list_quase_ord)\n assert self.esta_ordenada(list_quase_ord)\n\n def test_selecao_direta_quase_ord(self, ordenad, list_quase_ord):\n ordenad.selecao_direta(list_quase_ord)\n assert self.esta_ordenada(list_quase_ord)\n\n \n",
"step-ids": [
5,
6,
8,
9,
11
]
}
|
[
5,
6,
8,
9,
11
] |
from datetime import datetime, timezone, timedelta
import json
import urllib.request
from mysql_dbcon import Connection
from model import SlackChannel, SlackUser, SlackMessage
# TODO set timezone at config
jst = timezone(timedelta(hours=+9), 'JST')
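

# Fetches channel history via the Slack Web API (channels.history), replaces
# user IDs in the text with their stored display names (SlackUser.other_name),
# skips messages whose client_msg_id has already been recorded, and persists
# the newly seen ids before returning the formatted message lines.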
def get_new_message_list(channel_id: int):
with Connection() as cn:
token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel).filter(
SlackChannel.id == channel_id).one()
user_dict = {user.user: user.other_name for user in cn.s.query(SlackUser).all()}
with urllib.request.urlopen(
f'https://slack.com/api/channels.history?token={token}&channel={channel}') as res:
json_dict = json.load(res)
print(json_dict)
messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))
client_msg_id_list = [
id_ for id_, in cn.s.query(SlackMessage.client_msg_id).filter(
SlackMessage.client_msg_id.in_([message.get('client_msg_id') for message in messages])
).all()]
message_list = []
insert_msg_id_list = []
for message in messages:
if not (message.get('user') and message.get('text') and message.get('client_msg_id')):
continue
if message.get('client_msg_id') in client_msg_id_list:
continue
time_stamp = message.get('ts', '')
if time_stamp:
time_stamp = datetime.fromtimestamp(float(time_stamp), jst).strftime('%m/%d %H:%M:%S')
text = message['text']
for user, name in user_dict.items():
text = text.replace(user, name)
message_list.append(user_dict[message['user']] + ':[' + time_stamp + '] ' + text)
insert_msg_id_list.append({'client_msg_id': message['client_msg_id']})
cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)
cn.s.commit()
return message_list
|
normal
|
{
"blob_id": "2b141f12bec2006e496bf58a3fcb0167c95ab3b6",
"index": 2530,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_new_message_list(channel_id: int):\n with Connection() as cn:\n token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel\n ).filter(SlackChannel.id == channel_id).one()\n user_dict = {user.user: user.other_name for user in cn.s.query(\n SlackUser).all()}\n with urllib.request.urlopen(\n f'https://slack.com/api/channels.history?token={token}&channel={channel}'\n ) as res:\n json_dict = json.load(res)\n print(json_dict)\n messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))\n client_msg_id_list = [id_ for id_, in cn.s.query(SlackMessage.\n client_msg_id).filter(SlackMessage.client_msg_id.in_([message.\n get('client_msg_id') for message in messages])).all()]\n message_list = []\n insert_msg_id_list = []\n for message in messages:\n if not (message.get('user') and message.get('text') and message\n .get('client_msg_id')):\n continue\n if message.get('client_msg_id') in client_msg_id_list:\n continue\n time_stamp = message.get('ts', '')\n if time_stamp:\n time_stamp = datetime.fromtimestamp(float(time_stamp), jst\n ).strftime('%m/%d %H:%M:%S')\n text = message['text']\n for user, name in user_dict.items():\n text = text.replace(user, name)\n message_list.append(user_dict[message['user']] + ':[' +\n time_stamp + '] ' + text)\n insert_msg_id_list.append({'client_msg_id': message[\n 'client_msg_id']})\n cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)\n cn.s.commit()\n return message_list\n",
"step-3": "<mask token>\njst = timezone(timedelta(hours=+9), 'JST')\n\n\ndef get_new_message_list(channel_id: int):\n with Connection() as cn:\n token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel\n ).filter(SlackChannel.id == channel_id).one()\n user_dict = {user.user: user.other_name for user in cn.s.query(\n SlackUser).all()}\n with urllib.request.urlopen(\n f'https://slack.com/api/channels.history?token={token}&channel={channel}'\n ) as res:\n json_dict = json.load(res)\n print(json_dict)\n messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))\n client_msg_id_list = [id_ for id_, in cn.s.query(SlackMessage.\n client_msg_id).filter(SlackMessage.client_msg_id.in_([message.\n get('client_msg_id') for message in messages])).all()]\n message_list = []\n insert_msg_id_list = []\n for message in messages:\n if not (message.get('user') and message.get('text') and message\n .get('client_msg_id')):\n continue\n if message.get('client_msg_id') in client_msg_id_list:\n continue\n time_stamp = message.get('ts', '')\n if time_stamp:\n time_stamp = datetime.fromtimestamp(float(time_stamp), jst\n ).strftime('%m/%d %H:%M:%S')\n text = message['text']\n for user, name in user_dict.items():\n text = text.replace(user, name)\n message_list.append(user_dict[message['user']] + ':[' +\n time_stamp + '] ' + text)\n insert_msg_id_list.append({'client_msg_id': message[\n 'client_msg_id']})\n cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)\n cn.s.commit()\n return message_list\n",
"step-4": "from datetime import datetime, timezone, timedelta\nimport json\nimport urllib.request\nfrom mysql_dbcon import Connection\nfrom model import SlackChannel, SlackUser, SlackMessage\njst = timezone(timedelta(hours=+9), 'JST')\n\n\ndef get_new_message_list(channel_id: int):\n with Connection() as cn:\n token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel\n ).filter(SlackChannel.id == channel_id).one()\n user_dict = {user.user: user.other_name for user in cn.s.query(\n SlackUser).all()}\n with urllib.request.urlopen(\n f'https://slack.com/api/channels.history?token={token}&channel={channel}'\n ) as res:\n json_dict = json.load(res)\n print(json_dict)\n messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))\n client_msg_id_list = [id_ for id_, in cn.s.query(SlackMessage.\n client_msg_id).filter(SlackMessage.client_msg_id.in_([message.\n get('client_msg_id') for message in messages])).all()]\n message_list = []\n insert_msg_id_list = []\n for message in messages:\n if not (message.get('user') and message.get('text') and message\n .get('client_msg_id')):\n continue\n if message.get('client_msg_id') in client_msg_id_list:\n continue\n time_stamp = message.get('ts', '')\n if time_stamp:\n time_stamp = datetime.fromtimestamp(float(time_stamp), jst\n ).strftime('%m/%d %H:%M:%S')\n text = message['text']\n for user, name in user_dict.items():\n text = text.replace(user, name)\n message_list.append(user_dict[message['user']] + ':[' +\n time_stamp + '] ' + text)\n insert_msg_id_list.append({'client_msg_id': message[\n 'client_msg_id']})\n cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)\n cn.s.commit()\n return message_list\n",
"step-5": "from datetime import datetime, timezone, timedelta\nimport json\nimport urllib.request\n\nfrom mysql_dbcon import Connection\nfrom model import SlackChannel, SlackUser, SlackMessage\n\n\n# TODO set timezone at config\njst = timezone(timedelta(hours=+9), 'JST')\n\n\ndef get_new_message_list(channel_id: int):\n with Connection() as cn:\n token, channel = cn.s.query(SlackChannel.token, SlackChannel.channel).filter(\n SlackChannel.id == channel_id).one()\n user_dict = {user.user: user.other_name for user in cn.s.query(SlackUser).all()}\n with urllib.request.urlopen(\n f'https://slack.com/api/channels.history?token={token}&channel={channel}') as res:\n json_dict = json.load(res)\n print(json_dict)\n messages = sorted(json_dict['messages'], key=lambda x: x.get('ts', ''))\n client_msg_id_list = [\n id_ for id_, in cn.s.query(SlackMessage.client_msg_id).filter(\n SlackMessage.client_msg_id.in_([message.get('client_msg_id') for message in messages])\n ).all()]\n message_list = []\n insert_msg_id_list = []\n for message in messages:\n if not (message.get('user') and message.get('text') and message.get('client_msg_id')):\n continue\n if message.get('client_msg_id') in client_msg_id_list:\n continue\n time_stamp = message.get('ts', '')\n if time_stamp:\n time_stamp = datetime.fromtimestamp(float(time_stamp), jst).strftime('%m/%d %H:%M:%S')\n text = message['text']\n for user, name in user_dict.items():\n text = text.replace(user, name)\n message_list.append(user_dict[message['user']] + ':[' + time_stamp + '] ' + text)\n insert_msg_id_list.append({'client_msg_id': message['client_msg_id']})\n cn.s.bulk_insert_mappings(SlackMessage, insert_msg_id_list)\n cn.s.commit()\n\n return message_list\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def swap(arr, first, second):
"""
Swaps the first index with the second.
arr: an input array
first: an index in the array
second: an index in the array
This function has the side effect mentioned above.
"""
arr[first], arr[second] = arr[second], arr[first]
def parent(i):
"""
i: an integer index in a heap.
Returns the index of the parent of the given index.
"""
return (i + 1) / 2 - 1
def left(i):
"""
i: an integer index in a heap.
Returns the index of the left-child of the given index.
"""
return 2 * (i + 1) - 1
<|reserved_special_token_0|>
def build_max_heap(arr):
for i in range(len(arr) / 2, 0, -1):
max_heapify(arr, i - 1)
<|reserved_special_token_0|>
def heap_sort(arr):
build_max_heap(arr)
sorted_list = []
while arr:
sorted_list.append(arr.pop(0))
max_heapify(arr, 0)
sorted_list.reverse()
return sorted_list
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def swap(arr, first, second):
"""
Swaps the first index with the second.
arr: an input array
first: an index in the array
second: an index in the array
This function has the side effect mentioned above.
"""
arr[first], arr[second] = arr[second], arr[first]
def parent(i):
"""
i: an integer index in a heap.
Returns the index of the parent of the given index.
"""
return (i + 1) / 2 - 1
def left(i):
"""
i: an integer index in a heap.
Returns the index of the left-child of the given index.
"""
return 2 * (i + 1) - 1
<|reserved_special_token_0|>
def max_heapify(heap, i):
"""
Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps
but that A[i] may be smaller than its children. Max-heapify lets A[i] float
down in order to satisfy the max-heap property.
heap: an array that is being treated as a heap
i: an index in the heap
This method causes side effects in the heap given to it that bring the heap
closer to a max-heap.
"""
left_child = left(i)
right_child = right(i)
if left_child < len(heap) and heap[left_child] > heap[i]:
largest = left_child
else:
largest = i
if right_child < len(heap) and heap[right_child] > heap[largest]:
largest = right_child
if largest != i:
swap(heap, i, largest)
max_heapify(heap, largest)
<|reserved_special_token_0|>
def build_max_heap(arr):
for i in range(len(arr) / 2, 0, -1):
max_heapify(arr, i - 1)
def max_heapify_unrecursive(heap, i):
"""
Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps
but that A[i] may be smaller than its children. Max-heapify lets A[i] float
down in order to satisfy the max-heap property.
heap: an array that is being treated as a heap
i: an index in the heap
This method causes side effects in the heap given to it that bring the heap
closer to a max-heap.
"""
while True:
left_child = left(i)
right_child = right(i)
largest = i
if left_child < len(heap) and heap[left_child] > heap[i]:
largest = left_child
if right_child < len(heap) and heap[right_child] > heap[largest]:
largest = right_child
if largest == i:
return
swap(heap, i, largest)
i = largest
def heap_sort(arr):
build_max_heap(arr)
sorted_list = []
while arr:
sorted_list.append(arr.pop(0))
max_heapify(arr, 0)
sorted_list.reverse()
return sorted_list
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def swap(arr, first, second):
"""
Swaps the first index with the second.
arr: an input array
first: an index in the array
second: an index in the array
This function has the side effect mentioned above.
"""
arr[first], arr[second] = arr[second], arr[first]
def parent(i):
"""
i: an integer index in a heap.
Returns the index of the parent of the given index.
"""
return (i + 1) / 2 - 1
def left(i):
"""
i: an integer index in a heap.
Returns the index of the left-child of the given index.
"""
return 2 * (i + 1) - 1
def right(i):
"""
i: an integer index in a heap
Returns the index of the right-child of the given index.
"""
return 2 * (i + 1)
def max_heapify(heap, i):
"""
Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps
but that A[i] may be smaller than its children. Max-heapify lets A[i] float
down in order to satisfy the max-heap property.
heap: an array that is being treated as a heap
i: an index in the heap
This method causes side effects in the heap given to it that bring the heap
closer to a max-heap.
"""
left_child = left(i)
right_child = right(i)
if left_child < len(heap) and heap[left_child] > heap[i]:
largest = left_child
else:
largest = i
if right_child < len(heap) and heap[right_child] > heap[largest]:
largest = right_child
if largest != i:
swap(heap, i, largest)
max_heapify(heap, largest)
<|reserved_special_token_0|>
def build_max_heap(arr):
for i in range(len(arr) / 2, 0, -1):
max_heapify(arr, i - 1)
def max_heapify_unrecursive(heap, i):
"""
Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps
but that A[i] may be smaller than its children. Max-heapify lets A[i] float
down in order to satisfy the max-heap property.
heap: an array that is being treated as a heap
i: an index in the heap
This method causes side effects in the heap given to it that bring the heap
closer to a max-heap.
"""
while True:
left_child = left(i)
right_child = right(i)
largest = i
if left_child < len(heap) and heap[left_child] > heap[i]:
largest = left_child
if right_child < len(heap) and heap[right_child] > heap[largest]:
largest = right_child
if largest == i:
return
swap(heap, i, largest)
i = largest
def heap_sort(arr):
build_max_heap(arr)
sorted_list = []
while arr:
sorted_list.append(arr.pop(0))
max_heapify(arr, 0)
sorted_list.reverse()
return sorted_list
<|reserved_special_token_1|>
import array
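

# The helpers below treat a plain list (or array.array) as a 0-indexed
# binary max-heap.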
def swap(arr, first, second):
"""
Swaps the first index with the second.
arr: an input array
first: an index in the array
second: an index in the array
This function has the side effect mentioned above.
"""
arr[first], arr[second] = arr[second], arr[first]
def parent(i):
"""
i: an integer index in a heap.
Returns the index of the parent of the given index.
"""
    return (i + 1) // 2 - 1  # floor division so the result stays an integer index
def left(i):
"""
i: an integer index in a heap.
Returns the index of the left-child of the given index.
"""
return 2 * (i + 1) - 1
def right(i):
"""
i: an integer index in a heap
Returns the index of the right-child of the given index.
"""
return 2 * (i + 1)
def max_heapify(heap, i):
"""
Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps
but that A[i] may be smaller than its children. Max-heapify lets A[i] float
down in order to satisfy the max-heap property.
heap: an array that is being treated as a heap
i: an index in the heap
This method causes side effects in the heap given to it that bring the heap
closer to a max-heap.
"""
left_child = left(i)
right_child = right(i)
if left_child < len(heap) and heap[left_child] > heap[i]:
largest = left_child
else:
largest = i
if right_child < len(heap) and heap[right_child] > heap[largest]:
largest = right_child
if largest != i:
swap(heap, i, largest)
max_heapify(heap, largest)
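

# Example data: max_heapify(example_heap, 1) floats the 4 down and yields a
# valid max-heap; this matches the worked MAX-HEAPIFY example in CLRS.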
example_heap = array.array('i', [16, 4, 10, 14, 7, 9, 3, 2, 8, 1])
def build_max_heap(arr):
    for i in range(len(arr) // 2, 0, -1):  # // so range() receives an int bound
max_heapify(arr, i - 1)
def max_heapify_unrecursive(heap, i):
"""
Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps
but that A[i] may be smaller than its children. Max-heapify lets A[i] float
down in order to satisfy the max-heap property.
heap: an array that is being treated as a heap
i: an index in the heap
This method causes side effects in the heap given to it that bring the heap
closer to a max-heap.
"""
while True:
left_child = left(i)
right_child = right(i)
largest = i
if left_child < len(heap) and heap[left_child] > heap[i]:
largest = left_child
if right_child < len(heap) and heap[right_child] > heap[largest]:
largest = right_child
if largest == i:
return
swap(heap, i, largest)
i = largest


def heap_sort(arr):
    build_max_heap(arr)
    sorted_list = []
    while arr:
        # Swap the current maximum to the end before removing it; popping
        # index 0 directly would shift every element and break the
        # parent/child relationships the heap relies on.
        swap(arr, 0, len(arr) - 1)
        sorted_list.append(arr.pop())
        max_heapify(arr, 0)
    sorted_list.reverse()
    return sorted_list
|
flexible
|
{
"blob_id": "1262d41be3bf873d003464cb23998dde20fde318",
"index": 8115,
"step-1": "<mask token>\n\n\ndef swap(arr, first, second):\n \"\"\"\n Swaps the first index with the second.\n\n arr: an input array\n first: an index in the array\n second: an index in the array\n\n This function has the side effect mentioned above.\n \"\"\"\n arr[first], arr[second] = arr[second], arr[first]\n\n\ndef parent(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the parent of the given index.\n \"\"\"\n return (i + 1) / 2 - 1\n\n\ndef left(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the left-child of the given index.\n \"\"\"\n return 2 * (i + 1) - 1\n\n\n<mask token>\n\n\ndef build_max_heap(arr):\n for i in range(len(arr) / 2, 0, -1):\n max_heapify(arr, i - 1)\n\n\n<mask token>\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n sorted_list = []\n while arr:\n sorted_list.append(arr.pop(0))\n max_heapify(arr, 0)\n sorted_list.reverse()\n return sorted_list\n",
"step-2": "<mask token>\n\n\ndef swap(arr, first, second):\n \"\"\"\n Swaps the first index with the second.\n\n arr: an input array\n first: an index in the array\n second: an index in the array\n\n This function has the side effect mentioned above.\n \"\"\"\n arr[first], arr[second] = arr[second], arr[first]\n\n\ndef parent(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the parent of the given index.\n \"\"\"\n return (i + 1) / 2 - 1\n\n\ndef left(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the left-child of the given index.\n \"\"\"\n return 2 * (i + 1) - 1\n\n\n<mask token>\n\n\ndef max_heapify(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n left_child = left(i)\n right_child = right(i)\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n else:\n largest = i\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest != i:\n swap(heap, i, largest)\n max_heapify(heap, largest)\n\n\n<mask token>\n\n\ndef build_max_heap(arr):\n for i in range(len(arr) / 2, 0, -1):\n max_heapify(arr, i - 1)\n\n\ndef max_heapify_unrecursive(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n while True:\n left_child = left(i)\n right_child = right(i)\n largest = i\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest == i:\n return\n swap(heap, i, largest)\n i = largest\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n sorted_list = []\n while arr:\n sorted_list.append(arr.pop(0))\n max_heapify(arr, 0)\n sorted_list.reverse()\n return sorted_list\n",
"step-3": "<mask token>\n\n\ndef swap(arr, first, second):\n \"\"\"\n Swaps the first index with the second.\n\n arr: an input array\n first: an index in the array\n second: an index in the array\n\n This function has the side effect mentioned above.\n \"\"\"\n arr[first], arr[second] = arr[second], arr[first]\n\n\ndef parent(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the parent of the given index.\n \"\"\"\n return (i + 1) / 2 - 1\n\n\ndef left(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the left-child of the given index.\n \"\"\"\n return 2 * (i + 1) - 1\n\n\ndef right(i):\n \"\"\"\n i: an integer index in a heap\n\n Returns the index of the right-child of the given index.\n \"\"\"\n return 2 * (i + 1)\n\n\ndef max_heapify(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n left_child = left(i)\n right_child = right(i)\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n else:\n largest = i\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest != i:\n swap(heap, i, largest)\n max_heapify(heap, largest)\n\n\n<mask token>\n\n\ndef build_max_heap(arr):\n for i in range(len(arr) / 2, 0, -1):\n max_heapify(arr, i - 1)\n\n\ndef max_heapify_unrecursive(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n while True:\n left_child = left(i)\n right_child = right(i)\n largest = i\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest == i:\n return\n swap(heap, i, largest)\n i = largest\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n sorted_list = []\n while arr:\n sorted_list.append(arr.pop(0))\n max_heapify(arr, 0)\n sorted_list.reverse()\n return sorted_list\n",
"step-4": "import array\n\n\ndef swap(arr, first, second):\n \"\"\"\n Swaps the first index with the second.\n\n arr: an input array\n first: an index in the array\n second: an index in the array\n\n This function has the side effect mentioned above.\n \"\"\"\n arr[first], arr[second] = arr[second], arr[first]\n\n\ndef parent(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the parent of the given index.\n \"\"\"\n return (i + 1) / 2 - 1\n\n\ndef left(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the left-child of the given index.\n \"\"\"\n return 2 * (i + 1) - 1\n\n\ndef right(i):\n \"\"\"\n i: an integer index in a heap\n\n Returns the index of the right-child of the given index.\n \"\"\"\n return 2 * (i + 1)\n\n\ndef max_heapify(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n left_child = left(i)\n right_child = right(i)\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n else:\n largest = i\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest != i:\n swap(heap, i, largest)\n max_heapify(heap, largest)\n\n\nexample_heap = array.array('i', [16, 4, 10, 14, 7, 9, 3, 2, 8, 1])\n\n\ndef build_max_heap(arr):\n for i in range(len(arr) / 2, 0, -1):\n max_heapify(arr, i - 1)\n\n\ndef max_heapify_unrecursive(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n while True:\n left_child = left(i)\n right_child = right(i)\n largest = i\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest == i:\n return\n swap(heap, i, largest)\n i = largest\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n sorted_list = []\n while arr:\n sorted_list.append(arr.pop(0))\n max_heapify(arr, 0)\n sorted_list.reverse()\n return sorted_list\n",
"step-5": null,
"step-ids": [
5,
7,
8,
10
]
}
|
[
5,
7,
8,
10
] |
<|reserved_special_token_0|>
class Renderable:
def __init__(self, material_name: str, attributes: Dict[str, np.ndarray
], model_mat=np.eye(4), uv_scale=1.0):
self.model_mat = model_mat
self.material_name = material_name
self._attributes = attributes
self._uv_scale = uv_scale
self._current_scene = None
self._program = None
self._scene_version = -1
<|reserved_special_token_0|>
def scale_uv_scale(self, v):
self._uv_scale *= v
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DummyRenderer(app.Canvas):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
gloo.set_viewport(0, 0, *self.size)
def __enter__(self):
self._backend._vispy_warmup()
return self
class ContextProvider:
def __init__(self, size):
self.size = size
canvas = gloo.get_current_canvas()
self.context_exists = canvas is not None and not canvas._closed
if self.context_exists:
logger.debug('Using existing OpenGL context.')
self.provider = gloo.get_current_canvas()
self.previous_size = self.provider.size
else:
logger.debug('Providing temporary context with DummyRenderer.')
self.provider = DummyRenderer(size=size)
def __enter__(self):
gloo.set_viewport(0, 0, *self.size)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.context_exists:
self.provider.__exit__(exc_type, exc_val, exc_tb)
else:
gloo.set_viewport(0, 0, *self.previous_size)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Renderable:
def __init__(self, material_name: str, attributes: Dict[str, np.ndarray
], model_mat=np.eye(4), uv_scale=1.0):
self.model_mat = model_mat
self.material_name = material_name
self._attributes = attributes
self._uv_scale = uv_scale
self._current_scene = None
self._program = None
self._scene_version = -1
<|reserved_special_token_0|>
def scale_uv_scale(self, v):
self._uv_scale *= v
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
def activate(self, scene, camera):
material = scene.get_material(self.material_name)
if self._program is None or scene != self._current_scene:
self._current_scene = scene
self._scene_version = -1
if self._scene_version != scene.version:
self._current_scene = scene
self._scene_version = scene.version
self._program = material.compile(num_lights=len(scene.lights),
num_shadow_sources=len(scene.shadow_sources),
use_radiance_map=scene.radiance_map is not None)
material.upload_attributes(self._program, self._attributes,
self._uv_scale)
material.upload_radmap(self._program, scene.radiance_map)
material.upload_shadow_sources(self._program, scene.shadow_sources)
material.upload_lights(self._program, scene.lights)
material.upload_camera(self._program, camera)
self._program['u_model'] = self.model_mat.T
return self._program
<|reserved_special_token_0|>
class DummyRenderer(app.Canvas):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
gloo.set_viewport(0, 0, *self.size)
def __enter__(self):
self._backend._vispy_warmup()
return self
class ContextProvider:
def __init__(self, size):
self.size = size
canvas = gloo.get_current_canvas()
self.context_exists = canvas is not None and not canvas._closed
if self.context_exists:
logger.debug('Using existing OpenGL context.')
self.provider = gloo.get_current_canvas()
self.previous_size = self.provider.size
else:
logger.debug('Providing temporary context with DummyRenderer.')
self.provider = DummyRenderer(size=size)
def __enter__(self):
gloo.set_viewport(0, 0, *self.size)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.context_exists:
self.provider.__exit__(exc_type, exc_val, exc_tb)
else:
gloo.set_viewport(0, 0, *self.previous_size)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Renderable:
def __init__(self, material_name: str, attributes: Dict[str, np.ndarray
], model_mat=np.eye(4), uv_scale=1.0):
self.model_mat = model_mat
self.material_name = material_name
self._attributes = attributes
self._uv_scale = uv_scale
self._current_scene = None
self._program = None
self._scene_version = -1
def set_uv_scale(self, scale):
self._uv_scale = scale
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
def scale_uv_scale(self, v):
self._uv_scale *= v
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
def activate(self, scene, camera):
material = scene.get_material(self.material_name)
if self._program is None or scene != self._current_scene:
self._current_scene = scene
self._scene_version = -1
if self._scene_version != scene.version:
self._current_scene = scene
self._scene_version = scene.version
self._program = material.compile(num_lights=len(scene.lights),
num_shadow_sources=len(scene.shadow_sources),
use_radiance_map=scene.radiance_map is not None)
material.upload_attributes(self._program, self._attributes,
self._uv_scale)
material.upload_radmap(self._program, scene.radiance_map)
material.upload_shadow_sources(self._program, scene.shadow_sources)
material.upload_lights(self._program, scene.lights)
material.upload_camera(self._program, camera)
self._program['u_model'] = self.model_mat.T
return self._program
<|reserved_special_token_0|>
class DummyRenderer(app.Canvas):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
gloo.set_viewport(0, 0, *self.size)
def __enter__(self):
self._backend._vispy_warmup()
return self
class ContextProvider:
def __init__(self, size):
self.size = size
canvas = gloo.get_current_canvas()
self.context_exists = canvas is not None and not canvas._closed
if self.context_exists:
logger.debug('Using existing OpenGL context.')
self.provider = gloo.get_current_canvas()
self.previous_size = self.provider.size
else:
logger.debug('Providing temporary context with DummyRenderer.')
self.provider = DummyRenderer(size=size)
def __enter__(self):
gloo.set_viewport(0, 0, *self.size)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.context_exists:
self.provider.__exit__(exc_type, exc_val, exc_tb)
else:
gloo.set_viewport(0, 0, *self.previous_size)
def draw_depth(camera, renderables, rend_target):
rendfb, rendtex, _ = rend_target
material = DepthMaterial()
program = DepthMaterial().compile()
with rendfb:
gloo.clear(color=camera.clear_color)
gloo.set_state(depth_test=True)
gl.glEnable(gl.GL_CULL_FACE)
gl.glCullFace(gl.GL_FRONT)
for renderable in renderables:
material.upload_camera(program, camera)
material.upload_attributes(program, renderable._attributes)
program['u_model'] = renderable.model_mat.T
program.draw(gl.GL_TRIANGLES)
gl.glCullFace(gl.GL_BACK)
gl.glDisable(gl.GL_CULL_FACE)
<|reserved_special_token_1|>
import logging
from typing import Dict
import numpy as np
from meshkit import Mesh
from rendkit.materials import DepthMaterial
from vispy import gloo, app
from vispy.gloo import gl
logger = logging.getLogger(__name__)
class Renderable:
def __init__(self, material_name: str, attributes: Dict[str, np.ndarray
], model_mat=np.eye(4), uv_scale=1.0):
self.model_mat = model_mat
self.material_name = material_name
self._attributes = attributes
self._uv_scale = uv_scale
self._current_scene = None
self._program = None
self._scene_version = -1
def set_uv_scale(self, scale):
self._uv_scale = scale
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
def scale_uv_scale(self, v):
self._uv_scale *= v
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
def activate(self, scene, camera):
material = scene.get_material(self.material_name)
if self._program is None or scene != self._current_scene:
self._current_scene = scene
self._scene_version = -1
if self._scene_version != scene.version:
self._current_scene = scene
self._scene_version = scene.version
self._program = material.compile(num_lights=len(scene.lights),
num_shadow_sources=len(scene.shadow_sources),
use_radiance_map=scene.radiance_map is not None)
material.upload_attributes(self._program, self._attributes,
self._uv_scale)
material.upload_radmap(self._program, scene.radiance_map)
material.upload_shadow_sources(self._program, scene.shadow_sources)
material.upload_lights(self._program, scene.lights)
material.upload_camera(self._program, camera)
self._program['u_model'] = self.model_mat.T
return self._program
def mesh_to_renderables(mesh: Mesh, model_mat):
renderables = []
for material_id, material_name in enumerate(mesh.materials):
filter = {'material': material_id}
vertex_positions = mesh.expand_face_vertices(filter)
vertex_normals = mesh.expand_face_normals(filter)
vertex_tangents, vertex_bitangents = mesh.expand_tangents(filter)
vertex_uvs = mesh.expand_face_uvs(filter)
if len(vertex_positions) < 3:
logger.warning('Material {} not visible.'.format(material_name))
continue
attributes = dict(a_position=vertex_positions, a_normal=
vertex_normals, a_tangent=vertex_tangents, a_bitangent=
vertex_bitangents, a_uv=vertex_uvs)
renderables.append(Renderable(material_name, attributes, model_mat,
uv_scale=mesh.uv_scale))
return renderables
class DummyRenderer(app.Canvas):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
gloo.set_viewport(0, 0, *self.size)
def __enter__(self):
self._backend._vispy_warmup()
return self
class ContextProvider:
def __init__(self, size):
self.size = size
canvas = gloo.get_current_canvas()
self.context_exists = canvas is not None and not canvas._closed
if self.context_exists:
logger.debug('Using existing OpenGL context.')
self.provider = gloo.get_current_canvas()
self.previous_size = self.provider.size
else:
logger.debug('Providing temporary context with DummyRenderer.')
self.provider = DummyRenderer(size=size)
def __enter__(self):
gloo.set_viewport(0, 0, *self.size)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.context_exists:
self.provider.__exit__(exc_type, exc_val, exc_tb)
else:
gloo.set_viewport(0, 0, *self.previous_size)
def draw_depth(camera, renderables, rend_target):
rendfb, rendtex, _ = rend_target
material = DepthMaterial()
program = DepthMaterial().compile()
with rendfb:
gloo.clear(color=camera.clear_color)
gloo.set_state(depth_test=True)
gl.glEnable(gl.GL_CULL_FACE)
gl.glCullFace(gl.GL_FRONT)
for renderable in renderables:
material.upload_camera(program, camera)
material.upload_attributes(program, renderable._attributes)
program['u_model'] = renderable.model_mat.T
program.draw(gl.GL_TRIANGLES)
gl.glCullFace(gl.GL_BACK)
gl.glDisable(gl.GL_CULL_FACE)
<|reserved_special_token_1|>
import logging
from typing import Dict
import numpy as np
from meshkit import Mesh
from rendkit.materials import DepthMaterial
from vispy import gloo, app
from vispy.gloo import gl
logger = logging.getLogger(__name__)
class Renderable:
def __init__(self,
material_name: str,
attributes: Dict[str, np.ndarray],
model_mat=np.eye(4),
uv_scale=1.0):
self.model_mat = model_mat
self.material_name = material_name
self._attributes = attributes
self._uv_scale = uv_scale
self._current_scene = None
self._program = None
self._scene_version = -1
def set_uv_scale(self, scale):
self._uv_scale = scale
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
def scale_uv_scale(self, v):
self._uv_scale *= v
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
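
    # Lazily (re)compiles the material's program whenever the bound scene or
    # its version changes, then uploads attributes, radiance map, shadow
    # sources, lights, camera and the model matrix before returning the
    # program for drawing.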
def activate(self, scene, camera):
material = scene.get_material(self.material_name)
if self._program is None or scene != self._current_scene:
self._current_scene = scene
self._scene_version = -1
if self._scene_version != scene.version:
self._current_scene = scene
self._scene_version = scene.version
self._program = material.compile(
num_lights=len(scene.lights),
num_shadow_sources=len(scene.shadow_sources),
use_radiance_map=scene.radiance_map is not None)
material.upload_attributes(self._program, self._attributes, self._uv_scale)
material.upload_radmap(self._program, scene.radiance_map)
material.upload_shadow_sources(self._program, scene.shadow_sources)
material.upload_lights(self._program, scene.lights)
material.upload_camera(self._program, camera)
self._program['u_model'] = self.model_mat.T
return self._program
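

# Builds one Renderable per material in the mesh, expanding per-face
# positions, normals, tangents/bitangents and UVs into flat vertex arrays;
# materials with fewer than three visible vertices are skipped with a warning.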
def mesh_to_renderables(mesh: Mesh, model_mat):
renderables = []
# For now each renderable represents a submesh with the same materials.
for material_id, material_name in enumerate(mesh.materials):
filter = {'material': material_id}
vertex_positions = mesh.expand_face_vertices(filter)
vertex_normals = mesh.expand_face_normals(filter)
vertex_tangents, vertex_bitangents = mesh.expand_tangents(
filter)
vertex_uvs = mesh.expand_face_uvs(filter)
if len(vertex_positions) < 3:
logger.warning('Material {} not visible.'.format(material_name))
continue
attributes = dict(
a_position=vertex_positions,
a_normal=vertex_normals,
a_tangent=vertex_tangents,
a_bitangent=vertex_bitangents,
a_uv=vertex_uvs
)
renderables.append(Renderable(material_name, attributes, model_mat,
uv_scale=mesh.uv_scale))
return renderables
class DummyRenderer(app.Canvas):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
gloo.set_viewport(0, 0, *self.size)
def __enter__(self):
self._backend._vispy_warmup()
return self
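

# Supplies an OpenGL context: reuses the current canvas when one exists and
# is not closed, otherwise creates a temporary DummyRenderer; the previous
# viewport size is restored on exit.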
class ContextProvider:
def __init__(self, size):
self.size = size
canvas = gloo.get_current_canvas()
self.context_exists = canvas is not None and not canvas._closed
if self.context_exists:
logger.debug("Using existing OpenGL context.")
self.provider = gloo.get_current_canvas()
self.previous_size = self.provider.size
else:
logger.debug("Providing temporary context with DummyRenderer.")
self.provider = DummyRenderer(size=size)
def __enter__(self):
gloo.set_viewport(0, 0, *self.size)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.context_exists:
self.provider.__exit__(exc_type, exc_val, exc_tb)
else:
gloo.set_viewport(0, 0, *self.previous_size)
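

# Depth-only render pass: draws each renderable with the DepthMaterial
# program into the supplied framebuffer, with front-face culling enabled
# while drawing.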
def draw_depth(camera, renderables, rend_target):
rendfb, rendtex, _ = rend_target
material = DepthMaterial()
program = DepthMaterial().compile()
with rendfb:
gloo.clear(color=camera.clear_color)
gloo.set_state(depth_test=True)
gl.glEnable(gl.GL_CULL_FACE)
gl.glCullFace(gl.GL_FRONT)
for renderable in renderables:
material.upload_camera(program, camera)
material.upload_attributes(program, renderable._attributes)
program['u_model'] = renderable.model_mat.T
program.draw(gl.GL_TRIANGLES)
gl.glCullFace(gl.GL_BACK)
gl.glDisable(gl.GL_CULL_FACE)
|
flexible
|
{
"blob_id": "061c287d5f0a5feeeaedc80eea6b3fc4ff02286e",
"index": 7191,
"step-1": "<mask token>\n\n\nclass Renderable:\n\n def __init__(self, material_name: str, attributes: Dict[str, np.ndarray\n ], model_mat=np.eye(4), uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n <mask token>\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n <mask token>\n\n\n<mask token>\n\n\nclass DummyRenderer(app.Canvas):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug('Using existing OpenGL context.')\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug('Providing temporary context with DummyRenderer.')\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n gloo.set_viewport(0, 0, *self.previous_size)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Renderable:\n\n def __init__(self, material_name: str, attributes: Dict[str, np.ndarray\n ], model_mat=np.eye(4), uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n <mask token>\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def activate(self, scene, camera):\n material = scene.get_material(self.material_name)\n if self._program is None or scene != self._current_scene:\n self._current_scene = scene\n self._scene_version = -1\n if self._scene_version != scene.version:\n self._current_scene = scene\n self._scene_version = scene.version\n self._program = material.compile(num_lights=len(scene.lights),\n num_shadow_sources=len(scene.shadow_sources),\n use_radiance_map=scene.radiance_map is not None)\n material.upload_attributes(self._program, self._attributes,\n self._uv_scale)\n material.upload_radmap(self._program, scene.radiance_map)\n material.upload_shadow_sources(self._program, scene.shadow_sources)\n material.upload_lights(self._program, scene.lights)\n material.upload_camera(self._program, camera)\n self._program['u_model'] = self.model_mat.T\n return self._program\n\n\n<mask token>\n\n\nclass DummyRenderer(app.Canvas):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug('Using existing OpenGL context.')\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug('Providing temporary context with DummyRenderer.')\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n gloo.set_viewport(0, 0, *self.previous_size)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Renderable:\n\n def __init__(self, material_name: str, attributes: Dict[str, np.ndarray\n ], model_mat=np.eye(4), uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n\n def set_uv_scale(self, scale):\n self._uv_scale = scale\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def activate(self, scene, camera):\n material = scene.get_material(self.material_name)\n if self._program is None or scene != self._current_scene:\n self._current_scene = scene\n self._scene_version = -1\n if self._scene_version != scene.version:\n self._current_scene = scene\n self._scene_version = scene.version\n self._program = material.compile(num_lights=len(scene.lights),\n num_shadow_sources=len(scene.shadow_sources),\n use_radiance_map=scene.radiance_map is not None)\n material.upload_attributes(self._program, self._attributes,\n self._uv_scale)\n material.upload_radmap(self._program, scene.radiance_map)\n material.upload_shadow_sources(self._program, scene.shadow_sources)\n material.upload_lights(self._program, scene.lights)\n material.upload_camera(self._program, camera)\n self._program['u_model'] = self.model_mat.T\n return self._program\n\n\n<mask token>\n\n\nclass DummyRenderer(app.Canvas):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug('Using existing OpenGL context.')\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug('Providing temporary context with DummyRenderer.')\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n gloo.set_viewport(0, 0, *self.previous_size)\n\n\ndef draw_depth(camera, renderables, rend_target):\n rendfb, rendtex, _ = rend_target\n material = DepthMaterial()\n program = DepthMaterial().compile()\n with rendfb:\n gloo.clear(color=camera.clear_color)\n gloo.set_state(depth_test=True)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_FRONT)\n for renderable in renderables:\n material.upload_camera(program, camera)\n material.upload_attributes(program, renderable._attributes)\n program['u_model'] = renderable.model_mat.T\n program.draw(gl.GL_TRIANGLES)\n gl.glCullFace(gl.GL_BACK)\n gl.glDisable(gl.GL_CULL_FACE)\n",
"step-4": "import logging\nfrom typing import Dict\nimport numpy as np\nfrom meshkit import Mesh\nfrom rendkit.materials import DepthMaterial\nfrom vispy import gloo, app\nfrom vispy.gloo import gl\nlogger = logging.getLogger(__name__)\n\n\nclass Renderable:\n\n def __init__(self, material_name: str, attributes: Dict[str, np.ndarray\n ], model_mat=np.eye(4), uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n\n def set_uv_scale(self, scale):\n self._uv_scale = scale\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def activate(self, scene, camera):\n material = scene.get_material(self.material_name)\n if self._program is None or scene != self._current_scene:\n self._current_scene = scene\n self._scene_version = -1\n if self._scene_version != scene.version:\n self._current_scene = scene\n self._scene_version = scene.version\n self._program = material.compile(num_lights=len(scene.lights),\n num_shadow_sources=len(scene.shadow_sources),\n use_radiance_map=scene.radiance_map is not None)\n material.upload_attributes(self._program, self._attributes,\n self._uv_scale)\n material.upload_radmap(self._program, scene.radiance_map)\n material.upload_shadow_sources(self._program, scene.shadow_sources)\n material.upload_lights(self._program, scene.lights)\n material.upload_camera(self._program, camera)\n self._program['u_model'] = self.model_mat.T\n return self._program\n\n\ndef mesh_to_renderables(mesh: Mesh, model_mat):\n renderables = []\n for material_id, material_name in enumerate(mesh.materials):\n filter = {'material': material_id}\n vertex_positions = mesh.expand_face_vertices(filter)\n vertex_normals = mesh.expand_face_normals(filter)\n vertex_tangents, vertex_bitangents = mesh.expand_tangents(filter)\n vertex_uvs = mesh.expand_face_uvs(filter)\n if len(vertex_positions) < 3:\n logger.warning('Material {} not visible.'.format(material_name))\n continue\n attributes = dict(a_position=vertex_positions, a_normal=\n vertex_normals, a_tangent=vertex_tangents, a_bitangent=\n vertex_bitangents, a_uv=vertex_uvs)\n renderables.append(Renderable(material_name, attributes, model_mat,\n uv_scale=mesh.uv_scale))\n return renderables\n\n\nclass DummyRenderer(app.Canvas):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug('Using existing OpenGL context.')\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug('Providing temporary context with DummyRenderer.')\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n gloo.set_viewport(0, 0, *self.previous_size)\n\n\ndef draw_depth(camera, renderables, rend_target):\n 
rendfb, rendtex, _ = rend_target\n material = DepthMaterial()\n program = DepthMaterial().compile()\n with rendfb:\n gloo.clear(color=camera.clear_color)\n gloo.set_state(depth_test=True)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_FRONT)\n for renderable in renderables:\n material.upload_camera(program, camera)\n material.upload_attributes(program, renderable._attributes)\n program['u_model'] = renderable.model_mat.T\n program.draw(gl.GL_TRIANGLES)\n gl.glCullFace(gl.GL_BACK)\n gl.glDisable(gl.GL_CULL_FACE)\n",
"step-5": "import logging\nfrom typing import Dict\n\nimport numpy as np\n\nfrom meshkit import Mesh\nfrom rendkit.materials import DepthMaterial\nfrom vispy import gloo, app\nfrom vispy.gloo import gl\n\nlogger = logging.getLogger(__name__)\n\n\nclass Renderable:\n def __init__(self,\n material_name: str,\n attributes: Dict[str, np.ndarray],\n model_mat=np.eye(4),\n uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n\n def set_uv_scale(self, scale):\n self._uv_scale = scale\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def activate(self, scene, camera):\n material = scene.get_material(self.material_name)\n if self._program is None or scene != self._current_scene:\n self._current_scene = scene\n self._scene_version = -1\n if self._scene_version != scene.version:\n self._current_scene = scene\n self._scene_version = scene.version\n self._program = material.compile(\n num_lights=len(scene.lights),\n num_shadow_sources=len(scene.shadow_sources),\n use_radiance_map=scene.radiance_map is not None)\n material.upload_attributes(self._program, self._attributes, self._uv_scale)\n material.upload_radmap(self._program, scene.radiance_map)\n material.upload_shadow_sources(self._program, scene.shadow_sources)\n material.upload_lights(self._program, scene.lights)\n\n material.upload_camera(self._program, camera)\n self._program['u_model'] = self.model_mat.T\n\n return self._program\n\n\ndef mesh_to_renderables(mesh: Mesh, model_mat):\n renderables = []\n # For now each renderable represents a submesh with the same materials.\n for material_id, material_name in enumerate(mesh.materials):\n filter = {'material': material_id}\n vertex_positions = mesh.expand_face_vertices(filter)\n vertex_normals = mesh.expand_face_normals(filter)\n vertex_tangents, vertex_bitangents = mesh.expand_tangents(\n filter)\n vertex_uvs = mesh.expand_face_uvs(filter)\n if len(vertex_positions) < 3:\n logger.warning('Material {} not visible.'.format(material_name))\n continue\n attributes = dict(\n a_position=vertex_positions,\n a_normal=vertex_normals,\n a_tangent=vertex_tangents,\n a_bitangent=vertex_bitangents,\n a_uv=vertex_uvs\n )\n renderables.append(Renderable(material_name, attributes, model_mat,\n uv_scale=mesh.uv_scale))\n return renderables\n\n\nclass DummyRenderer(app.Canvas):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug(\"Using existing OpenGL context.\")\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug(\"Providing temporary context with DummyRenderer.\")\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n 
gloo.set_viewport(0, 0, *self.previous_size)\n\n\ndef draw_depth(camera, renderables, rend_target):\n rendfb, rendtex, _ = rend_target\n\n material = DepthMaterial()\n program = DepthMaterial().compile()\n\n with rendfb:\n gloo.clear(color=camera.clear_color)\n gloo.set_state(depth_test=True)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_FRONT)\n for renderable in renderables:\n material.upload_camera(program, camera)\n material.upload_attributes(program, renderable._attributes)\n program['u_model'] = renderable.model_mat.T\n program.draw(gl.GL_TRIANGLES)\n gl.glCullFace(gl.GL_BACK)\n gl.glDisable(gl.GL_CULL_FACE)\n",
"step-ids": [
10,
11,
13,
16,
17
]
}
|
[
10,
11,
13,
16,
17
] |
import socket
import time
class FileTransProgram(object):
def __init__(self, ADDR, file_name):
self.ADDR = ADDR
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(ADDR)
self.file_name = file_name
def recv(self):
self.sock.send(bytes("Connect", "utf8"))
file_size = self.sock.recv(1024).strip()
with open(self.file_name, "wb") as f:
while file_size > 0:
if file_size < 1024:
f.write(self.sock.recv(1024))
break
else:
f.write(self.sock.recv(1024))
file_size -= 1024
self.sock.send(bytes("Success", "utf-8"))
self.close()
def stor(self):
try:
with open(self.file_name, "rb") as f:
data = f.read()
self.sock.send(bytes(str(len(data)), "utf-8"))
time.sleep(0.2)
self.sock.send(data)
except FileNotFoundError as e:
raise e
ACK = str(self.sock.recv(1024).strip(), "utf-8")
if ACK == "Success":
self.close()
def close(self):
self.sock.close()
|
normal
|
{
"blob_id": "231a07e63e40f2e4d204cde76c52e64b922da1b8",
"index": 2619,
"step-1": "<mask token>\n\n\nclass FileTransProgram(object):\n\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FileTransProgram(object):\n\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n\n def recv(self):\n self.sock.send(bytes('Connect', 'utf8'))\n file_size = self.sock.recv(1024).strip()\n with open(self.file_name, 'wb') as f:\n while file_size > 0:\n if file_size < 1024:\n f.write(self.sock.recv(1024))\n break\n else:\n f.write(self.sock.recv(1024))\n file_size -= 1024\n self.sock.send(bytes('Success', 'utf-8'))\n self.close()\n\n def stor(self):\n try:\n with open(self.file_name, 'rb') as f:\n data = f.read()\n self.sock.send(bytes(str(len(data)), 'utf-8'))\n time.sleep(0.2)\n self.sock.send(data)\n except FileNotFoundError as e:\n raise e\n ACK = str(self.sock.recv(1024).strip(), 'utf-8')\n if ACK == 'Success':\n self.close()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FileTransProgram(object):\n\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n\n def recv(self):\n self.sock.send(bytes('Connect', 'utf8'))\n file_size = self.sock.recv(1024).strip()\n with open(self.file_name, 'wb') as f:\n while file_size > 0:\n if file_size < 1024:\n f.write(self.sock.recv(1024))\n break\n else:\n f.write(self.sock.recv(1024))\n file_size -= 1024\n self.sock.send(bytes('Success', 'utf-8'))\n self.close()\n\n def stor(self):\n try:\n with open(self.file_name, 'rb') as f:\n data = f.read()\n self.sock.send(bytes(str(len(data)), 'utf-8'))\n time.sleep(0.2)\n self.sock.send(data)\n except FileNotFoundError as e:\n raise e\n ACK = str(self.sock.recv(1024).strip(), 'utf-8')\n if ACK == 'Success':\n self.close()\n\n def close(self):\n self.sock.close()\n",
"step-4": "import socket\nimport time\n\n\nclass FileTransProgram(object):\n\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n\n def recv(self):\n self.sock.send(bytes('Connect', 'utf8'))\n file_size = self.sock.recv(1024).strip()\n with open(self.file_name, 'wb') as f:\n while file_size > 0:\n if file_size < 1024:\n f.write(self.sock.recv(1024))\n break\n else:\n f.write(self.sock.recv(1024))\n file_size -= 1024\n self.sock.send(bytes('Success', 'utf-8'))\n self.close()\n\n def stor(self):\n try:\n with open(self.file_name, 'rb') as f:\n data = f.read()\n self.sock.send(bytes(str(len(data)), 'utf-8'))\n time.sleep(0.2)\n self.sock.send(data)\n except FileNotFoundError as e:\n raise e\n ACK = str(self.sock.recv(1024).strip(), 'utf-8')\n if ACK == 'Success':\n self.close()\n\n def close(self):\n self.sock.close()\n",
"step-5": "import socket\nimport time\n\n\nclass FileTransProgram(object):\n def __init__(self, ADDR, file_name):\n self.ADDR = ADDR\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(ADDR)\n self.file_name = file_name\n\n def recv(self):\n self.sock.send(bytes(\"Connect\", \"utf8\"))\n file_size = self.sock.recv(1024).strip()\n with open(self.file_name, \"wb\") as f:\n while file_size > 0:\n if file_size < 1024:\n f.write(self.sock.recv(1024))\n break\n else:\n f.write(self.sock.recv(1024))\n file_size -= 1024\n self.sock.send(bytes(\"Success\", \"utf-8\"))\n self.close()\n\n def stor(self):\n try:\n with open(self.file_name, \"rb\") as f:\n data = f.read()\n self.sock.send(bytes(str(len(data)), \"utf-8\"))\n time.sleep(0.2)\n self.sock.send(data)\n except FileNotFoundError as e:\n raise e\n ACK = str(self.sock.recv(1024).strip(), \"utf-8\")\n if ACK == \"Success\":\n self.close()\n\n def close(self):\n self.sock.close()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def __print_field_stats(tfield, field, label):
good_mask = ~field.mask
if not np.any(good_mask):
print(f'{label}: no meaningful data')
return
good_data = field[good_mask]
print(
f"""{label} {tfield}:
{good_data.min()}...{good_data.max()}
mean={good_data.mean()}
std={good_data.std()}
"""
)
print('-' * 20)
def test_plot_maps(target_nc_folder, source_nc_path=''):
ice_fr = xarray.open_dataset(source_nc_path)['LC']
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
start_date = datetime(1981, 1, 1)
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords['time']
source_time = pd.to_datetime(source_time.values.tolist())
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[
'lake_ice_fraction']
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())
lkeff_time_sel = []
lkeff_data_sel = []
for t, afield in zip(lkeff_time, lkeff_data):
if t < start_date:
continue
lkeff_time_sel.append(t)
lkeff_data_sel.append(afield)
lkeff_time = lkeff_time_sel
lkeff_data = lkeff_data_sel
source_data_sel = []
source_time_sel = []
for t, afield in zip(source_time, source_data):
if lkeff_time[0] <= t <= lkeff_time[-1]:
source_data_sel.append(afield)
source_time_sel.append(t)
gs = GridSpec(1, 2)
for i in range(len(source_time_sel)):
ts = source_time_sel[i]
tl = lkeff_time[i]
data_s = source_data_sel[i]
data_l = lkeff_data[i]
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(gs[0, 0])
ax.set_title(f'Source if: {ts}')
cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
ax = fig.add_subplot(gs[0, 1])
ax.set_title(f'Lkeff if: {tl}')
cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
print('*' * 20)
__print_field_stats(ts, data_s, 'source')
__print_field_stats(tl, data_l, 'lkeff')
print('*' * 20)
ms = data_s[~data_s.mask].mean()
ml = data_l[~data_l.mask].mean()
if ms != ml:
print(f'ms={ms}; ml={ml}')
plt.show()
plt.close(fig)
def main():
target_nc_folder = (
'/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'
)
source_nc_path = (
'/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'
)
test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=
source_nc_path)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_plot_area_avg(target_nc_folder='', source_nc_path=''):
ice_fr = xarray.open_dataset(source_nc_path)['LC']
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords['time']
source_time = pd.to_datetime(source_time.values.tolist())
s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(
field.mask) else np.nan) for field in source_data], index=source_time)
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[
'lake_ice_fraction']
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())
s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.
mask) else np.nan) for field in lkeff_data], index=lkeff_time)
s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.
index >= lkeff_time[0])]
assert isinstance(s_source, pd.Series)
print(f'Source: len={len(s_source)}')
print(f'Lkeff: len={len(s_lkeff)}')
fig = plt.figure()
gs = GridSpec(2, 1)
ax = fig.add_subplot(gs[0, 0])
s_source.plot(ax=ax, marker='.', linestyle='None', label='original')
ax.legend()
ax = fig.add_subplot(gs[1, 0], sharex=ax)
s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')
ax.legend()
def __print_field_stats(tfield, field, label):
good_mask = ~field.mask
if not np.any(good_mask):
print(f'{label}: no meaningful data')
return
good_data = field[good_mask]
print(
f"""{label} {tfield}:
{good_data.min()}...{good_data.max()}
mean={good_data.mean()}
std={good_data.std()}
"""
)
print('-' * 20)
def test_plot_maps(target_nc_folder, source_nc_path=''):
ice_fr = xarray.open_dataset(source_nc_path)['LC']
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
start_date = datetime(1981, 1, 1)
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords['time']
source_time = pd.to_datetime(source_time.values.tolist())
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[
'lake_ice_fraction']
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())
lkeff_time_sel = []
lkeff_data_sel = []
for t, afield in zip(lkeff_time, lkeff_data):
if t < start_date:
continue
lkeff_time_sel.append(t)
lkeff_data_sel.append(afield)
lkeff_time = lkeff_time_sel
lkeff_data = lkeff_data_sel
source_data_sel = []
source_time_sel = []
for t, afield in zip(source_time, source_data):
if lkeff_time[0] <= t <= lkeff_time[-1]:
source_data_sel.append(afield)
source_time_sel.append(t)
gs = GridSpec(1, 2)
for i in range(len(source_time_sel)):
ts = source_time_sel[i]
tl = lkeff_time[i]
data_s = source_data_sel[i]
data_l = lkeff_data[i]
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(gs[0, 0])
ax.set_title(f'Source if: {ts}')
cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
ax = fig.add_subplot(gs[0, 1])
ax.set_title(f'Lkeff if: {tl}')
cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
print('*' * 20)
__print_field_stats(ts, data_s, 'source')
__print_field_stats(tl, data_l, 'lkeff')
print('*' * 20)
ms = data_s[~data_s.mask].mean()
ml = data_l[~data_l.mask].mean()
if ms != ml:
print(f'ms={ms}; ml={ml}')
plt.show()
plt.close(fig)
def main():
target_nc_folder = (
'/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'
)
source_nc_path = (
'/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'
)
test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=
source_nc_path)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_plot_area_avg(target_nc_folder='', source_nc_path=''):
ice_fr = xarray.open_dataset(source_nc_path)['LC']
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords['time']
source_time = pd.to_datetime(source_time.values.tolist())
s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(
field.mask) else np.nan) for field in source_data], index=source_time)
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[
'lake_ice_fraction']
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())
s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.
mask) else np.nan) for field in lkeff_data], index=lkeff_time)
s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.
index >= lkeff_time[0])]
assert isinstance(s_source, pd.Series)
print(f'Source: len={len(s_source)}')
print(f'Lkeff: len={len(s_lkeff)}')
fig = plt.figure()
gs = GridSpec(2, 1)
ax = fig.add_subplot(gs[0, 0])
s_source.plot(ax=ax, marker='.', linestyle='None', label='original')
ax.legend()
ax = fig.add_subplot(gs[1, 0], sharex=ax)
s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')
ax.legend()
def __print_field_stats(tfield, field, label):
good_mask = ~field.mask
if not np.any(good_mask):
print(f'{label}: no meaningful data')
return
good_data = field[good_mask]
print(
f"""{label} {tfield}:
{good_data.min()}...{good_data.max()}
mean={good_data.mean()}
std={good_data.std()}
"""
)
print('-' * 20)
def test_plot_maps(target_nc_folder, source_nc_path=''):
ice_fr = xarray.open_dataset(source_nc_path)['LC']
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
start_date = datetime(1981, 1, 1)
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords['time']
source_time = pd.to_datetime(source_time.values.tolist())
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[
'lake_ice_fraction']
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())
lkeff_time_sel = []
lkeff_data_sel = []
for t, afield in zip(lkeff_time, lkeff_data):
if t < start_date:
continue
lkeff_time_sel.append(t)
lkeff_data_sel.append(afield)
lkeff_time = lkeff_time_sel
lkeff_data = lkeff_data_sel
source_data_sel = []
source_time_sel = []
for t, afield in zip(source_time, source_data):
if lkeff_time[0] <= t <= lkeff_time[-1]:
source_data_sel.append(afield)
source_time_sel.append(t)
gs = GridSpec(1, 2)
for i in range(len(source_time_sel)):
ts = source_time_sel[i]
tl = lkeff_time[i]
data_s = source_data_sel[i]
data_l = lkeff_data[i]
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(gs[0, 0])
ax.set_title(f'Source if: {ts}')
cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
ax = fig.add_subplot(gs[0, 1])
ax.set_title(f'Lkeff if: {tl}')
cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
print('*' * 20)
__print_field_stats(ts, data_s, 'source')
__print_field_stats(tl, data_l, 'lkeff')
print('*' * 20)
ms = data_s[~data_s.mask].mean()
ml = data_l[~data_l.mask].mean()
if ms != ml:
print(f'ms={ms}; ml={ml}')
plt.show()
plt.close(fig)
def main():
target_nc_folder = (
'/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'
)
source_nc_path = (
'/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'
)
test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=
source_nc_path)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from datetime import datetime
import xarray
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.dates import date2num
import numpy as np
from matplotlib.gridspec import GridSpec
def test_plot_area_avg(target_nc_folder='', source_nc_path=''):
ice_fr = xarray.open_dataset(source_nc_path)['LC']
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords['time']
source_time = pd.to_datetime(source_time.values.tolist())
s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(
field.mask) else np.nan) for field in source_data], index=source_time)
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[
'lake_ice_fraction']
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())
s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.
mask) else np.nan) for field in lkeff_data], index=lkeff_time)
s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.
index >= lkeff_time[0])]
assert isinstance(s_source, pd.Series)
print(f'Source: len={len(s_source)}')
print(f'Lkeff: len={len(s_lkeff)}')
fig = plt.figure()
gs = GridSpec(2, 1)
ax = fig.add_subplot(gs[0, 0])
s_source.plot(ax=ax, marker='.', linestyle='None', label='original')
ax.legend()
ax = fig.add_subplot(gs[1, 0], sharex=ax)
s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')
ax.legend()
def __print_field_stats(tfield, field, label):
good_mask = ~field.mask
if not np.any(good_mask):
print(f'{label}: no meaningful data')
return
good_data = field[good_mask]
print(
f"""{label} {tfield}:
{good_data.min()}...{good_data.max()}
mean={good_data.mean()}
std={good_data.std()}
"""
)
print('-' * 20)
def test_plot_maps(target_nc_folder, source_nc_path=''):
ice_fr = xarray.open_dataset(source_nc_path)['LC']
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
start_date = datetime(1981, 1, 1)
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords['time']
source_time = pd.to_datetime(source_time.values.tolist())
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[
'lake_ice_fraction']
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())
lkeff_time_sel = []
lkeff_data_sel = []
for t, afield in zip(lkeff_time, lkeff_data):
if t < start_date:
continue
lkeff_time_sel.append(t)
lkeff_data_sel.append(afield)
lkeff_time = lkeff_time_sel
lkeff_data = lkeff_data_sel
source_data_sel = []
source_time_sel = []
for t, afield in zip(source_time, source_data):
if lkeff_time[0] <= t <= lkeff_time[-1]:
source_data_sel.append(afield)
source_time_sel.append(t)
gs = GridSpec(1, 2)
for i in range(len(source_time_sel)):
ts = source_time_sel[i]
tl = lkeff_time[i]
data_s = source_data_sel[i]
data_l = lkeff_data[i]
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(gs[0, 0])
ax.set_title(f'Source if: {ts}')
cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
ax = fig.add_subplot(gs[0, 1])
ax.set_title(f'Lkeff if: {tl}')
cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
print('*' * 20)
__print_field_stats(ts, data_s, 'source')
__print_field_stats(tl, data_l, 'lkeff')
print('*' * 20)
ms = data_s[~data_s.mask].mean()
ml = data_l[~data_l.mask].mean()
if ms != ml:
print(f'ms={ms}; ml={ml}')
plt.show()
plt.close(fig)
def main():
target_nc_folder = (
'/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'
)
source_nc_path = (
'/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'
)
test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=
source_nc_path)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from datetime import datetime
import xarray
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.dates import date2num
import numpy as np
from matplotlib.gridspec import GridSpec
def test_plot_area_avg(target_nc_folder="", source_nc_path=""):
# target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009"
# target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_icefix_Obs_1980-1981_test"
#target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1"
ice_fr = xarray.open_dataset(source_nc_path)["LC"]
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
# t, x, y
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords["time"]
source_time = pd.to_datetime(source_time.values.tolist())
s_source = pd.Series(data=[
(field[~field.mask].mean() if not np.all(field.mask) else np.nan) for field in source_data
], index=source_time)
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + "/*daily.nc")["lake_ice_fraction"]
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords["t"].values.tolist())
s_lkeff = pd.Series([
(field[~field.mask].mean() if not np.all(field.mask) else np.nan) for field in lkeff_data
], index=lkeff_time)
s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.index >= lkeff_time[0])]
assert isinstance(s_source, pd.Series)
#
print(f"Source: len={len(s_source)}")
print(f"Lkeff: len={len(s_lkeff)}")
# do the plotting
fig = plt.figure()
gs = GridSpec(2, 1)
# plot initial lake fractions
ax = fig.add_subplot(gs[0, 0])
s_source.plot(ax=ax, marker=".", linestyle="None", label="original")
ax.legend()
# plot lake fractions outputed by hles algorithm
ax = fig.add_subplot(gs[1, 0], sharex=ax)
s_lkeff.plot(ax=ax, marker=".", linestyle="None", label="lkeff")
ax.legend()
# plt.show()
def __print_field_stats(tfield, field, label):
good_mask = ~field.mask
if not np.any(good_mask):
print(f"{label}: no meaningful data")
return
good_data = field[good_mask]
print(f"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\n"
f"mean={good_data.mean()}\n"
f"std={good_data.std()}\n")
print("-" * 20)
def test_plot_maps(target_nc_folder, source_nc_path=""):
# target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009"
# target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1"
ice_fr = xarray.open_dataset(source_nc_path)["LC"]
assert isinstance(ice_fr, xarray.DataArray)
ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))
start_date = datetime(1981, 1, 1)
# t, x, y
source_data = ice_fr.to_masked_array(copy=False)
source_time = ice_fr.coords["time"]
source_time = pd.to_datetime(source_time.values.tolist())
ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + "/*daily.nc")["lake_ice_fraction"]
lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
lkeff_time = pd.to_datetime(ice_fr_lkeff.coords["t"].values.tolist())
# select from lkeff data
lkeff_time_sel = []
lkeff_data_sel = []
for t, afield in zip(lkeff_time, lkeff_data):
if t < start_date:
continue
lkeff_time_sel.append(t)
lkeff_data_sel.append(afield)
lkeff_time = lkeff_time_sel
lkeff_data = lkeff_data_sel
# Select from the source time and data
source_data_sel = []
source_time_sel = []
for t, afield in zip(source_time, source_data):
if lkeff_time[0] <= t <= lkeff_time[-1]:
source_data_sel.append(afield)
source_time_sel.append(t)
gs = GridSpec(1, 2)
for i in range(len(source_time_sel)):
ts = source_time_sel[i]
tl = lkeff_time[i]
data_s = source_data_sel[i]
data_l = lkeff_data[i]
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(gs[0, 0])
ax.set_title(f"Source if: {ts}")
cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
ax = fig.add_subplot(gs[0, 1])
ax.set_title(f"Lkeff if: {tl}")
cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))
plt.colorbar(cs, ax=ax)
print("*" * 20)
__print_field_stats(ts, data_s, "source")
__print_field_stats(tl, data_l, "lkeff")
print("*" * 20)
ms = data_s[~data_s.mask].mean()
ml = data_l[~data_l.mask].mean()
if ms != ml:
print(f"ms={ms}; ml={ml}")
plt.show()
plt.close(fig)
def main():
target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981"
# source_nc_path = "/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260/cis_nic_glerl_interpolated_lc.nc"
source_nc_path = "/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc"
test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)
# test_plot_maps(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)
plt.show()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "2d5e147b081283047cd044746d73d91ee2e59052",
"index": 4139,
"step-1": "<mask token>\n\n\ndef __print_field_stats(tfield, field, label):\n good_mask = ~field.mask\n if not np.any(good_mask):\n print(f'{label}: no meaningful data')\n return\n good_data = field[good_mask]\n print(\n f\"\"\"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\nmean={good_data.mean()}\nstd={good_data.std()}\n\"\"\"\n )\n print('-' * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n start_date = datetime(1981, 1, 1)\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n lkeff_time_sel = []\n lkeff_data_sel = []\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f'Source if: {ts}')\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f'Lkeff if: {tl}')\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n print('*' * 20)\n __print_field_stats(ts, data_s, 'source')\n __print_field_stats(tl, data_l, 'lkeff')\n print('*' * 20)\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f'ms={ms}; ml={ml}')\n plt.show()\n plt.close(fig)\n\n\ndef main():\n target_nc_folder = (\n '/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'\n )\n source_nc_path = (\n '/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'\n )\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=\n source_nc_path)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_plot_area_avg(target_nc_folder='', source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(\n field.mask) else np.nan) for field in source_data], index=source_time)\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.\n mask) else np.nan) for field in lkeff_data], index=lkeff_time)\n s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.\n index >= lkeff_time[0])]\n assert isinstance(s_source, pd.Series)\n print(f'Source: len={len(s_source)}')\n print(f'Lkeff: len={len(s_lkeff)}')\n fig = plt.figure()\n gs = GridSpec(2, 1)\n ax = fig.add_subplot(gs[0, 0])\n s_source.plot(ax=ax, marker='.', linestyle='None', label='original')\n ax.legend()\n ax = fig.add_subplot(gs[1, 0], sharex=ax)\n s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')\n ax.legend()\n\n\ndef __print_field_stats(tfield, field, label):\n good_mask = ~field.mask\n if not np.any(good_mask):\n print(f'{label}: no meaningful data')\n return\n good_data = field[good_mask]\n print(\n f\"\"\"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\nmean={good_data.mean()}\nstd={good_data.std()}\n\"\"\"\n )\n print('-' * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n start_date = datetime(1981, 1, 1)\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n lkeff_time_sel = []\n lkeff_data_sel = []\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f'Source if: {ts}')\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f'Lkeff if: {tl}')\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n print('*' * 20)\n __print_field_stats(ts, data_s, 'source')\n __print_field_stats(tl, data_l, 'lkeff')\n print('*' * 20)\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f'ms={ms}; ml={ml}')\n plt.show()\n 
plt.close(fig)\n\n\ndef main():\n target_nc_folder = (\n '/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'\n )\n source_nc_path = (\n '/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'\n )\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=\n source_nc_path)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_plot_area_avg(target_nc_folder='', source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(\n field.mask) else np.nan) for field in source_data], index=source_time)\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.\n mask) else np.nan) for field in lkeff_data], index=lkeff_time)\n s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.\n index >= lkeff_time[0])]\n assert isinstance(s_source, pd.Series)\n print(f'Source: len={len(s_source)}')\n print(f'Lkeff: len={len(s_lkeff)}')\n fig = plt.figure()\n gs = GridSpec(2, 1)\n ax = fig.add_subplot(gs[0, 0])\n s_source.plot(ax=ax, marker='.', linestyle='None', label='original')\n ax.legend()\n ax = fig.add_subplot(gs[1, 0], sharex=ax)\n s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')\n ax.legend()\n\n\ndef __print_field_stats(tfield, field, label):\n good_mask = ~field.mask\n if not np.any(good_mask):\n print(f'{label}: no meaningful data')\n return\n good_data = field[good_mask]\n print(\n f\"\"\"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\nmean={good_data.mean()}\nstd={good_data.std()}\n\"\"\"\n )\n print('-' * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n start_date = datetime(1981, 1, 1)\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n lkeff_time_sel = []\n lkeff_data_sel = []\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f'Source if: {ts}')\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f'Lkeff if: {tl}')\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n print('*' * 20)\n __print_field_stats(ts, data_s, 'source')\n __print_field_stats(tl, data_l, 'lkeff')\n print('*' * 20)\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f'ms={ms}; ml={ml}')\n plt.show()\n 
plt.close(fig)\n\n\ndef main():\n target_nc_folder = (\n '/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'\n )\n source_nc_path = (\n '/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'\n )\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=\n source_nc_path)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from datetime import datetime\nimport xarray\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib.dates import date2num\nimport numpy as np\nfrom matplotlib.gridspec import GridSpec\n\n\ndef test_plot_area_avg(target_nc_folder='', source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(\n field.mask) else np.nan) for field in source_data], index=source_time)\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.\n mask) else np.nan) for field in lkeff_data], index=lkeff_time)\n s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.\n index >= lkeff_time[0])]\n assert isinstance(s_source, pd.Series)\n print(f'Source: len={len(s_source)}')\n print(f'Lkeff: len={len(s_lkeff)}')\n fig = plt.figure()\n gs = GridSpec(2, 1)\n ax = fig.add_subplot(gs[0, 0])\n s_source.plot(ax=ax, marker='.', linestyle='None', label='original')\n ax.legend()\n ax = fig.add_subplot(gs[1, 0], sharex=ax)\n s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')\n ax.legend()\n\n\ndef __print_field_stats(tfield, field, label):\n good_mask = ~field.mask\n if not np.any(good_mask):\n print(f'{label}: no meaningful data')\n return\n good_data = field[good_mask]\n print(\n f\"\"\"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\nmean={good_data.mean()}\nstd={good_data.std()}\n\"\"\"\n )\n print('-' * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n start_date = datetime(1981, 1, 1)\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n lkeff_time_sel = []\n lkeff_data_sel = []\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f'Source if: {ts}')\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f'Lkeff if: {tl}')\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n print('*' * 20)\n __print_field_stats(ts, data_s, 'source')\n 
__print_field_stats(tl, data_l, 'lkeff')\n print('*' * 20)\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f'ms={ms}; ml={ml}')\n plt.show()\n plt.close(fig)\n\n\ndef main():\n target_nc_folder = (\n '/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'\n )\n source_nc_path = (\n '/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'\n )\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=\n source_nc_path)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from datetime import datetime\n\nimport xarray\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib.dates import date2num\nimport numpy as np\nfrom matplotlib.gridspec import GridSpec\n\n\ndef test_plot_area_avg(target_nc_folder=\"\", source_nc_path=\"\"):\n\n # target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009\"\n # target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_icefix_Obs_1980-1981_test\"\n\n #target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1\"\n\n\n\n ice_fr = xarray.open_dataset(source_nc_path)[\"LC\"]\n\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n\n\n # t, x, y\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords[\"time\"]\n source_time = pd.to_datetime(source_time.values.tolist())\n\n s_source = pd.Series(data=[\n (field[~field.mask].mean() if not np.all(field.mask) else np.nan) for field in source_data\n ], index=source_time)\n\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + \"/*daily.nc\")[\"lake_ice_fraction\"]\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords[\"t\"].values.tolist())\n\n s_lkeff = pd.Series([\n (field[~field.mask].mean() if not np.all(field.mask) else np.nan) for field in lkeff_data\n ], index=lkeff_time)\n\n s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.index >= lkeff_time[0])]\n\n assert isinstance(s_source, pd.Series)\n\n #\n print(f\"Source: len={len(s_source)}\")\n print(f\"Lkeff: len={len(s_lkeff)}\")\n\n # do the plotting\n fig = plt.figure()\n gs = GridSpec(2, 1)\n # plot initial lake fractions\n ax = fig.add_subplot(gs[0, 0])\n s_source.plot(ax=ax, marker=\".\", linestyle=\"None\", label=\"original\")\n ax.legend()\n\n\n # plot lake fractions outputed by hles algorithm\n ax = fig.add_subplot(gs[1, 0], sharex=ax)\n s_lkeff.plot(ax=ax, marker=\".\", linestyle=\"None\", label=\"lkeff\")\n\n ax.legend()\n # plt.show()\n\n\ndef __print_field_stats(tfield, field, label):\n\n\n good_mask = ~field.mask\n\n if not np.any(good_mask):\n print(f\"{label}: no meaningful data\")\n return\n\n good_data = field[good_mask]\n print(f\"{label} {tfield}:\\n{good_data.min()}...{good_data.max()}\\n\"\n f\"mean={good_data.mean()}\\n\"\n f\"std={good_data.std()}\\n\")\n print(\"-\" * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=\"\"):\n\n # target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009\"\n\n\n\n # target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1\"\n\n ice_fr = xarray.open_dataset(source_nc_path)[\"LC\"]\n\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n\n\n start_date = datetime(1981, 1, 1)\n\n\n # t, x, y\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords[\"time\"]\n source_time = pd.to_datetime(source_time.values.tolist())\n\n\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + \"/*daily.nc\")[\"lake_ice_fraction\"]\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords[\"t\"].values.tolist())\n\n\n # select from lkeff data\n 
lkeff_time_sel = []\n lkeff_data_sel = []\n\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n\n\n\n # Select from the source time and data\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n\n\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n\n fig = plt.figure(figsize=(20, 10))\n\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f\"Source if: {ts}\")\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f\"Lkeff if: {tl}\")\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n\n print(\"*\" * 20)\n __print_field_stats(ts, data_s, \"source\")\n __print_field_stats(tl, data_l, \"lkeff\")\n print(\"*\" * 20)\n\n\n\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f\"ms={ms}; ml={ml}\")\n plt.show()\n\n plt.close(fig)\n\n\n\n\n\n\ndef main():\n target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981\"\n # source_nc_path = \"/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260/cis_nic_glerl_interpolated_lc.nc\"\n source_nc_path = \"/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc\"\n\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)\n # test_plot_maps(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)\n plt.show()\n\n\n\nif __name__ == '__main__':\n main()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import scrapy
from scrapy.loader import ItemLoader
class BlogSpider(scrapy.Spider):
name = 'blogspider'
start_urls = ['https://blog.scrapinghub.com']
def content_title_parser(self, mystr):
return mystr[0].split(' ')[3]
def parse(self, response):
for url in response.css('ul li a::attr("href")').re('.*/category/.*'):
yield scrapy.Request(response.urljoin(url), self.parse_titles)
def parse_titles(self, response):
l = ItemLoader(item=Posts(), response=response)
        l.add_css('content_title', 'h1.pagetitle::text', self.content_title_parser)
l.add_css('post_title', 'div.entries > ul > li a::text')
return l.load_item()
class Posts(scrapy.Item):
content_title = scrapy.Field()
post_title = scrapy.Field()
|
normal
|
{
"blob_id": "4c79dcf394acbcc9a636bcc9b0aac13a2bafc7e3",
"index": 9249,
"step-1": "<mask token>\n\n\nclass BlogSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n for url in response.css('ul li a::attr(\"href\")').re('.*/category/.*'):\n yield scrapy.Request(response.urljoin(url), self.parse_titles)\n <mask token>\n\n\nclass Posts(scrapy.Item):\n content_title = scrapy.Field()\n post_title = scrapy.Field()\n",
"step-2": "<mask token>\n\n\nclass BlogSpider(scrapy.Spider):\n <mask token>\n <mask token>\n\n def content_title_parser(self, mystr):\n return mystr[0].split(' ')[3]\n\n def parse(self, response):\n for url in response.css('ul li a::attr(\"href\")').re('.*/category/.*'):\n yield scrapy.Request(response.urljoin(url), self.parse_titles)\n <mask token>\n\n\nclass Posts(scrapy.Item):\n content_title = scrapy.Field()\n post_title = scrapy.Field()\n",
"step-3": "<mask token>\n\n\nclass BlogSpider(scrapy.Spider):\n name = 'blogspider'\n start_urls = ['https://blog.scrapinghub.com']\n\n def content_title_parser(self, mystr):\n return mystr[0].split(' ')[3]\n\n def parse(self, response):\n for url in response.css('ul li a::attr(\"href\")').re('.*/category/.*'):\n yield scrapy.Request(response.urljoin(url), self.parse_titles)\n\n def parse_titles(self, response):\n l = ItemLoader(item=Posts(), response=response)\n l.add_css('content_title', 'h1.pagetitle::text', self.\n content_title_parser)\n l.add_css('post_title', 'div.entries > ul > li a::text')\n return l.load_item()\n\n\nclass Posts(scrapy.Item):\n content_title = scrapy.Field()\n post_title = scrapy.Field()\n",
"step-4": "import scrapy\nfrom scrapy.loader import ItemLoader\n\n\nclass BlogSpider(scrapy.Spider):\n name = 'blogspider'\n start_urls = ['https://blog.scrapinghub.com']\n\n def content_title_parser(self, mystr):\n return mystr[0].split(' ')[3]\n\n def parse(self, response):\n for url in response.css('ul li a::attr(\"href\")').re('.*/category/.*'):\n yield scrapy.Request(response.urljoin(url), self.parse_titles)\n\n def parse_titles(self, response):\n l = ItemLoader(item=Posts(), response=response)\n l.add_css('content_title', 'h1.pagetitle::text', self.\n content_title_parser)\n l.add_css('post_title', 'div.entries > ul > li a::text')\n return l.load_item()\n\n\nclass Posts(scrapy.Item):\n content_title = scrapy.Field()\n post_title = scrapy.Field()\n",
"step-5": null,
"step-ids": [
4,
5,
7,
8
]
}
|
[
4,
5,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(x)
<|reserved_special_token_1|>
x = 'Programming is like building a multilingual puzzle\n'
print(x)
<|reserved_special_token_1|>
#!/usr/bin/env python3
x = "Programming is like building a multilingual puzzle\n"
print (x)
|
flexible
|
{
"blob_id": "95c0ba757b7561ef6cc0ad312034e2695f8420c3",
"index": 3933,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(x)\n",
"step-3": "x = 'Programming is like building a multilingual puzzle\\n'\nprint(x)\n",
"step-4": "#!/usr/bin/env python3\n\nx = \"Programming is like building a multilingual puzzle\\n\"\n\n\nprint (x)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f(h):
Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)
Vs = 4 * pi * r ** 3 / 3
return ρw * Vw - ρs * Vs
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
r = 1.0
ρs = 200.0
ρw = 1000.0
def f(h):
Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)
Vs = 4 * pi * r ** 3 / 3
return ρw * Vw - ρs * Vs
xr = root_regula_falsi(f, 0.0, 2 * r)
<|reserved_special_token_1|>
from math import pi
from root_regula_falsi import *
r = 1.0
ρs = 200.0
ρw = 1000.0
def f(h):
Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)
Vs = 4 * pi * r ** 3 / 3
return ρw * Vw - ρs * Vs
xr = root_regula_falsi(f, 0.0, 2 * r)
<|reserved_special_token_1|>
from math import pi
from root_regula_falsi import *
r = 1.0
ρs = 200.0
ρw = 1000.0
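# f(h) is the balance for the floating sphere: it vanishes when the mass of
# displaced water (ρw*Vw) equals the mass of the sphere (ρs*Vs).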
def f(h):
Vw = 4*pi*r**3/3 - pi*h**2/3*(3*r - h) # displaced volume of water
Vs = 4*pi*r**3/3
return ρw*Vw - ρs*Vs
xr = root_regula_falsi(f, 0.0, 2*r)
|
flexible
|
{
"blob_id": "3e7d2bacb15c39658ef5044685b73068deb1c145",
"index": 6060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\n<mask token>\n",
"step-3": "<mask token>\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2 * r)\n",
"step-4": "from math import pi\nfrom root_regula_falsi import *\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2 * r)\n",
"step-5": "from math import pi\nfrom root_regula_falsi import *\n\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\ndef f(h):\n Vw = 4*pi*r**3/3 - pi*h**2/3*(3*r - h) # displaced volume of water\n Vs = 4*pi*r**3/3\n return ρw*Vw - ρs*Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2*r)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Script start
print"This is the two number subtraction python program."
a = 9
b = 2
c = a - b
print c
# Script close
|
normal
|
{
"blob_id": "a045423edd94d985dfc9660bcfe4a88c61bf4574",
"index": 20,
"step-1": "#Script start\nprint\"This is the two number subtraction python program.\"\na = 9\nb = 2\nc = a - b\nprint c\n\n# Scrip close\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import numpy as np
import pycuda
import pycuda.driver as driver
import cudasim.solvers.cuda.Simulator_mg as sim
import cudasim
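# GPU ODE integrator: assembles the LSODA CUDA source at run time, compiles it
# with PyCUDA, and lets each GPU thread integrate one parameter set (parameters
# are supplied through the 2-D texture param_tex).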
class Lsoda(sim.SimulatorMG):
_param_tex = None
_step_code = None
_runtimeCompile = True
_lsoda_source_ = """
extern "C"{
#include <stdio.h>
__device__ myFex myfex;
__device__ myJex myjex;
__global__ void init_common(){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
cuLsodaCommonBlockInit( &(common[tid]) );
}
__global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol,
double *rtol, double *atol, int *itask, int *istate, int *iopt,
double *rwork, int *lrw, int *iwork, int *liw, int *jt)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//if(tid==0){
//printf("I am thread time %d %f\\n", tid, t[0] );
//}
dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid,
istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );
//if(tid==0){
//printf("I am done %d %f\\n", tid, t[0] );
//}
}
}
"""
def _compile(self, step_code):
# set beta to 1: repeats are pointless as simulation is deterministic
self._beta = 1
fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'cuLsoda_all.cu'), 'r')
_sourceFromFile_ = fc.read()
_isize_ = "#define ISIZE " + repr(20 + self._speciesNumber) + "\n"
_rsize_ = "#define RSIZE " + repr(22 + self._speciesNumber * max(16, self._speciesNumber + 9)) + "\n"
_textures_ = "texture<float, 2, cudaReadModeElementType> param_tex;\n"
_common_block_ = "__device__ struct cuLsodaCommonBlock common[" + repr(1 * 1) + "];\n"
_code_ = _isize_ + _rsize_ + _textures_ + step_code + _sourceFromFile_ + _common_block_ + self._lsoda_source_
if self._dump:
of = open("full_ode_code.cu", "w")
print >> of, _code_
# dummy compile to determine optimal blockSize and gridSize
compiled = pycuda.compiler.SourceModule(_code_, nvcc="nvcc", options=[], no_extern_c=True, keep=False)
blocks, threads = self._getOptimalGPUParam(compiled.get_function("cuLsoda"))
blocks = self._MAXBLOCKSPERDEVICE
# real compile
_common_block_ = "__device__ struct cuLsodaCommonBlock common[" + repr(blocks * threads) + "];\n"
_code_ = _isize_ + _rsize_ + _textures_ + step_code + _sourceFromFile_ + _common_block_ + self._lsoda_source_
if self._dump:
of = open("full_ode_code.cu", "w")
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc="nvcc", options=[], no_extern_c=True, keep=False)
self._param_tex = compiled.get_texref("param_tex")
lsoda_kernel = compiled.get_function("cuLsoda")
return compiled, lsoda_kernel
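    # Run all experiments in parallel, one CUDA thread per parameter set,
    # advancing every thread to each requested output time (in chunks of
    # self._dt when a positive step size is set).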
def _run_simulation(self, parameters, init_values, blocks, threads, in_atol=1e-6, in_rtol=1e-6):
total_threads = threads * blocks
experiments = len(parameters)
neqn = self._speciesNumber
# compile
init_common_kernel = self._completeCode.get_function("init_common")
init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))
# output array
ret_xt = np.zeros([total_threads, 1, self._resultNumber, self._speciesNumber])
ret_istate = np.ones([total_threads], dtype=np.int32)
# calculate sizes of work spaces
isize = 20 + self._speciesNumber
rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)
# local variables
t = np.zeros([total_threads], dtype=np.float64)
jt = np.zeros([total_threads], dtype=np.int32)
neq = np.zeros([total_threads], dtype=np.int32)
itol = np.zeros([total_threads], dtype=np.int32)
iopt = np.zeros([total_threads], dtype=np.int32)
rtol = np.zeros([total_threads], dtype=np.float64)
iout = np.zeros([total_threads], dtype=np.int32)
tout = np.zeros([total_threads], dtype=np.float64)
itask = np.zeros([total_threads], dtype=np.int32)
istate = np.zeros([total_threads], dtype=np.int32)
atol = np.zeros([total_threads], dtype=np.float64)
liw = np.zeros([total_threads], dtype=np.int32)
lrw = np.zeros([total_threads], dtype=np.int32)
iwork = np.zeros([isize * total_threads], dtype=np.int32)
rwork = np.zeros([rsize * total_threads], dtype=np.float64)
y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)
for i in range(total_threads):
neq[i] = neqn
t[i] = 0
itol[i] = 1
itask[i] = 1
istate[i] = 1
iopt[i] = 0
jt[i] = 2
atol[i] = in_atol
rtol[i] = in_rtol
liw[i] = isize
lrw[i] = rsize
try:
# initial conditions
for j in range(self._speciesNumber):
# loop over species
y[i * self._speciesNumber + j] = init_values[i][j]
ret_xt[i, 0, 0, j] = init_values[i][j]
except IndexError:
pass
# allocate on device
d_t = driver.mem_alloc(t.size * t.dtype.itemsize)
d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)
d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)
d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)
d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)
d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)
d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)
d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)
d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)
d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)
d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)
d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)
d_y = driver.mem_alloc(y.size * y.dtype.itemsize)
d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)
d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)
d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)
# copy to device
driver.memcpy_htod(d_t, t)
driver.memcpy_htod(d_jt, jt)
driver.memcpy_htod(d_neq, neq)
driver.memcpy_htod(d_liw, liw)
driver.memcpy_htod(d_lrw, lrw)
driver.memcpy_htod(d_itol, itol)
driver.memcpy_htod(d_iopt, iopt)
driver.memcpy_htod(d_rtol, rtol)
driver.memcpy_htod(d_iout, iout)
driver.memcpy_htod(d_tout, tout)
driver.memcpy_htod(d_itask, itask)
driver.memcpy_htod(d_istate, istate)
driver.memcpy_htod(d_y, y)
driver.memcpy_htod(d_atol, atol)
driver.memcpy_htod(d_iwork, iwork)
driver.memcpy_htod(d_rwork, rwork)
param = np.zeros((total_threads, self._parameterNumber), dtype=np.float32)
try:
for i in range(len(parameters)):
for j in range(self._parameterNumber):
param[i][j] = parameters[i][j]
except IndexError:
pass
# parameter texture
ary = sim.create_2D_array(param)
sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4, total_threads)
self._param_tex.set_array(ary)
if self._dt <= 0:
for i in range(self._resultNumber):
for j in range(total_threads):
tout[j] = self._timepoints[i]
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol, d_rtol, d_atol, d_itask, d_istate,
d_iopt, d_rwork, d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
# end of loop over time points
else:
tt = self._timepoints[0]
for i in range(self._resultNumber):
while 1:
next_time = min(tt + self._dt, self._timepoints[i])
for j in range(total_threads):
tout[j] = next_time
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol, d_rtol, d_atol, d_itask, d_istate,
d_iopt, d_rwork, d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
if np.abs(next_time - self._timepoints[i]) < 1e-5:
tt = next_time
break
tt = next_time
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
# loop over and check ret_istate
        # it will be zero if there were problems
for j in range(total_threads):
if ret_istate[j] == 0:
for i in range(self._resultNumber):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = float('NaN')
return ret_xt[0:experiments]
|
normal
|
{
"blob_id": "e9754530bef7614c16cdba0e818c1fa188e2d9a2",
"index": 9940,
"step-1": "<mask token>\n\n\nclass Lsoda(sim.SimulatorMG):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _compile(self, step_code):\n self._beta = 1\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'cuLsoda_all.cu'), 'r')\n _sourceFromFile_ = fc.read()\n _isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\\n'\n _rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,\n self._speciesNumber + 9)) + '\\n'\n _textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\\n'\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n 1 * 1) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\n 'cuLsoda'))\n blocks = self._MAXBLOCKSPERDEVICE\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n blocks * threads) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n self._param_tex = compiled.get_texref('param_tex')\n lsoda_kernel = compiled.get_function('cuLsoda')\n return compiled, lsoda_kernel\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Lsoda(sim.SimulatorMG):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _compile(self, step_code):\n self._beta = 1\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'cuLsoda_all.cu'), 'r')\n _sourceFromFile_ = fc.read()\n _isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\\n'\n _rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,\n self._speciesNumber + 9)) + '\\n'\n _textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\\n'\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n 1 * 1) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\n 'cuLsoda'))\n blocks = self._MAXBLOCKSPERDEVICE\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n blocks * threads) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n self._param_tex = compiled.get_texref('param_tex')\n lsoda_kernel = compiled.get_function('cuLsoda')\n return compiled, lsoda_kernel\n\n def _run_simulation(self, parameters, init_values, blocks, threads,\n in_atol=1e-06, in_rtol=1e-06):\n total_threads = threads * blocks\n experiments = len(parameters)\n neqn = self._speciesNumber\n init_common_kernel = self._completeCode.get_function('init_common')\n init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))\n ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.\n _speciesNumber])\n ret_istate = np.ones([total_threads], dtype=np.int32)\n isize = 20 + self._speciesNumber\n rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)\n t = np.zeros([total_threads], dtype=np.float64)\n jt = np.zeros([total_threads], dtype=np.int32)\n neq = np.zeros([total_threads], dtype=np.int32)\n itol = np.zeros([total_threads], dtype=np.int32)\n iopt = np.zeros([total_threads], dtype=np.int32)\n rtol = np.zeros([total_threads], dtype=np.float64)\n iout = np.zeros([total_threads], dtype=np.int32)\n tout = np.zeros([total_threads], dtype=np.float64)\n itask = np.zeros([total_threads], dtype=np.int32)\n istate = np.zeros([total_threads], dtype=np.int32)\n atol = np.zeros([total_threads], dtype=np.float64)\n liw = np.zeros([total_threads], dtype=np.int32)\n lrw = np.zeros([total_threads], dtype=np.int32)\n iwork = np.zeros([isize * total_threads], dtype=np.int32)\n rwork = np.zeros([rsize * total_threads], dtype=np.float64)\n y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)\n for i in range(total_threads):\n neq[i] = neqn\n t[i] = 0\n itol[i] = 1\n itask[i] = 1\n istate[i] = 1\n iopt[i] = 0\n jt[i] = 2\n atol[i] = in_atol\n rtol[i] = in_rtol\n liw[i] = isize\n lrw[i] = rsize\n try:\n for j in range(self._speciesNumber):\n y[i * self._speciesNumber + j] = init_values[i][j]\n ret_xt[i, 0, 0, j] = init_values[i][j]\n except IndexError:\n pass\n d_t = driver.mem_alloc(t.size * t.dtype.itemsize)\n d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)\n d_neq = driver.mem_alloc(neq.size * 
neq.dtype.itemsize)\n d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)\n d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)\n d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)\n d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)\n d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)\n d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)\n d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)\n d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)\n d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)\n d_y = driver.mem_alloc(y.size * y.dtype.itemsize)\n d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)\n d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)\n d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)\n driver.memcpy_htod(d_t, t)\n driver.memcpy_htod(d_jt, jt)\n driver.memcpy_htod(d_neq, neq)\n driver.memcpy_htod(d_liw, liw)\n driver.memcpy_htod(d_lrw, lrw)\n driver.memcpy_htod(d_itol, itol)\n driver.memcpy_htod(d_iopt, iopt)\n driver.memcpy_htod(d_rtol, rtol)\n driver.memcpy_htod(d_iout, iout)\n driver.memcpy_htod(d_tout, tout)\n driver.memcpy_htod(d_itask, itask)\n driver.memcpy_htod(d_istate, istate)\n driver.memcpy_htod(d_y, y)\n driver.memcpy_htod(d_atol, atol)\n driver.memcpy_htod(d_iwork, iwork)\n driver.memcpy_htod(d_rwork, rwork)\n param = np.zeros((total_threads, self._parameterNumber), dtype=np.\n float32)\n try:\n for i in range(len(parameters)):\n for j in range(self._parameterNumber):\n param[i][j] = parameters[i][j]\n except IndexError:\n pass\n ary = sim.create_2D_array(param)\n sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,\n total_threads)\n self._param_tex.set_array(ary)\n if self._dt <= 0:\n for i in range(self._resultNumber):\n for j in range(total_threads):\n tout[j] = self._timepoints[i]\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n else:\n tt = self._timepoints[0]\n for i in range(self._resultNumber):\n while 1:\n next_time = min(tt + self._dt, self._timepoints[i])\n for j in range(total_threads):\n tout[j] = next_time\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n if np.abs(next_time - self._timepoints[i]) < 1e-05:\n tt = next_time\n break\n tt = next_time\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n for j in range(total_threads):\n if ret_istate[j] == 0:\n for i in range(self._resultNumber):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = float('NaN')\n return ret_xt[0:experiments]\n",
"step-3": "<mask token>\n\n\nclass Lsoda(sim.SimulatorMG):\n _param_tex = None\n _step_code = None\n _runtimeCompile = True\n _lsoda_source_ = \"\"\"\n \n extern \"C\"{\n\n #include <stdio.h>\n \n __device__ myFex myfex;\n __device__ myJex myjex;\n \n __global__ void init_common(){\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n cuLsodaCommonBlockInit( &(common[tid]) );\n }\n \n __global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol, \n double *rtol, double *atol, int *itask, int *istate, int *iopt, \n double *rwork, int *lrw, int *iwork, int *liw, int *jt)\n {\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n\n //if(tid==0){\n //printf(\"I am thread time %d %f\\\\n\", tid, t[0] );\n //}\n\n dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid, \n istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );\n\n //if(tid==0){\n //printf(\"I am done %d %f\\\\n\", tid, t[0] );\n //}\n }\n }\n \n \"\"\"\n\n def _compile(self, step_code):\n self._beta = 1\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'cuLsoda_all.cu'), 'r')\n _sourceFromFile_ = fc.read()\n _isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\\n'\n _rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,\n self._speciesNumber + 9)) + '\\n'\n _textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\\n'\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n 1 * 1) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\n 'cuLsoda'))\n blocks = self._MAXBLOCKSPERDEVICE\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n blocks * threads) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n self._param_tex = compiled.get_texref('param_tex')\n lsoda_kernel = compiled.get_function('cuLsoda')\n return compiled, lsoda_kernel\n\n def _run_simulation(self, parameters, init_values, blocks, threads,\n in_atol=1e-06, in_rtol=1e-06):\n total_threads = threads * blocks\n experiments = len(parameters)\n neqn = self._speciesNumber\n init_common_kernel = self._completeCode.get_function('init_common')\n init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))\n ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.\n _speciesNumber])\n ret_istate = np.ones([total_threads], dtype=np.int32)\n isize = 20 + self._speciesNumber\n rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)\n t = np.zeros([total_threads], dtype=np.float64)\n jt = np.zeros([total_threads], dtype=np.int32)\n neq = np.zeros([total_threads], dtype=np.int32)\n itol = np.zeros([total_threads], dtype=np.int32)\n iopt = np.zeros([total_threads], dtype=np.int32)\n rtol = np.zeros([total_threads], dtype=np.float64)\n iout = np.zeros([total_threads], dtype=np.int32)\n tout = np.zeros([total_threads], dtype=np.float64)\n itask = np.zeros([total_threads], dtype=np.int32)\n 
istate = np.zeros([total_threads], dtype=np.int32)\n atol = np.zeros([total_threads], dtype=np.float64)\n liw = np.zeros([total_threads], dtype=np.int32)\n lrw = np.zeros([total_threads], dtype=np.int32)\n iwork = np.zeros([isize * total_threads], dtype=np.int32)\n rwork = np.zeros([rsize * total_threads], dtype=np.float64)\n y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)\n for i in range(total_threads):\n neq[i] = neqn\n t[i] = 0\n itol[i] = 1\n itask[i] = 1\n istate[i] = 1\n iopt[i] = 0\n jt[i] = 2\n atol[i] = in_atol\n rtol[i] = in_rtol\n liw[i] = isize\n lrw[i] = rsize\n try:\n for j in range(self._speciesNumber):\n y[i * self._speciesNumber + j] = init_values[i][j]\n ret_xt[i, 0, 0, j] = init_values[i][j]\n except IndexError:\n pass\n d_t = driver.mem_alloc(t.size * t.dtype.itemsize)\n d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)\n d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)\n d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)\n d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)\n d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)\n d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)\n d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)\n d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)\n d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)\n d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)\n d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)\n d_y = driver.mem_alloc(y.size * y.dtype.itemsize)\n d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)\n d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)\n d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)\n driver.memcpy_htod(d_t, t)\n driver.memcpy_htod(d_jt, jt)\n driver.memcpy_htod(d_neq, neq)\n driver.memcpy_htod(d_liw, liw)\n driver.memcpy_htod(d_lrw, lrw)\n driver.memcpy_htod(d_itol, itol)\n driver.memcpy_htod(d_iopt, iopt)\n driver.memcpy_htod(d_rtol, rtol)\n driver.memcpy_htod(d_iout, iout)\n driver.memcpy_htod(d_tout, tout)\n driver.memcpy_htod(d_itask, itask)\n driver.memcpy_htod(d_istate, istate)\n driver.memcpy_htod(d_y, y)\n driver.memcpy_htod(d_atol, atol)\n driver.memcpy_htod(d_iwork, iwork)\n driver.memcpy_htod(d_rwork, rwork)\n param = np.zeros((total_threads, self._parameterNumber), dtype=np.\n float32)\n try:\n for i in range(len(parameters)):\n for j in range(self._parameterNumber):\n param[i][j] = parameters[i][j]\n except IndexError:\n pass\n ary = sim.create_2D_array(param)\n sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,\n total_threads)\n self._param_tex.set_array(ary)\n if self._dt <= 0:\n for i in range(self._resultNumber):\n for j in range(total_threads):\n tout[j] = self._timepoints[i]\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n else:\n tt = self._timepoints[0]\n for i in range(self._resultNumber):\n while 1:\n next_time = min(tt + self._dt, self._timepoints[i])\n for j in range(total_threads):\n tout[j] = next_time\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n 
d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n if np.abs(next_time - self._timepoints[i]) < 1e-05:\n tt = next_time\n break\n tt = next_time\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n for j in range(total_threads):\n if ret_istate[j] == 0:\n for i in range(self._resultNumber):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = float('NaN')\n return ret_xt[0:experiments]\n",
"step-4": "import os\nimport numpy as np\nimport pycuda\nimport pycuda.driver as driver\nimport cudasim.solvers.cuda.Simulator_mg as sim\nimport cudasim\n\n\nclass Lsoda(sim.SimulatorMG):\n _param_tex = None\n _step_code = None\n _runtimeCompile = True\n _lsoda_source_ = \"\"\"\n \n extern \"C\"{\n\n #include <stdio.h>\n \n __device__ myFex myfex;\n __device__ myJex myjex;\n \n __global__ void init_common(){\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n cuLsodaCommonBlockInit( &(common[tid]) );\n }\n \n __global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol, \n double *rtol, double *atol, int *itask, int *istate, int *iopt, \n double *rwork, int *lrw, int *iwork, int *liw, int *jt)\n {\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n\n //if(tid==0){\n //printf(\"I am thread time %d %f\\\\n\", tid, t[0] );\n //}\n\n dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid, \n istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );\n\n //if(tid==0){\n //printf(\"I am done %d %f\\\\n\", tid, t[0] );\n //}\n }\n }\n \n \"\"\"\n\n def _compile(self, step_code):\n self._beta = 1\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'cuLsoda_all.cu'), 'r')\n _sourceFromFile_ = fc.read()\n _isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\\n'\n _rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,\n self._speciesNumber + 9)) + '\\n'\n _textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\\n'\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n 1 * 1) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\n 'cuLsoda'))\n blocks = self._MAXBLOCKSPERDEVICE\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n blocks * threads) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n self._param_tex = compiled.get_texref('param_tex')\n lsoda_kernel = compiled.get_function('cuLsoda')\n return compiled, lsoda_kernel\n\n def _run_simulation(self, parameters, init_values, blocks, threads,\n in_atol=1e-06, in_rtol=1e-06):\n total_threads = threads * blocks\n experiments = len(parameters)\n neqn = self._speciesNumber\n init_common_kernel = self._completeCode.get_function('init_common')\n init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))\n ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.\n _speciesNumber])\n ret_istate = np.ones([total_threads], dtype=np.int32)\n isize = 20 + self._speciesNumber\n rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)\n t = np.zeros([total_threads], dtype=np.float64)\n jt = np.zeros([total_threads], dtype=np.int32)\n neq = np.zeros([total_threads], dtype=np.int32)\n itol = np.zeros([total_threads], dtype=np.int32)\n iopt = np.zeros([total_threads], dtype=np.int32)\n rtol = np.zeros([total_threads], dtype=np.float64)\n iout = 
np.zeros([total_threads], dtype=np.int32)\n tout = np.zeros([total_threads], dtype=np.float64)\n itask = np.zeros([total_threads], dtype=np.int32)\n istate = np.zeros([total_threads], dtype=np.int32)\n atol = np.zeros([total_threads], dtype=np.float64)\n liw = np.zeros([total_threads], dtype=np.int32)\n lrw = np.zeros([total_threads], dtype=np.int32)\n iwork = np.zeros([isize * total_threads], dtype=np.int32)\n rwork = np.zeros([rsize * total_threads], dtype=np.float64)\n y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)\n for i in range(total_threads):\n neq[i] = neqn\n t[i] = 0\n itol[i] = 1\n itask[i] = 1\n istate[i] = 1\n iopt[i] = 0\n jt[i] = 2\n atol[i] = in_atol\n rtol[i] = in_rtol\n liw[i] = isize\n lrw[i] = rsize\n try:\n for j in range(self._speciesNumber):\n y[i * self._speciesNumber + j] = init_values[i][j]\n ret_xt[i, 0, 0, j] = init_values[i][j]\n except IndexError:\n pass\n d_t = driver.mem_alloc(t.size * t.dtype.itemsize)\n d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)\n d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)\n d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)\n d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)\n d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)\n d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)\n d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)\n d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)\n d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)\n d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)\n d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)\n d_y = driver.mem_alloc(y.size * y.dtype.itemsize)\n d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)\n d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)\n d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)\n driver.memcpy_htod(d_t, t)\n driver.memcpy_htod(d_jt, jt)\n driver.memcpy_htod(d_neq, neq)\n driver.memcpy_htod(d_liw, liw)\n driver.memcpy_htod(d_lrw, lrw)\n driver.memcpy_htod(d_itol, itol)\n driver.memcpy_htod(d_iopt, iopt)\n driver.memcpy_htod(d_rtol, rtol)\n driver.memcpy_htod(d_iout, iout)\n driver.memcpy_htod(d_tout, tout)\n driver.memcpy_htod(d_itask, itask)\n driver.memcpy_htod(d_istate, istate)\n driver.memcpy_htod(d_y, y)\n driver.memcpy_htod(d_atol, atol)\n driver.memcpy_htod(d_iwork, iwork)\n driver.memcpy_htod(d_rwork, rwork)\n param = np.zeros((total_threads, self._parameterNumber), dtype=np.\n float32)\n try:\n for i in range(len(parameters)):\n for j in range(self._parameterNumber):\n param[i][j] = parameters[i][j]\n except IndexError:\n pass\n ary = sim.create_2D_array(param)\n sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,\n total_threads)\n self._param_tex.set_array(ary)\n if self._dt <= 0:\n for i in range(self._resultNumber):\n for j in range(total_threads):\n tout[j] = self._timepoints[i]\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n else:\n tt = self._timepoints[0]\n for i in range(self._resultNumber):\n while 1:\n next_time = min(tt + self._dt, self._timepoints[i])\n for j 
in range(total_threads):\n tout[j] = next_time\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n if np.abs(next_time - self._timepoints[i]) < 1e-05:\n tt = next_time\n break\n tt = next_time\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n for j in range(total_threads):\n if ret_istate[j] == 0:\n for i in range(self._resultNumber):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = float('NaN')\n return ret_xt[0:experiments]\n",
"step-5": "import os\n\nimport numpy as np\nimport pycuda\nimport pycuda.driver as driver\n\nimport cudasim.solvers.cuda.Simulator_mg as sim\nimport cudasim\n\nclass Lsoda(sim.SimulatorMG):\n _param_tex = None\n\n _step_code = None\n _runtimeCompile = True\n\n _lsoda_source_ = \"\"\"\n \n extern \"C\"{\n\n #include <stdio.h>\n \n __device__ myFex myfex;\n __device__ myJex myjex;\n \n __global__ void init_common(){\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n cuLsodaCommonBlockInit( &(common[tid]) );\n }\n \n __global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol, \n double *rtol, double *atol, int *itask, int *istate, int *iopt, \n double *rwork, int *lrw, int *iwork, int *liw, int *jt)\n {\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n\n //if(tid==0){\n //printf(\"I am thread time %d %f\\\\n\", tid, t[0] );\n //}\n\n dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid, \n istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );\n\n //if(tid==0){\n //printf(\"I am done %d %f\\\\n\", tid, t[0] );\n //}\n }\n }\n \n \"\"\"\n\n def _compile(self, step_code):\n # set beta to 1: repeats are pointless as simulation is deterministic\n self._beta = 1\n\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'cuLsoda_all.cu'), 'r')\n\n _sourceFromFile_ = fc.read()\n\n _isize_ = \"#define ISIZE \" + repr(20 + self._speciesNumber) + \"\\n\"\n _rsize_ = \"#define RSIZE \" + repr(22 + self._speciesNumber * max(16, self._speciesNumber + 9)) + \"\\n\"\n\n _textures_ = \"texture<float, 2, cudaReadModeElementType> param_tex;\\n\"\n _common_block_ = \"__device__ struct cuLsodaCommonBlock common[\" + repr(1 * 1) + \"];\\n\"\n _code_ = _isize_ + _rsize_ + _textures_ + step_code + _sourceFromFile_ + _common_block_ + self._lsoda_source_\n\n if self._dump:\n of = open(\"full_ode_code.cu\", \"w\")\n print >> of, _code_\n\n # dummy compile to determine optimal blockSize and gridSize\n compiled = pycuda.compiler.SourceModule(_code_, nvcc=\"nvcc\", options=[], no_extern_c=True, keep=False)\n\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\"cuLsoda\"))\n blocks = self._MAXBLOCKSPERDEVICE\n\n # real compile\n _common_block_ = \"__device__ struct cuLsodaCommonBlock common[\" + repr(blocks * threads) + \"];\\n\"\n _code_ = _isize_ + _rsize_ + _textures_ + step_code + _sourceFromFile_ + _common_block_ + self._lsoda_source_\n\n if self._dump:\n of = open(\"full_ode_code.cu\", \"w\")\n print >> of, _code_\n\n compiled = pycuda.compiler.SourceModule(_code_, nvcc=\"nvcc\", options=[], no_extern_c=True, keep=False)\n\n self._param_tex = compiled.get_texref(\"param_tex\")\n\n lsoda_kernel = compiled.get_function(\"cuLsoda\")\n return compiled, lsoda_kernel\n\n def _run_simulation(self, parameters, init_values, blocks, threads, in_atol=1e-6, in_rtol=1e-6):\n\n total_threads = threads * blocks\n experiments = len(parameters)\n\n neqn = self._speciesNumber\n\n # compile\n init_common_kernel = self._completeCode.get_function(\"init_common\")\n init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))\n\n # output array\n ret_xt = np.zeros([total_threads, 1, self._resultNumber, self._speciesNumber])\n ret_istate = np.ones([total_threads], dtype=np.int32)\n\n # calculate sizes of work spaces\n isize = 20 + self._speciesNumber\n rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)\n\n # local variables\n t = np.zeros([total_threads], 
dtype=np.float64)\n jt = np.zeros([total_threads], dtype=np.int32)\n neq = np.zeros([total_threads], dtype=np.int32)\n itol = np.zeros([total_threads], dtype=np.int32)\n iopt = np.zeros([total_threads], dtype=np.int32)\n rtol = np.zeros([total_threads], dtype=np.float64)\n iout = np.zeros([total_threads], dtype=np.int32)\n tout = np.zeros([total_threads], dtype=np.float64)\n itask = np.zeros([total_threads], dtype=np.int32)\n istate = np.zeros([total_threads], dtype=np.int32)\n atol = np.zeros([total_threads], dtype=np.float64)\n\n liw = np.zeros([total_threads], dtype=np.int32)\n lrw = np.zeros([total_threads], dtype=np.int32)\n iwork = np.zeros([isize * total_threads], dtype=np.int32)\n rwork = np.zeros([rsize * total_threads], dtype=np.float64)\n y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)\n\n for i in range(total_threads):\n neq[i] = neqn\n t[i] = 0\n itol[i] = 1\n itask[i] = 1\n istate[i] = 1\n iopt[i] = 0\n jt[i] = 2\n atol[i] = in_atol\n rtol[i] = in_rtol\n\n liw[i] = isize\n lrw[i] = rsize\n\n try:\n # initial conditions\n for j in range(self._speciesNumber):\n # loop over species\n y[i * self._speciesNumber + j] = init_values[i][j]\n ret_xt[i, 0, 0, j] = init_values[i][j]\n except IndexError:\n pass\n\n # allocate on device\n d_t = driver.mem_alloc(t.size * t.dtype.itemsize)\n d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)\n d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)\n d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)\n d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)\n d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)\n d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)\n d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)\n d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)\n d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)\n d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)\n d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)\n d_y = driver.mem_alloc(y.size * y.dtype.itemsize)\n d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)\n d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)\n d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)\n\n # copy to device\n driver.memcpy_htod(d_t, t)\n driver.memcpy_htod(d_jt, jt)\n driver.memcpy_htod(d_neq, neq)\n driver.memcpy_htod(d_liw, liw)\n driver.memcpy_htod(d_lrw, lrw)\n driver.memcpy_htod(d_itol, itol)\n driver.memcpy_htod(d_iopt, iopt)\n driver.memcpy_htod(d_rtol, rtol)\n driver.memcpy_htod(d_iout, iout)\n driver.memcpy_htod(d_tout, tout)\n driver.memcpy_htod(d_itask, itask)\n driver.memcpy_htod(d_istate, istate)\n driver.memcpy_htod(d_y, y)\n driver.memcpy_htod(d_atol, atol)\n driver.memcpy_htod(d_iwork, iwork)\n driver.memcpy_htod(d_rwork, rwork)\n\n param = np.zeros((total_threads, self._parameterNumber), dtype=np.float32)\n try:\n for i in range(len(parameters)):\n for j in range(self._parameterNumber):\n param[i][j] = parameters[i][j]\n except IndexError:\n pass\n\n # parameter texture\n ary = sim.create_2D_array(param)\n sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4, total_threads)\n self._param_tex.set_array(ary)\n\n if self._dt <= 0:\n for i in range(self._resultNumber):\n\n for j in range(total_threads):\n tout[j] = self._timepoints[i]\n driver.memcpy_htod(d_tout, tout)\n\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol, d_rtol, d_atol, d_itask, d_istate,\n d_iopt, d_rwork, d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n\n 
driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n\n if istate[j] < 0:\n ret_istate[j] = 0\n\n # end of loop over time points\n\n else:\n tt = self._timepoints[0]\n\n for i in range(self._resultNumber):\n while 1:\n\n next_time = min(tt + self._dt, self._timepoints[i])\n\n for j in range(total_threads):\n tout[j] = next_time\n driver.memcpy_htod(d_tout, tout)\n\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol, d_rtol, d_atol, d_itask, d_istate,\n d_iopt, d_rwork, d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n\n if np.abs(next_time - self._timepoints[i]) < 1e-5:\n tt = next_time\n break\n\n tt = next_time\n\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n\n if istate[j] < 0:\n ret_istate[j] = 0\n\n # loop over and check ret_istate\n # it will will be zero if there was problems\n for j in range(total_threads):\n if ret_istate[j] == 0:\n for i in range(self._resultNumber):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = float('NaN')\n\n return ret_xt[0:experiments]\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def getChromosome(str):
if str == '*' or str[3:] == 'X':
return -1
try:
return int(str[3:])
except:
return -1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parseFile(file, frequency_tree):
readnumber = re.compile('[r]+\\d+')
line_spliter = re.compile('\t+')
colon_spliter = re.compile(':')
forward_reads = 0
reverse_reads = 0
unmatched_reads = 0
read_positions = defaultdict(list)
position_differences = []
position_differences_stdv_list = []
total_position_diffs = []
read_lengths_count = 0
read_lengths_total = 0
read_frequency = 0
read_lengths_average = 0
num_chromosomes = 0
num_a = 0
num_c = 0
num_g = 0
num_t = 0
print('############# OPENING SAM FILE', file=sys.stderr)
with open(file, 'rt') as fp:
line = fp.readline()
while line:
subline = line_spliter.split(line)
line = fp.readline()
if int(subline[1]) & 4 == 4:
unmatched_reads += 1
elif int(subline[1]) & 16 == 16:
reverse_reads += 1
else:
forward_reads += 1
read = subline[9]
read_lengths_count += 1
read_lengths_total += len(read)
bases_count = Counter(read)
num_a += bases_count['A']
num_c += bases_count['C']
num_g += bases_count['G']
num_t += bases_count['T']
chromosome = getChromosome(subline[2])
if chromosome != -1:
read_positions[chromosome].append(int(subline[3]))
if read_lengths_count != 0:
read_lengths_average = read_lengths_total / read_lengths_count
if forward_reads + reverse_reads + unmatched_reads != 0:
read_frequency = (forward_reads + reverse_reads) / (
forward_reads + reverse_reads + unmatched_reads)
gene_annotation_match = 0
gene_annotation_total = 0
gene_annotation_percent = 0
for key in read_positions.keys():
for position in read_positions[key]:
for _ in frequency_tree[key].find_overlap(position, position):
gene_annotation_match += 1
break
gene_annotation_total += 1
if gene_annotation_total != 0:
gene_annotation_percent = (gene_annotation_match /
gene_annotation_total)
print('gene_annotation_percent = ' + str(gene_annotation_percent))
for _, position_list in read_positions.items():
position_list.sort()
num_chromosomes += 1
for i in range(len(position_list) - 1):
position_differences.append(position_list[i + 1] -
position_list[i])
try:
std_of_pos_diff = np.std(position_differences)
mean_of_pos_diffs = np.nanmean(position_differences)
max_position_difference = np.amax(position_differences)
min_position_difference = np.amin(position_differences)
except:
return None
return [gene_annotation_percent, read_lengths_average,
read_frequency, std_of_pos_diff, mean_of_pos_diffs,
num_chromosomes, max_position_difference,
min_position_difference, num_a / read_lengths_total, num_c /
read_lengths_total, num_g / read_lengths_total, num_t /
read_lengths_total]
<|reserved_special_token_0|>
def getChromosome(str):
if str == '*' or str[3:] == 'X':
return -1
try:
return int(str[3:])
except:
return -1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parseFile(file, frequency_tree):
readnumber = re.compile('[r]+\\d+')
line_spliter = re.compile('\t+')
colon_spliter = re.compile(':')
forward_reads = 0
reverse_reads = 0
unmatched_reads = 0
read_positions = defaultdict(list)
position_differences = []
position_differences_stdv_list = []
total_position_diffs = []
read_lengths_count = 0
read_lengths_total = 0
read_frequency = 0
read_lengths_average = 0
num_chromosomes = 0
num_a = 0
num_c = 0
num_g = 0
num_t = 0
print('############# OPENING SAM FILE', file=sys.stderr)
with open(file, 'rt') as fp:
line = fp.readline()
while line:
subline = line_spliter.split(line)
line = fp.readline()
if int(subline[1]) & 4 == 4:
unmatched_reads += 1
elif int(subline[1]) & 16 == 16:
reverse_reads += 1
else:
forward_reads += 1
read = subline[9]
read_lengths_count += 1
read_lengths_total += len(read)
bases_count = Counter(read)
num_a += bases_count['A']
num_c += bases_count['C']
num_g += bases_count['G']
num_t += bases_count['T']
chromosome = getChromosome(subline[2])
if chromosome != -1:
read_positions[chromosome].append(int(subline[3]))
if read_lengths_count != 0:
read_lengths_average = read_lengths_total / read_lengths_count
if forward_reads + reverse_reads + unmatched_reads != 0:
read_frequency = (forward_reads + reverse_reads) / (
forward_reads + reverse_reads + unmatched_reads)
gene_annotation_match = 0
gene_annotation_total = 0
gene_annotation_percent = 0
for key in read_positions.keys():
for position in read_positions[key]:
for _ in frequency_tree[key].find_overlap(position, position):
gene_annotation_match += 1
break
gene_annotation_total += 1
if gene_annotation_total != 0:
gene_annotation_percent = (gene_annotation_match /
gene_annotation_total)
print('gene_annotation_percent = ' + str(gene_annotation_percent))
for _, position_list in read_positions.items():
position_list.sort()
num_chromosomes += 1
for i in range(len(position_list) - 1):
position_differences.append(position_list[i + 1] -
position_list[i])
try:
std_of_pos_diff = np.std(position_differences)
mean_of_pos_diffs = np.nanmean(position_differences)
max_position_difference = np.amax(position_differences)
min_position_difference = np.amin(position_differences)
except:
return None
return [gene_annotation_percent, read_lengths_average,
read_frequency, std_of_pos_diff, mean_of_pos_diffs,
num_chromosomes, max_position_difference,
min_position_difference, num_a / read_lengths_total, num_c /
read_lengths_total, num_g / read_lengths_total, num_t /
read_lengths_total]
def parseString(txt, frequency_tree):
spliter = re.compile('\n+')
readnumber = re.compile('[r]+\\d+')
line_spliter = re.compile('\t+')
colon_spliter = re.compile(':')
forward_reads = 0
reverse_reads = 0
unmatched_reads = 0
read_positions = defaultdict(list)
position_differences = []
position_differences_stdv_list = []
total_position_diffs = []
read_lengths_count = 0
read_lengths_total = 0
read_frequency = 0
read_lengths_average = 0
num_chromosomes = 0
lines = spliter.split(txt)
for i in range(len(lines) - 1):
subline = line_spliter.split(lines[i])
if int(subline[1]) & 4 == 4:
unmatched_reads += 1
elif int(subline[1]) & 16 == 16:
reverse_reads += 1
else:
forward_reads += 1
read = subline[9]
read_lengths_count += 1
read_lengths_total += len(read)
chromosome = getChromosome(subline[2])
if chromosome != -1:
read_positions[chromosome].append(int(subline[3]))
if read_lengths_count != 0:
read_lengths_average = read_lengths_total / read_lengths_count
if forward_reads + reverse_reads + unmatched_reads != 0:
read_frequency = (forward_reads + reverse_reads) / (forward_reads +
reverse_reads + unmatched_reads)
gene_annotation_match = 0
gene_annotation_total = 0
gene_annotation_percent = 0
for key in read_positions.keys():
for position in read_positions[key]:
for _ in frequency_tree[key].find_overlap(position, position):
gene_annotation_match += 1
break
gene_annotation_total += 1
if gene_annotation_total != 0:
gene_annotation_percent = gene_annotation_match / gene_annotation_total
print('gene_annotation_percent = ' + str(gene_annotation_percent))
for _, position_list in read_positions.items():
position_list.sort()
num_chromosomes += 1
for i in range(len(position_list) - 1):
position_differences.append(position_list[i + 1] - position_list[i]
)
try:
std_of_pos_diff = np.std(position_differences)
mean_of_pos_diffs = np.nanmean(position_differences)
max_position_difference = np.amax(position_differences)
min_position_difference = np.amin(position_differences)
except:
return None
return [gene_annotation_percent, read_lengths_average, read_frequency,
std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,
max_position_difference, min_position_difference]
def getChromosome(str):
if str == '*' or str[3:] == 'X':
return -1
try:
return int(str[3:])
except:
return -1
<|reserved_special_token_1|>
from collections import defaultdict, Counter
import numpy as np
import sys
import re
def parseFile(file, frequency_tree):
readnumber = re.compile('[r]+\\d+')
line_spliter = re.compile('\t+')
colon_spliter = re.compile(':')
forward_reads = 0
reverse_reads = 0
unmatched_reads = 0
read_positions = defaultdict(list)
position_differences = []
position_differences_stdv_list = []
total_position_diffs = []
read_lengths_count = 0
read_lengths_total = 0
read_frequency = 0
read_lengths_average = 0
num_chromosomes = 0
num_a = 0
num_c = 0
num_g = 0
num_t = 0
print('############# OPENING SAM FILE', file=sys.stderr)
with open(file, 'rt') as fp:
line = fp.readline()
while line:
subline = line_spliter.split(line)
line = fp.readline()
if int(subline[1]) & 4 == 4:
unmatched_reads += 1
elif int(subline[1]) & 16 == 16:
reverse_reads += 1
else:
forward_reads += 1
read = subline[9]
read_lengths_count += 1
read_lengths_total += len(read)
bases_count = Counter(read)
num_a += bases_count['A']
num_c += bases_count['C']
num_g += bases_count['G']
num_t += bases_count['T']
chromosome = getChromosome(subline[2])
if chromosome != -1:
read_positions[chromosome].append(int(subline[3]))
if read_lengths_count != 0:
read_lengths_average = read_lengths_total / read_lengths_count
if forward_reads + reverse_reads + unmatched_reads != 0:
read_frequency = (forward_reads + reverse_reads) / (
forward_reads + reverse_reads + unmatched_reads)
gene_annotation_match = 0
gene_annotation_total = 0
gene_annotation_percent = 0
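    # A read counts as annotated if its mapping position overlaps any interval
    # registered for that chromosome in frequency_tree (first hit is enough).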
for key in read_positions.keys():
for position in read_positions[key]:
for _ in frequency_tree[key].find_overlap(position, position):
gene_annotation_match += 1
break
gene_annotation_total += 1
if gene_annotation_total != 0:
gene_annotation_percent = (gene_annotation_match /
gene_annotation_total)
print('gene_annotation_percent = ' + str(gene_annotation_percent))
for _, position_list in read_positions.items():
position_list.sort()
num_chromosomes += 1
for i in range(len(position_list) - 1):
position_differences.append(position_list[i + 1] -
position_list[i])
try:
std_of_pos_diff = np.std(position_differences)
mean_of_pos_diffs = np.nanmean(position_differences)
max_position_difference = np.amax(position_differences)
min_position_difference = np.amin(position_differences)
except:
return None
return [gene_annotation_percent, read_lengths_average,
read_frequency, std_of_pos_diff, mean_of_pos_diffs,
num_chromosomes, max_position_difference,
min_position_difference, num_a / read_lengths_total, num_c /
read_lengths_total, num_g / read_lengths_total, num_t /
read_lengths_total]
def parseString(txt, frequency_tree):
spliter = re.compile('\n+')
readnumber = re.compile('[r]+\\d+')
line_spliter = re.compile('\t+')
colon_spliter = re.compile(':')
forward_reads = 0
reverse_reads = 0
unmatched_reads = 0
read_positions = defaultdict(list)
position_differences = []
position_differences_stdv_list = []
total_position_diffs = []
read_lengths_count = 0
read_lengths_total = 0
read_frequency = 0
read_lengths_average = 0
num_chromosomes = 0
lines = spliter.split(txt)
for i in range(len(lines) - 1):
subline = line_spliter.split(lines[i])
if int(subline[1]) & 4 == 4:
unmatched_reads += 1
elif int(subline[1]) & 16 == 16:
reverse_reads += 1
else:
forward_reads += 1
read = subline[9]
read_lengths_count += 1
read_lengths_total += len(read)
chromosome = getChromosome(subline[2])
if chromosome != -1:
read_positions[chromosome].append(int(subline[3]))
if read_lengths_count != 0:
read_lengths_average = read_lengths_total / read_lengths_count
if forward_reads + reverse_reads + unmatched_reads != 0:
read_frequency = (forward_reads + reverse_reads) / (forward_reads +
reverse_reads + unmatched_reads)
gene_annotation_match = 0
gene_annotation_total = 0
gene_annotation_percent = 0
for key in read_positions.keys():
for position in read_positions[key]:
for _ in frequency_tree[key].find_overlap(position, position):
gene_annotation_match += 1
break
gene_annotation_total += 1
if gene_annotation_total != 0:
gene_annotation_percent = gene_annotation_match / gene_annotation_total
print('gene_annotation_percent = ' + str(gene_annotation_percent))
for _, position_list in read_positions.items():
position_list.sort()
num_chromosomes += 1
for i in range(len(position_list) - 1):
position_differences.append(position_list[i + 1] - position_list[i]
)
try:
std_of_pos_diff = np.std(position_differences)
mean_of_pos_diffs = np.nanmean(position_differences)
max_position_difference = np.amax(position_differences)
min_position_difference = np.amin(position_differences)
except:
return None
return [gene_annotation_percent, read_lengths_average, read_frequency,
std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,
max_position_difference, min_position_difference]
def getChromosome(str):
if str == '*' or str[3:] == 'X':
return -1
try:
return int(str[3:])
except:
return -1
<|reserved_special_token_1|>
from collections import defaultdict, Counter
import numpy as np
import sys
import re
def parseFile(file, frequency_tree):
readnumber = re.compile('[r]+\d+')
line_spliter = re.compile('\t+')
colon_spliter = re.compile(':')
forward_reads = 0
reverse_reads = 0
unmatched_reads = 0
read_positions = defaultdict(list)
position_differences = []
position_differences_stdv_list = []
total_position_diffs = []
read_lengths_count = 0
read_lengths_total = 0
read_frequency = 0
read_lengths_average = 0
num_chromosomes = 0
num_a = 0
num_c = 0
num_g = 0
num_t = 0
print("############# OPENING SAM FILE", file=sys.stderr)
with open(file, 'rt') as fp:
line = fp.readline()
while line:
subline = line_spliter.split(line)
line = fp.readline()
if (int(subline[1]) & 4 == 4):
unmatched_reads += 1
elif (int(subline[1]) & 16 == 16):
reverse_reads += 1
else:
forward_reads += 1
read = subline[9]
read_lengths_count += 1
read_lengths_total += len(read)
bases_count = Counter(read)
num_a += bases_count["A"]
num_c += bases_count["C"]
num_g += bases_count["G"]
num_t += bases_count["T"]
chromosome = getChromosome(subline[2])
if chromosome != -1:
read_positions[chromosome].append(int(subline[3]))
if read_lengths_count != 0:
read_lengths_average = read_lengths_total / read_lengths_count
if (forward_reads + reverse_reads + unmatched_reads) != 0:
read_frequency = (forward_reads + reverse_reads) / (forward_reads + reverse_reads + unmatched_reads)
gene_annotation_match = 0
gene_annotation_total = 0
gene_annotation_percent = 0
for key in read_positions.keys():
for position in read_positions[key]:
#TODO there is for sure a better way to do this than with a break
for _ in frequency_tree[key].find_overlap(position, position):
gene_annotation_match += 1
break
gene_annotation_total += 1
if gene_annotation_total != 0:
gene_annotation_percent = gene_annotation_match / gene_annotation_total
print("gene_annotation_percent = " + str(gene_annotation_percent))
for _, position_list in read_positions.items():
position_list.sort()
num_chromosomes += 1
for i in range(len(position_list) - 1):
position_differences.append(position_list[i + 1] - position_list[i])
try:
std_of_pos_diff = np.std(position_differences)
mean_of_pos_diffs = np.nanmean(position_differences)
max_position_difference = np.amax(position_differences)
min_position_difference = np.amin(position_differences)
except:
return None
return [gene_annotation_percent, read_lengths_average, read_frequency, std_of_pos_diff, mean_of_pos_diffs, num_chromosomes, max_position_difference, min_position_difference, num_a/ read_lengths_total, num_c/ read_lengths_total, num_g / read_lengths_total, num_t / read_lengths_total]
def parseString(txt, frequency_tree):
spliter = re.compile('\n+')
readnumber = re.compile('[r]+\d+')
line_spliter = re.compile('\t+')
colon_spliter = re.compile(':')
forward_reads = 0
reverse_reads = 0
unmatched_reads = 0
read_positions = defaultdict(list)
position_differences = []
position_differences_stdv_list = []
total_position_diffs = []
read_lengths_count = 0
read_lengths_total = 0
read_frequency = 0
read_lengths_average = 0
num_chromosomes = 0
lines = spliter.split(txt)
    # Iterating through every line
for i in range(len(lines) - 1):
subline = line_spliter.split(lines[i])
if (int(subline[1]) & 4 == 4):
unmatched_reads += 1
elif (int(subline[1]) & 16 == 16):
reverse_reads += 1
else:
forward_reads += 1
read = subline[9]
read_lengths_count += 1
read_lengths_total += len(read)
chromosome = getChromosome(subline[2])
if chromosome != -1:
read_positions[chromosome].append(int(subline[3]))
if read_lengths_count != 0:
read_lengths_average = read_lengths_total / read_lengths_count
if (forward_reads + reverse_reads + unmatched_reads) != 0:
read_frequency = (forward_reads + reverse_reads) / (forward_reads + reverse_reads + unmatched_reads)
gene_annotation_match = 0
gene_annotation_total = 0
gene_annotation_percent = 0
for key in read_positions.keys():
for position in read_positions[key]:
#TODO there is for sure a better way to do this than with a break
for _ in frequency_tree[key].find_overlap(position, position):
gene_annotation_match += 1
break
gene_annotation_total += 1
if gene_annotation_total != 0:
gene_annotation_percent = gene_annotation_match / gene_annotation_total
print("gene_annotation_percent = " + str(gene_annotation_percent))
for _, position_list in read_positions.items():
position_list.sort()
num_chromosomes += 1
for i in range(len(position_list) - 1):
position_differences.append(position_list[i + 1] - position_list[i])
try:
std_of_pos_diff = np.std(position_differences)
mean_of_pos_diffs = np.nanmean(position_differences)
max_position_difference = np.amax(position_differences)
min_position_difference = np.amin(position_differences)
except:
return None
return [gene_annotation_percent, read_lengths_average, read_frequency, std_of_pos_diff, mean_of_pos_diffs, num_chromosomes, max_position_difference, min_position_difference]
def getChromosome(str):
if str == "*" or str[3:] == 'X':
return -1
try:
return int(str[3:])
except:
return -1
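# Minimal illustrative driver for parseFile: a sketch, not taken from the original
# pipeline. The annotation index is an assumption -- any per-chromosome object that
# exposes find_overlap(start, end) fits the frequency_tree calls above, so a toy
# stand-in is used here, and "reads.sam" is a placeholder path.
if __name__ == "__main__":

    class SimpleIntervalIndex:
        """Toy interval index exposing the find_overlap(start, end) call used above."""

        def __init__(self, intervals):
            self.intervals = intervals  # list of (start, end) gene annotations

        def find_overlap(self, start, end):
            # Yield every stored interval that overlaps [start, end]
            for s, e in self.intervals:
                if s <= end and start <= e:
                    yield (s, e)

    # Hypothetical annotation data: chromosome 1 carries two gene intervals
    frequency_tree = defaultdict(lambda: SimpleIntervalIndex([]))
    frequency_tree[1] = SimpleIntervalIndex([(1000, 5000), (7000, 9000)])

    features = parseFile("reads.sam", frequency_tree)
    if features is not None:
        print("annotation fraction, mean read length, mapped fraction:", features[:3])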
|
flexible
|
{
"blob_id": "227b71cb6d4cde8f498ad19c1c5f95f7fc572752",
"index": 6995,
"step-1": "<mask token>\n\n\ndef getChromosome(str):\n if str == '*' or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n",
"step-2": "<mask token>\n\n\ndef parseFile(file, frequency_tree):\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n num_a = 0\n num_c = 0\n num_g = 0\n num_t = 0\n print('############# OPENING SAM FILE', file=sys.stderr)\n with open(file, 'rt') as fp:\n line = fp.readline()\n while line:\n subline = line_spliter.split(line)\n line = fp.readline()\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n bases_count = Counter(read)\n num_a += bases_count['A']\n num_c += bases_count['C']\n num_g += bases_count['G']\n num_t += bases_count['T']\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (\n forward_reads + reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = (gene_annotation_match /\n gene_annotation_total)\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] -\n position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average,\n read_frequency, std_of_pos_diff, mean_of_pos_diffs,\n num_chromosomes, max_position_difference,\n min_position_difference, num_a / read_lengths_total, num_c /\n read_lengths_total, num_g / read_lengths_total, num_t /\n read_lengths_total]\n\n\n<mask token>\n\n\ndef getChromosome(str):\n if str == '*' or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n",
"step-3": "<mask token>\n\n\ndef parseFile(file, frequency_tree):\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n num_a = 0\n num_c = 0\n num_g = 0\n num_t = 0\n print('############# OPENING SAM FILE', file=sys.stderr)\n with open(file, 'rt') as fp:\n line = fp.readline()\n while line:\n subline = line_spliter.split(line)\n line = fp.readline()\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n bases_count = Counter(read)\n num_a += bases_count['A']\n num_c += bases_count['C']\n num_g += bases_count['G']\n num_t += bases_count['T']\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (\n forward_reads + reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = (gene_annotation_match /\n gene_annotation_total)\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] -\n position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average,\n read_frequency, std_of_pos_diff, mean_of_pos_diffs,\n num_chromosomes, max_position_difference,\n min_position_difference, num_a / read_lengths_total, num_c /\n read_lengths_total, num_g / read_lengths_total, num_t /\n read_lengths_total]\n\n\ndef parseString(txt, frequency_tree):\n spliter = re.compile('\\n+')\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n lines = spliter.split(txt)\n for i in range(len(lines) - 1):\n subline = line_spliter.split(lines[i])\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n chromosome = 
getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (forward_reads +\n reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = gene_annotation_match / gene_annotation_total\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] - position_list[i]\n )\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average, read_frequency,\n std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,\n max_position_difference, min_position_difference]\n\n\ndef getChromosome(str):\n if str == '*' or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n",
"step-4": "from collections import defaultdict, Counter\nimport numpy as np\nimport sys\nimport re\n\n\ndef parseFile(file, frequency_tree):\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n num_a = 0\n num_c = 0\n num_g = 0\n num_t = 0\n print('############# OPENING SAM FILE', file=sys.stderr)\n with open(file, 'rt') as fp:\n line = fp.readline()\n while line:\n subline = line_spliter.split(line)\n line = fp.readline()\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n bases_count = Counter(read)\n num_a += bases_count['A']\n num_c += bases_count['C']\n num_g += bases_count['G']\n num_t += bases_count['T']\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (\n forward_reads + reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = (gene_annotation_match /\n gene_annotation_total)\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] -\n position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average,\n read_frequency, std_of_pos_diff, mean_of_pos_diffs,\n num_chromosomes, max_position_difference,\n min_position_difference, num_a / read_lengths_total, num_c /\n read_lengths_total, num_g / read_lengths_total, num_t /\n read_lengths_total]\n\n\ndef parseString(txt, frequency_tree):\n spliter = re.compile('\\n+')\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n lines = spliter.split(txt)\n for i in range(len(lines) - 1):\n subline = line_spliter.split(lines[i])\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 
1\n read_lengths_total += len(read)\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (forward_reads +\n reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = gene_annotation_match / gene_annotation_total\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] - position_list[i]\n )\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average, read_frequency,\n std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,\n max_position_difference, min_position_difference]\n\n\ndef getChromosome(str):\n if str == '*' or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n",
"step-5": "from collections import defaultdict, Counter\nimport numpy as np\nimport sys\nimport re\n\ndef parseFile(file, frequency_tree):\n readnumber = re.compile('[r]+\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n num_a = 0\n num_c = 0\n num_g = 0\n num_t = 0\n\n\n print(\"############# OPENING SAM FILE\", file=sys.stderr)\n with open(file, 'rt') as fp:\n line = fp.readline()\n while line:\n subline = line_spliter.split(line)\n line = fp.readline()\n if (int(subline[1]) & 4 == 4):\n unmatched_reads += 1\n elif (int(subline[1]) & 16 == 16):\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n bases_count = Counter(read)\n num_a += bases_count[\"A\"]\n num_c += bases_count[\"C\"]\n num_g += bases_count[\"G\"]\n num_t += bases_count[\"T\"]\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if (forward_reads + reverse_reads + unmatched_reads) != 0:\n read_frequency = (forward_reads + reverse_reads) / (forward_reads + reverse_reads + unmatched_reads)\n\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n #TODO there is for sure a better way to do this than with a break\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = gene_annotation_match / gene_annotation_total\n print(\"gene_annotation_percent = \" + str(gene_annotation_percent))\n\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] - position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average, read_frequency, std_of_pos_diff, mean_of_pos_diffs, num_chromosomes, max_position_difference, min_position_difference, num_a/ read_lengths_total, num_c/ read_lengths_total, num_g / read_lengths_total, num_t / read_lengths_total]\n\n\n\ndef parseString(txt, frequency_tree):\n spliter = re.compile('\\n+')\n readnumber = re.compile('[r]+\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n\n lines = spliter.split(txt)\n #Itterating though everyline\n for i in range(len(lines) - 1):\n subline = line_spliter.split(lines[i])\n if (int(subline[1]) & 4 == 4):\n unmatched_reads += 1\n elif (int(subline[1]) & 
16 == 16):\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if (forward_reads + reverse_reads + unmatched_reads) != 0:\n read_frequency = (forward_reads + reverse_reads) / (forward_reads + reverse_reads + unmatched_reads)\n\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n #TODO there is for sure a better way to do this than with a break\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = gene_annotation_match / gene_annotation_total\n print(\"gene_annotation_percent = \" + str(gene_annotation_percent))\n\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] - position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average, read_frequency, std_of_pos_diff, mean_of_pos_diffs, num_chromosomes, max_position_difference, min_position_difference]\n\ndef getChromosome(str):\n if str == \"*\" or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class ProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = ProyectoSistema.objects.all()
serializer_class = ProyectoSistemaSerializer
class UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = UsuarioProyectoSistema.objects.all()
serializer_class = UsuarioProyectoSistemaSerializer
class ProyectoSistemaView(View):
def get(self, request):
data = ProyectoSistema.objects.all()
json = serializers.serialize('json', data)
return HttpResponse(json, content_type='application/json')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SistemaViewSet(viewsets.ModelViewSet):
queryset = Sistema.objects.all()
serializer_class = SistemaSerializer
class ProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = ProyectoSistema.objects.all()
serializer_class = ProyectoSistemaSerializer
class UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = UsuarioProyectoSistema.objects.all()
serializer_class = UsuarioProyectoSistemaSerializer
class ProyectoSistemaView(View):
def get(self, request):
data = ProyectoSistema.objects.all()
json = serializers.serialize('json', data)
return HttpResponse(json, content_type='application/json')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UsuarioViewSet(viewsets.ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SistemaViewSet(viewsets.ModelViewSet):
queryset = Sistema.objects.all()
serializer_class = SistemaSerializer
class ProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = ProyectoSistema.objects.all()
serializer_class = ProyectoSistemaSerializer
class UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = UsuarioProyectoSistema.objects.all()
serializer_class = UsuarioProyectoSistemaSerializer
class ProyectoSistemaView(View):
def get(self, request):
data = ProyectoSistema.objects.all()
json = serializers.serialize('json', data)
return HttpResponse(json, content_type='application/json')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProyectoViewSet(viewsets.ModelViewSet):
queryset = Proyecto.objects.all()
serializer_class = ProyectoSerializer
class UsuarioViewSet(viewsets.ModelViewSet):
queryset = Usuario.objects.all()
serializer_class = UsuariosSerializer
class SistemaViewSet(viewsets.ModelViewSet):
queryset = Sistema.objects.all()
serializer_class = SistemaSerializer
class ProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = ProyectoSistema.objects.all()
serializer_class = ProyectoSistemaSerializer
class UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = UsuarioProyectoSistema.objects.all()
serializer_class = UsuarioProyectoSistemaSerializer
class ProyectoSistemaView(View):
def get(self, request):
data = ProyectoSistema.objects.all()
json = serializers.serialize('json', data)
return HttpResponse(json, content_type='application/json')
<|reserved_special_token_1|>
from rest_framework import viewsets
from .models import *
from serializer import *
from django.http import HttpResponse
from django.views import View
from django.core import serializers
# Create your views here.
class ProyectoViewSet(viewsets.ModelViewSet):
queryset = Proyecto.objects.all()
serializer_class = ProyectoSerializer
class UsuarioViewSet(viewsets.ModelViewSet):
queryset = Usuario.objects.all()
serializer_class = UsuariosSerializer
class SistemaViewSet(viewsets.ModelViewSet):
queryset = Sistema.objects.all()
serializer_class = SistemaSerializer
class ProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = ProyectoSistema.objects.all()
serializer_class = ProyectoSistemaSerializer
class UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):
queryset = UsuarioProyectoSistema.objects.all()
serializer_class = UsuarioProyectoSistemaSerializer
class ProyectoSistemaView(View):
def get(self, request):
data = ProyectoSistema.objects.all()
json = serializers.serialize('json', data)
return HttpResponse(json, content_type='application/json')
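# A minimal routing sketch (illustrative only): these ModelViewSets would normally be
# exposed through a DRF router in the project's urls.py. The URL prefixes below are
# assumptions, not taken from the original project.
#
#     from django.urls import include, path
#     from rest_framework import routers
#     from . import views
#
#     router = routers.DefaultRouter()
#     router.register(r'proyectos', views.ProyectoViewSet)
#     router.register(r'usuarios', views.UsuarioViewSet)
#     router.register(r'sistemas', views.SistemaViewSet)
#     router.register(r'proyecto-sistemas', views.ProyectoSistemaViewSet)
#     router.register(r'usuario-proyecto-sistemas', views.UsuarioProyectoSistemaViewSet)
#
#     urlpatterns = [
#         path('api/', include(router.urls)),
#         path('proyecto-sistema/', views.ProyectoSistemaView.as_view()),
#     ]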
|
flexible
|
{
"blob_id": "bedae2621bfcc64deb0d13d7cbce3cfb89720245",
"index": 4346,
"step-1": "<mask token>\n\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n",
"step-2": "<mask token>\n\n\nclass SistemaViewSet(viewsets.ModelViewSet):\n queryset = Sistema.objects.all()\n serializer_class = SistemaSerializer\n\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n",
"step-3": "<mask token>\n\n\nclass UsuarioViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n\n\nclass SistemaViewSet(viewsets.ModelViewSet):\n queryset = Sistema.objects.all()\n serializer_class = SistemaSerializer\n\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n",
"step-4": "<mask token>\n\n\nclass ProyectoViewSet(viewsets.ModelViewSet):\n queryset = Proyecto.objects.all()\n serializer_class = ProyectoSerializer\n\n\nclass UsuarioViewSet(viewsets.ModelViewSet):\n queryset = Usuario.objects.all()\n serializer_class = UsuariosSerializer\n\n\nclass SistemaViewSet(viewsets.ModelViewSet):\n queryset = Sistema.objects.all()\n serializer_class = SistemaSerializer\n\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n",
"step-5": "from rest_framework import viewsets\nfrom .models import *\nfrom serializer import *\nfrom django.http import HttpResponse\nfrom django.views import View\nfrom django.core import serializers\n# Create your views here.\n\nclass ProyectoViewSet(viewsets.ModelViewSet):\n queryset = Proyecto.objects.all()\n serializer_class = ProyectoSerializer\n\nclass UsuarioViewSet(viewsets.ModelViewSet):\n queryset = Usuario.objects.all()\n serializer_class = UsuariosSerializer\n\nclass SistemaViewSet(viewsets.ModelViewSet):\n queryset = Sistema.objects.all()\n serializer_class = SistemaSerializer\n\nclass ProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = ProyectoSistema.objects.all()\n serializer_class = ProyectoSistemaSerializer\n\nclass UsuarioProyectoSistemaViewSet(viewsets.ModelViewSet):\n queryset = UsuarioProyectoSistema.objects.all()\n serializer_class = UsuarioProyectoSistemaSerializer\n\nclass ProyectoSistemaView(View):\n\n def get(self, request):\n data = ProyectoSistema.objects.all()\n json = serializers.serialize('json', data)\n return HttpResponse(json, content_type='application/json')\n\n",
"step-ids": [
6,
8,
9,
12,
14
]
}
|
[
6,
8,
9,
12,
14
] |
<|reserved_special_token_0|>
@api(canonical_alias='nncf.torch.create_compressed_model')
@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=
'config')])
def create_compressed_model(model: Module, config: NNCFConfig,
compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:
Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],
Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],
Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[
CompressionAlgorithmController, NNCFNetwork]:
"""
The main function used to produce a model ready for compression fine-tuning from an original PyTorch
model and a configuration object.
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
source.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:type config: nncf.NNCFConfig
:param compression_state: representation of the entire compression state to unambiguously restore
the compressed model. Includes builder and controller states.
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
the internal graph representation via tracing. Specifying this is useful when the original training pipeline
has special formats of data loader output or has additional *forward* arguments other than input tensors.
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's
args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code
(see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
forward call before passing the inputs to the underlying compressed model. This is required if the model's
input tensors that are important for compression are not supplied as arguments to the model's forward call
directly, but instead are located in a container (such as list), and the model receives the container as an
argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the
underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each
        tensor among the model's args and kwargs that is important for compression with the nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs
to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args
        and kwargs are the same as were supplied in input, but each compression-relevant tensor in the original input wrapped with nncf.nncf_model_input. Must be specified
if dummy_forward_fn is specified.
:param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs
:param dump_graphs: Whether to dump the internal graph representation of the
original and compressed models in the .dot format into the log directory.
:return: A controller for the compression algorithm (or algorithms, in which case the controller
is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped
as an object of NNCFNetwork.
"""
if isinstance(model, NNCFNetwork):
raise RuntimeError(
"""The model object has already been compressed.
NNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.
Make sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).
If you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call)."""
)
if config.get('target_device') == 'VPU':
warning_deprecated(
'VPU device is deprecated and will no longer be supported in the future.'
)
set_debug_log_dir(config.get('log_dir', '.'))
is_legacy_model_state_dict = (compression_state is not None and
BaseController.BUILDER_STATE not in compression_state and
BaseController.CONTROLLER_STATE not in compression_state)
maybe_convert_legacy_names_in_compress_state(compression_state)
should_init = compression_state is None
nncf_network = create_nncf_network(model, config, dummy_forward_fn,
wrap_inputs_fn, wrap_outputs_fn)
if dump_graphs and is_main_process():
nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(
'log_dir', '.'), 'original_graph.dot'))
builder = create_compression_algorithm_builder(config, should_init)
is_state_loadable = (not is_legacy_model_state_dict and
compression_state is not None)
if is_state_loadable:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(nncf_network)
compression_ctrl = builder.build_controller(compressed_model)
if is_state_loadable:
compression_ctrl.load_state(compression_state[BaseController.
CONTROLLER_STATE])
compressed_model.nncf.set_compression_controller(compression_ctrl)
compressed_model.nncf.rebuild_graph()
try:
if is_legacy_model_state_dict:
from nncf.torch import load_state
state_dict_to_load = compression_state.get('state_dict',
compression_state)
load_state(compressed_model, state_dict_to_load, is_resume=True)
finally:
if dump_graphs and is_main_process():
compressed_model_graph = compressed_model.nncf.get_graph()
compressed_model_graph.visualize_graph(osp.join(config.get(
'log_dir', '.'), 'compressed_graph.dot'))
synchronize_all_processes_in_distributed_mode()
return compression_ctrl, compressed_model
<|reserved_special_token_0|>
def create_compression_algorithm_builder_from_algo_names(algo_names: List[
str], config: NNCFConfig, should_init: bool
) ->PTCompressionAlgorithmBuilder:
"""
Create compression algorithm builders by a given list of algorithm names.
:param algo_names: list of algorithm names
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
if not algo_names:
algo_builder_classes = [NoCompressionAlgorithmBuilder]
else:
algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for
algo_name in algo_names]
if len(algo_builder_classes) == 1:
builder = next(iter(algo_builder_classes))(config, should_init=
should_init)
else:
builder = PTCompositeCompressionAlgorithmBuilder(config,
should_init=should_init)
return builder
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api(canonical_alias='nncf.torch.create_compressed_model')
@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=
'config')])
def create_compressed_model(model: Module, config: NNCFConfig,
compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:
Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],
Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],
Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[
CompressionAlgorithmController, NNCFNetwork]:
"""
The main function used to produce a model ready for compression fine-tuning from an original PyTorch
model and a configuration object.
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
source.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:type config: nncf.NNCFConfig
:param compression_state: representation of the entire compression state to unambiguously restore
the compressed model. Includes builder and controller states.
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
the internal graph representation via tracing. Specifying this is useful when the original training pipeline
has special formats of data loader output or has additional *forward* arguments other than input tensors.
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's
args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code
(see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
forward call before passing the inputs to the underlying compressed model. This is required if the model's
input tensors that are important for compression are not supplied as arguments to the model's forward call
directly, but instead are located in a container (such as list), and the model receives the container as an
argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the
underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each
        tensor among the model's args and kwargs that is important for compression with the nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs
to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args
        and kwargs are the same as were supplied in input, but each compression-relevant tensor in the original input wrapped with nncf.nncf_model_input. Must be specified
if dummy_forward_fn is specified.
:param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs
:param dump_graphs: Whether to dump the internal graph representation of the
original and compressed models in the .dot format into the log directory.
:return: A controller for the compression algorithm (or algorithms, in which case the controller
is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped
as an object of NNCFNetwork.
"""
if isinstance(model, NNCFNetwork):
raise RuntimeError(
"""The model object has already been compressed.
NNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.
Make sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).
If you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call)."""
)
if config.get('target_device') == 'VPU':
warning_deprecated(
'VPU device is deprecated and will no longer be supported in the future.'
)
set_debug_log_dir(config.get('log_dir', '.'))
is_legacy_model_state_dict = (compression_state is not None and
BaseController.BUILDER_STATE not in compression_state and
BaseController.CONTROLLER_STATE not in compression_state)
maybe_convert_legacy_names_in_compress_state(compression_state)
should_init = compression_state is None
nncf_network = create_nncf_network(model, config, dummy_forward_fn,
wrap_inputs_fn, wrap_outputs_fn)
if dump_graphs and is_main_process():
nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(
'log_dir', '.'), 'original_graph.dot'))
builder = create_compression_algorithm_builder(config, should_init)
is_state_loadable = (not is_legacy_model_state_dict and
compression_state is not None)
if is_state_loadable:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(nncf_network)
compression_ctrl = builder.build_controller(compressed_model)
if is_state_loadable:
compression_ctrl.load_state(compression_state[BaseController.
CONTROLLER_STATE])
compressed_model.nncf.set_compression_controller(compression_ctrl)
compressed_model.nncf.rebuild_graph()
try:
if is_legacy_model_state_dict:
from nncf.torch import load_state
state_dict_to_load = compression_state.get('state_dict',
compression_state)
load_state(compressed_model, state_dict_to_load, is_resume=True)
finally:
if dump_graphs and is_main_process():
compressed_model_graph = compressed_model.nncf.get_graph()
compressed_model_graph.visualize_graph(osp.join(config.get(
'log_dir', '.'), 'compressed_graph.dot'))
synchronize_all_processes_in_distributed_mode()
return compression_ctrl, compressed_model
<|reserved_special_token_0|>
def create_compression_algorithm_builder(config: NNCFConfig, should_init=True
) ->PTCompressionAlgorithmBuilder:
"""
Create compression algorithm builders by a given list of algorithm names.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
algo_names = extract_algorithm_names(config)
return create_compression_algorithm_builder_from_algo_names(algo_names,
config, should_init)
def create_compression_algorithm_builder_from_algo_names(algo_names: List[
str], config: NNCFConfig, should_init: bool
) ->PTCompressionAlgorithmBuilder:
"""
Create compression algorithm builders by a given list of algorithm names.
:param algo_names: list of algorithm names
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
if not algo_names:
algo_builder_classes = [NoCompressionAlgorithmBuilder]
else:
algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for
algo_name in algo_names]
if len(algo_builder_classes) == 1:
builder = next(iter(algo_builder_classes))(config, should_init=
should_init)
else:
builder = PTCompositeCompressionAlgorithmBuilder(config,
should_init=should_init)
return builder
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api(canonical_alias='nncf.torch.create_compressed_model')
@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=
'config')])
def create_compressed_model(model: Module, config: NNCFConfig,
compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:
Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],
Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],
Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[
CompressionAlgorithmController, NNCFNetwork]:
"""
The main function used to produce a model ready for compression fine-tuning from an original PyTorch
model and a configuration object.
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
source.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:type config: nncf.NNCFConfig
:param compression_state: representation of the entire compression state to unambiguously restore
the compressed model. Includes builder and controller states.
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
the internal graph representation via tracing. Specifying this is useful when the original training pipeline
has special formats of data loader output or has additional *forward* arguments other than input tensors.
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's
args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code
(see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
forward call before passing the inputs to the underlying compressed model. This is required if the model's
input tensors that are important for compression are not supplied as arguments to the model's forward call
directly, but instead are located in a container (such as list), and the model receives the container as an
argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the
underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each
        tensor among the model's args and kwargs that is important for compression with the nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs
to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args
        and kwargs are the same as were supplied in input, but each compression-relevant tensor in the original input wrapped with nncf.nncf_model_input. Must be specified
if dummy_forward_fn is specified.
:param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs
:param dump_graphs: Whether to dump the internal graph representation of the
original and compressed models in the .dot format into the log directory.
:return: A controller for the compression algorithm (or algorithms, in which case the controller
is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped
as an object of NNCFNetwork.
"""
if isinstance(model, NNCFNetwork):
raise RuntimeError(
"""The model object has already been compressed.
NNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.
Make sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).
If you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call)."""
)
if config.get('target_device') == 'VPU':
warning_deprecated(
'VPU device is deprecated and will no longer be supported in the future.'
)
set_debug_log_dir(config.get('log_dir', '.'))
is_legacy_model_state_dict = (compression_state is not None and
BaseController.BUILDER_STATE not in compression_state and
BaseController.CONTROLLER_STATE not in compression_state)
maybe_convert_legacy_names_in_compress_state(compression_state)
should_init = compression_state is None
nncf_network = create_nncf_network(model, config, dummy_forward_fn,
wrap_inputs_fn, wrap_outputs_fn)
if dump_graphs and is_main_process():
nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(
'log_dir', '.'), 'original_graph.dot'))
builder = create_compression_algorithm_builder(config, should_init)
is_state_loadable = (not is_legacy_model_state_dict and
compression_state is not None)
if is_state_loadable:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(nncf_network)
compression_ctrl = builder.build_controller(compressed_model)
if is_state_loadable:
compression_ctrl.load_state(compression_state[BaseController.
CONTROLLER_STATE])
compressed_model.nncf.set_compression_controller(compression_ctrl)
compressed_model.nncf.rebuild_graph()
try:
if is_legacy_model_state_dict:
from nncf.torch import load_state
state_dict_to_load = compression_state.get('state_dict',
compression_state)
load_state(compressed_model, state_dict_to_load, is_resume=True)
finally:
if dump_graphs and is_main_process():
compressed_model_graph = compressed_model.nncf.get_graph()
compressed_model_graph.visualize_graph(osp.join(config.get(
'log_dir', '.'), 'compressed_graph.dot'))
synchronize_all_processes_in_distributed_mode()
return compression_ctrl, compressed_model
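# A minimal usage sketch of create_compressed_model (illustrative only). The config
# dict follows the standard NNCF JSON schema; `model` and `train_loader` are
# placeholders supplied by the training pipeline.
#
#     from nncf import NNCFConfig
#     from nncf.torch import create_compressed_model, register_default_init_args
#
#     nncf_config = NNCFConfig.from_dict({
#         "input_info": {"sample_size": [1, 3, 224, 224]},
#         "compression": {"algorithm": "quantization"},
#     })
#     nncf_config = register_default_init_args(nncf_config, train_loader)
#     compression_ctrl, compressed_model = create_compressed_model(model, nncf_config)
#     # ...fine-tune compressed_model as usual, consulting compression_ctrl.statistics()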
def create_nncf_network(model: torch.nn.Module, config: NNCFConfig,
dummy_forward_fn: Callable[[Module], Any]=None, wrap_inputs_fn:
Callable=None, wrap_outputs_fn: Callable=None) ->NNCFNetwork:
"""
The main function used to produce a model ready for adding compression from an original PyTorch
model and a configuration object.
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
source.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
the internal graph representation via tracing. Specifying this is useful when the original training pipeline
has special formats of data loader output or has additional *forward* arguments other than input tensors.
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
nncf.nncf_model_input
functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these
calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is
specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
forward call before passing the inputs to the underlying compressed model. This is required if the model's input
tensors that are important for compression are not supplied as arguments to the model's forward call directly,
but instead are located in a container (such as list), and the model receives the container as an argument.
wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying
model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among
the supplied model's args and kwargs that is important for compression (e.g. quantization) with an
nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced
    by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are
    the same as were supplied in input, but with each compression-relevant tensor in the original input wrapped as
    described above. Must be specified if
dummy_forward_fn is specified.
:param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.
:return: A model wrapped by NNCFNetwork, which is ready for adding compression."""
if dummy_forward_fn is not None and wrap_inputs_fn is None:
raise ValueError(
'A custom dummy forward function was specified, but the corresponding input wrapping function was not. In case a custom dummy forward function is specified for purposes of NNCF graph building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with the input wrapping done in dummy_forward_fn.'
)
with training_mode_switcher(model, is_training=False):
input_info_list = create_input_infos(config)
scopes_without_shape_matching = config.get(
'scopes_without_shape_matching', [])
ignored_scopes = config.get('ignored_scopes')
target_scopes = config.get('target_scopes')
nncf_network = NNCFNetwork(model, input_infos=input_info_list,
dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=
wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes
=ignored_scopes, target_scopes=target_scopes,
scopes_without_shape_matching=scopes_without_shape_matching)
nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()
synchronize_all_processes_in_distributed_mode()
return nncf_network
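# --- Editor's illustrative sketch (not part of the original NNCF module) ---
# The docstring above requires dummy_forward_fn and wrap_inputs_fn to mark every
# compression-relevant input tensor with nncf_model_input, and to do so identically in
# both functions. This sketch assumes a model whose forward() takes a list of tensors as
# its single positional argument; the import location of nncf_model_input is an
# assumption and may differ between NNCF versions.
def _example_input_wrapping_helpers():
    import torch
    from nncf.torch import nncf_model_input  # assumed export location
    def wrap_inputs_fn(args, kwargs):
        # args[0] is assumed to be the list of input tensors; wrap each one.
        wrapped = [nncf_model_input(t) for t in args[0]]
        return (wrapped,) + tuple(args[1:]), kwargs
    def dummy_forward_fn(module):
        # Mock input of the shape the model expects, wrapped exactly as above.
        mock_inputs = [nncf_model_input(torch.ones(1, 3, 224, 224))]
        return module(mock_inputs)
    return wrap_inputs_fn, dummy_forward_fn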
def synchronize_all_processes_in_distributed_mode():
if is_dist_avail_and_initialized():
try:
barrier()
except RuntimeError as err:
nncf_logger.warning(
'Training pipeline spawned an error while synchronizing distributed training processes:'
)
nncf_logger.warning(err)
nncf_logger.warning(
'Desynchronization of distributed processes may occur.')
def create_compression_algorithm_builder(config: NNCFConfig, should_init=True
) ->PTCompressionAlgorithmBuilder:
"""
    Create a compression algorithm builder from the algorithm names specified in the config.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
algo_names = extract_algorithm_names(config)
return create_compression_algorithm_builder_from_algo_names(algo_names,
config, should_init)
def create_compression_algorithm_builder_from_algo_names(algo_names: List[
str], config: NNCFConfig, should_init: bool
) ->PTCompressionAlgorithmBuilder:
"""
    Create a compression algorithm builder from a given list of algorithm names; several names yield a composite builder.
:param algo_names: list of algorithm names
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
if not algo_names:
algo_builder_classes = [NoCompressionAlgorithmBuilder]
else:
algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for
algo_name in algo_names]
if len(algo_builder_classes) == 1:
builder = next(iter(algo_builder_classes))(config, should_init=
should_init)
else:
builder = PTCompositeCompressionAlgorithmBuilder(config,
should_init=should_init)
return builder
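# --- Editor's illustrative sketch (not part of the original NNCF module) ---
# A hedged example of the explicit-names entry point above. The config and the two
# registered algorithm names are assumptions for illustration; passing several names
# yields a composite builder, while a single name would yield a plain builder.
def _example_builder_from_names():
    config = NNCFConfig.from_dict({
        'input_info': {'sample_size': [1, 3, 32, 32]},
        'compression': [
            {'algorithm': 'quantization'},
            {'algorithm': 'magnitude_sparsity'}]})
    return create_compression_algorithm_builder_from_algo_names(
        ['quantization', 'magnitude_sparsity'], config, should_init=False)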
<|reserved_special_token_1|>
from os import path as osp
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch.distributed import barrier
from torch.nn import Module
from nncf.api.compression import CompressionAlgorithmController
from nncf.common.compression import BaseCompressionAlgorithmController as BaseController
from nncf.common.deprecation import warning_deprecated
from nncf.common.logging import nncf_logger
from nncf.common.utils.api_marker import api
from nncf.common.utils.debug import set_debug_log_dir
from nncf.config import NNCFConfig
from nncf.config.extractors import extract_algorithm_names
from nncf.config.telemetry_extractors import CompressionStartedFromConfig
from nncf.telemetry import tracked_function
from nncf.telemetry.events import NNCF_PT_CATEGORY
from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS
from nncf.torch.algo_selector import NoCompressionAlgorithmBuilder
from nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder
from nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder
from nncf.torch.dynamic_graph.graph_tracer import create_input_infos
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.utils import is_dist_avail_and_initialized
from nncf.torch.utils import is_main_process
from nncf.torch.utils import maybe_convert_legacy_names_in_compress_state
from nncf.torch.utils import training_mode_switcher
@api(canonical_alias='nncf.torch.create_compressed_model')
@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=
'config')])
def create_compressed_model(model: Module, config: NNCFConfig,
compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:
Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],
Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],
Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[
CompressionAlgorithmController, NNCFNetwork]:
"""
The main function used to produce a model ready for compression fine-tuning from an original PyTorch
model and a configuration object.
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
source.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:type config: nncf.NNCFConfig
:param compression_state: representation of the entire compression state to unambiguously restore
the compressed model. Includes builder and controller states.
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
the internal graph representation via tracing. Specifying this is useful when the original training pipeline
has special formats of data loader output or has additional *forward* arguments other than input tensors.
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's
args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code
(see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
forward call before passing the inputs to the underlying compressed model. This is required if the model's
input tensors that are important for compression are not supplied as arguments to the model's forward call
directly, but instead are located in a container (such as list), and the model receives the container as an
argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the
    underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each
    tensor among the supplied model's args and kwargs that is important for compression (e.g. quantization) with an
    nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced
    by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are
    the same as were supplied in input, but with each such tensor in the original input wrapped. Must be specified
if dummy_forward_fn is specified.
:param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs
:param dump_graphs: Whether to dump the internal graph representation of the
original and compressed models in the .dot format into the log directory.
:return: A controller for the compression algorithm (or algorithms, in which case the controller
is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped
as an object of NNCFNetwork.
"""
if isinstance(model, NNCFNetwork):
raise RuntimeError(
"""The model object has already been compressed.
NNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.
Make sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).
If you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call)."""
)
if config.get('target_device') == 'VPU':
warning_deprecated(
'VPU device is deprecated and will no longer be supported in the future.'
)
set_debug_log_dir(config.get('log_dir', '.'))
is_legacy_model_state_dict = (compression_state is not None and
BaseController.BUILDER_STATE not in compression_state and
BaseController.CONTROLLER_STATE not in compression_state)
maybe_convert_legacy_names_in_compress_state(compression_state)
should_init = compression_state is None
nncf_network = create_nncf_network(model, config, dummy_forward_fn,
wrap_inputs_fn, wrap_outputs_fn)
if dump_graphs and is_main_process():
nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(
'log_dir', '.'), 'original_graph.dot'))
builder = create_compression_algorithm_builder(config, should_init)
is_state_loadable = (not is_legacy_model_state_dict and
compression_state is not None)
if is_state_loadable:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(nncf_network)
compression_ctrl = builder.build_controller(compressed_model)
if is_state_loadable:
compression_ctrl.load_state(compression_state[BaseController.
CONTROLLER_STATE])
compressed_model.nncf.set_compression_controller(compression_ctrl)
compressed_model.nncf.rebuild_graph()
try:
if is_legacy_model_state_dict:
from nncf.torch import load_state
state_dict_to_load = compression_state.get('state_dict',
compression_state)
load_state(compressed_model, state_dict_to_load, is_resume=True)
finally:
if dump_graphs and is_main_process():
compressed_model_graph = compressed_model.nncf.get_graph()
compressed_model_graph.visualize_graph(osp.join(config.get(
'log_dir', '.'), 'compressed_graph.dot'))
synchronize_all_processes_in_distributed_mode()
return compression_ctrl, compressed_model
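# --- Editor's illustrative sketch (not part of the original NNCF module) ---
# A hedged example of the compression_state round trip described in the docstring above:
# the state returned by the controller is saved next to the weights and passed back on
# resume. `model_factory` is a hypothetical callable producing a fresh model object; a
# fresh object is needed because create_compressed_model() refuses an already-wrapped
# NNCFNetwork (see the RuntimeError raised above).
def _example_compression_state_roundtrip(model_factory, config, ckpt_path):
    import torch
    compression_ctrl, compressed_model = create_compressed_model(model_factory(), config)
    torch.save({
        'state_dict': compressed_model.state_dict(),
        'compression_state': compression_ctrl.get_compression_state()}, ckpt_path)
    checkpoint = torch.load(ckpt_path, map_location='cpu')
    compression_ctrl, compressed_model = create_compressed_model(
        model_factory(), config, compression_state=checkpoint['compression_state'])
    compressed_model.load_state_dict(checkpoint['state_dict'])
    return compression_ctrl, compressed_model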
def create_nncf_network(model: torch.nn.Module, config: NNCFConfig,
dummy_forward_fn: Callable[[Module], Any]=None, wrap_inputs_fn:
Callable=None, wrap_outputs_fn: Callable=None) ->NNCFNetwork:
"""
The main function used to produce a model ready for adding compression from an original PyTorch
model and a configuration object.
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
source.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
the internal graph representation via tracing. Specifying this is useful when the original training pipeline
has special formats of data loader output or has additional *forward* arguments other than input tensors.
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
nncf.nncf_model_input
functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these
calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is
specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
forward call before passing the inputs to the underlying compressed model. This is required if the model's input
tensors that are important for compression are not supplied as arguments to the model's forward call directly,
but instead are located in a container (such as list), and the model receives the container as an argument.
wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying
model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among
the supplied model's args and kwargs that is important for compression (e.g. quantization) with an
nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced
    by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are
    the same as were supplied in input, but with each compression-relevant tensor in the original input wrapped as
    described above. Must be specified if
dummy_forward_fn is specified.
:param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.
:return: A model wrapped by NNCFNetwork, which is ready for adding compression."""
if dummy_forward_fn is not None and wrap_inputs_fn is None:
raise ValueError(
'A custom dummy forward function was specified, but the corresponding input wrapping function was not. In case a custom dummy forward function is specified for purposes of NNCF graph building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with the input wrapping done in dummy_forward_fn.'
)
with training_mode_switcher(model, is_training=False):
input_info_list = create_input_infos(config)
scopes_without_shape_matching = config.get(
'scopes_without_shape_matching', [])
ignored_scopes = config.get('ignored_scopes')
target_scopes = config.get('target_scopes')
nncf_network = NNCFNetwork(model, input_infos=input_info_list,
dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=
wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes
=ignored_scopes, target_scopes=target_scopes,
scopes_without_shape_matching=scopes_without_shape_matching)
nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()
synchronize_all_processes_in_distributed_mode()
return nncf_network
def synchronize_all_processes_in_distributed_mode():
if is_dist_avail_and_initialized():
try:
barrier()
except RuntimeError as err:
nncf_logger.warning(
'Training pipeline spawned an error while synchronizing distributed training processes:'
)
nncf_logger.warning(err)
nncf_logger.warning(
'Desynchronization of distributed processes may occur.')
def create_compression_algorithm_builder(config: NNCFConfig, should_init=True
) ->PTCompressionAlgorithmBuilder:
"""
    Create a compression algorithm builder from the algorithm names specified in the config.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
algo_names = extract_algorithm_names(config)
return create_compression_algorithm_builder_from_algo_names(algo_names,
config, should_init)
def create_compression_algorithm_builder_from_algo_names(algo_names: List[
str], config: NNCFConfig, should_init: bool
) ->PTCompressionAlgorithmBuilder:
"""
    Create a compression algorithm builder from a given list of algorithm names; several names yield a composite builder.
:param algo_names: list of algorithm names
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
if not algo_names:
algo_builder_classes = [NoCompressionAlgorithmBuilder]
else:
algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for
algo_name in algo_names]
if len(algo_builder_classes) == 1:
builder = next(iter(algo_builder_classes))(config, should_init=
should_init)
else:
builder = PTCompositeCompressionAlgorithmBuilder(config,
should_init=should_init)
return builder
<|reserved_special_token_1|>
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path as osp
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch.distributed import barrier
from torch.nn import Module
from nncf.api.compression import CompressionAlgorithmController
from nncf.common.compression import BaseCompressionAlgorithmController as BaseController
from nncf.common.deprecation import warning_deprecated
from nncf.common.logging import nncf_logger
from nncf.common.utils.api_marker import api
from nncf.common.utils.debug import set_debug_log_dir
from nncf.config import NNCFConfig
from nncf.config.extractors import extract_algorithm_names
from nncf.config.telemetry_extractors import CompressionStartedFromConfig
from nncf.telemetry import tracked_function
from nncf.telemetry.events import NNCF_PT_CATEGORY
from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS
from nncf.torch.algo_selector import NoCompressionAlgorithmBuilder
from nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder
from nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder
from nncf.torch.dynamic_graph.graph_tracer import create_input_infos
from nncf.torch.nncf_network import NNCFNetwork
# pylint:disable=too-many-branches
from nncf.torch.utils import is_dist_avail_and_initialized
from nncf.torch.utils import is_main_process
from nncf.torch.utils import maybe_convert_legacy_names_in_compress_state
from nncf.torch.utils import training_mode_switcher
@api(canonical_alias="nncf.torch.create_compressed_model")
@tracked_function(
NNCF_PT_CATEGORY,
[
CompressionStartedFromConfig(argname="config"),
],
)
def create_compressed_model(
model: Module,
config: NNCFConfig,
compression_state: Optional[Dict[str, Any]] = None,
dummy_forward_fn: Callable[[Module], Any] = None,
wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,
wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,
dump_graphs=True,
) -> Tuple[CompressionAlgorithmController, NNCFNetwork]:
"""
The main function used to produce a model ready for compression fine-tuning from an original PyTorch
model and a configuration object.
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
source.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:type config: nncf.NNCFConfig
:param compression_state: representation of the entire compression state to unambiguously restore
the compressed model. Includes builder and controller states.
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
the internal graph representation via tracing. Specifying this is useful when the original training pipeline
has special formats of data loader output or has additional *forward* arguments other than input tensors.
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's
args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code
(see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
forward call before passing the inputs to the underlying compressed model. This is required if the model's
input tensors that are important for compression are not supplied as arguments to the model's forward call
directly, but instead are located in a container (such as list), and the model receives the container as an
argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the
underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each
tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs
to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args
and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified
if dummy_forward_fn is specified.
:param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs
:param dump_graphs: Whether to dump the internal graph representation of the
original and compressed models in the .dot format into the log directory.
:return: A controller for the compression algorithm (or algorithms, in which case the controller
is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped
as an object of NNCFNetwork.
"""
if isinstance(model, NNCFNetwork):
raise RuntimeError(
"The model object has already been compressed.\n"
"NNCF for PyTorch modifies the model object in-place, and repeat calls to "
"`nncf.torch.create_compressed_model` with the same model object passed as argument "
"will lead to an incorrect attempt to compress the model twice.\n"
"Make sure that the model object you are passing has not already been compressed (for "
"instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\n"
"If you are encountering this in a Jupyter notebook context - make sure that when "
"re-running cells involving `nncf.torch.create_compressed_model` the original model object "
"is also re-created (via constructor call)."
)
if config.get("target_device") == "VPU":
warning_deprecated("VPU device is deprecated and will no longer be supported in the future.")
set_debug_log_dir(config.get("log_dir", "."))
is_legacy_model_state_dict = (
compression_state is not None
and BaseController.BUILDER_STATE not in compression_state
and BaseController.CONTROLLER_STATE not in compression_state
)
maybe_convert_legacy_names_in_compress_state(compression_state)
should_init = compression_state is None
nncf_network = create_nncf_network(model, config, dummy_forward_fn, wrap_inputs_fn, wrap_outputs_fn)
if dump_graphs and is_main_process():
nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get("log_dir", "."), "original_graph.dot"))
builder = create_compression_algorithm_builder(config, should_init)
is_state_loadable = not is_legacy_model_state_dict and compression_state is not None
if is_state_loadable:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(nncf_network)
compression_ctrl = builder.build_controller(compressed_model)
if is_state_loadable:
compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE])
compressed_model.nncf.set_compression_controller(compression_ctrl)
# Required to ensure that the model leaving create_compressed_model has correct compressed graph.
# In particular, this is currently required for correct functioning of RNNs.
compressed_model.nncf.rebuild_graph()
try:
if is_legacy_model_state_dict:
from nncf.torch import load_state # pylint: disable=cyclic-import
state_dict_to_load = compression_state.get("state_dict", compression_state)
load_state(compressed_model, state_dict_to_load, is_resume=True)
finally:
if dump_graphs and is_main_process():
compressed_model_graph = compressed_model.nncf.get_graph()
compressed_model_graph.visualize_graph(osp.join(config.get("log_dir", "."), "compressed_graph.dot"))
synchronize_all_processes_in_distributed_mode()
return compression_ctrl, compressed_model
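# --- Editor's illustrative sketch (not part of the original NNCF module) ---
# A variant of the input wrapping described in the docstring above, for a model whose
# forward() receives its tensors as keyword arguments. The keyword names ("images",
# "masks") and the nncf_model_input import location are assumptions for illustration.
def _example_wrap_keyword_inputs():
    from nncf.torch import nncf_model_input  # assumed export location
    def wrap_inputs_fn(args, kwargs):
        wrapped_kwargs = dict(kwargs)
        for name in ("images", "masks"):
            if name in wrapped_kwargs:
                # Mark only the compression-relevant tensors; other kwargs pass through.
                wrapped_kwargs[name] = nncf_model_input(wrapped_kwargs[name])
        return args, wrapped_kwargs
    return wrap_inputs_fn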
def create_nncf_network(
model: torch.nn.Module,
config: NNCFConfig,
dummy_forward_fn: Callable[[Module], Any] = None,
wrap_inputs_fn: Callable = None,
wrap_outputs_fn: Callable = None,
) -> NNCFNetwork:
"""
The main function used to produce a model ready for adding compression from an original PyTorch
model and a configuration object.
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
source.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
the internal graph representation via tracing. Specifying this is useful when the original training pipeline
has special formats of data loader output or has additional *forward* arguments other than input tensors.
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
nncf.nncf_model_input
functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these
calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is
specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
forward call before passing the inputs to the underlying compressed model. This is required if the model's input
tensors that are important for compression are not supplied as arguments to the model's forward call directly,
but instead are located in a container (such as list), and the model receives the container as an argument.
wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying
model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among
the supplied model's args and kwargs that is important for compression (e.g. quantization) with an
nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced
    by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are
    the same as were supplied in input, but with each compression-relevant tensor in the original input wrapped as
    described above. Must be specified if
dummy_forward_fn is specified.
:param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.
:return: A model wrapped by NNCFNetwork, which is ready for adding compression."""
if dummy_forward_fn is not None and wrap_inputs_fn is None:
raise ValueError(
"A custom dummy forward function was specified, but the corresponding input wrapping function "
"was not. In case a custom dummy forward function is specified for purposes of NNCF graph "
"building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with "
"the input wrapping done in dummy_forward_fn."
)
# Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode
with training_mode_switcher(model, is_training=False):
# Compress model that will be deployed for the inference on target device. No need to compress parts of the
# model that are used on training stage only (e.g. AuxLogits of Inception-v3 model) or unused modules with
# weights. As a consequence, no need to care about spoiling BN statistics, as they're disabled in eval mode.
input_info_list = create_input_infos(config)
scopes_without_shape_matching = config.get("scopes_without_shape_matching", [])
ignored_scopes = config.get("ignored_scopes")
target_scopes = config.get("target_scopes")
nncf_network = NNCFNetwork(
model,
input_infos=input_info_list,
dummy_forward_fn=dummy_forward_fn,
wrap_inputs_fn=wrap_inputs_fn,
wrap_outputs_fn=wrap_outputs_fn,
ignored_scopes=ignored_scopes,
target_scopes=target_scopes,
scopes_without_shape_matching=scopes_without_shape_matching,
)
nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()
synchronize_all_processes_in_distributed_mode()
return nncf_network
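# --- Editor's illustrative sketch (not part of the original NNCF module) ---
# create_nncf_network() can also be called on its own to inspect the graph NNCF traced,
# without applying any compression algorithm. The output file name is an assumption and
# mirrors the dump_graphs behaviour implemented above.
def _example_inspect_traced_graph(model, config):
    nncf_network = create_nncf_network(model, config)
    graph = nncf_network.nncf.get_graph()
    graph.visualize_graph("traced_graph.dot")
    return graph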
def synchronize_all_processes_in_distributed_mode():
if is_dist_avail_and_initialized():
try:
barrier()
# Exception can be raised during running barrier
# if the backend not in the supported list https://pytorch.org/docs/stable/distributed.html
except RuntimeError as err:
nncf_logger.warning(
"Training pipeline spawned an error while synchronizing distributed training processes:"
)
nncf_logger.warning(err)
nncf_logger.warning("Desynchronization of distributed processes may occur.")
def create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:
"""
    Create a compression algorithm builder from the algorithm names specified in the config.
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
algo_names = extract_algorithm_names(config)
return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init)
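# --- Editor's illustrative sketch (not part of the original NNCF module) ---
# A hedged example of the config-driven path above: extract_algorithm_names() reads the
# "compression" section, and a list with several entries yields a composite builder via
# the helper defined below. The concrete config values are assumptions for illustration.
def _example_config_driven_builder():
    config = NNCFConfig.from_dict({
        "input_info": {"sample_size": [1, 3, 32, 32]},
        "compression": [
            {"algorithm": "quantization"},
            {"algorithm": "magnitude_sparsity"}]})
    return create_compression_algorithm_builder(config, should_init=False)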
def create_compression_algorithm_builder_from_algo_names(
algo_names: List[str], config: NNCFConfig, should_init: bool
) -> PTCompressionAlgorithmBuilder:
"""
    Create a compression algorithm builder from a given list of algorithm names; several names yield a composite builder.
:param algo_names: list of algorithm names
:param config: A configuration object used to determine the exact compression modifications to be applied
to the model
:param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)
the training parameters of the model during model building.
:return: compression algorithm builder
"""
if not algo_names:
algo_builder_classes = [NoCompressionAlgorithmBuilder]
else:
algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for algo_name in algo_names]
if len(algo_builder_classes) == 1:
builder = next(iter(algo_builder_classes))(config, should_init=should_init)
else:
builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init)
return builder
|
flexible
|
{
"blob_id": "cd1ada2d7979fffc17f707ed113efde7aa134954",
"index": 3036,
"step-1": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. 
Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\n<mask token>\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be 
applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n",
"step-2": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. 
Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\n<mask token>\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated 
compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n",
"step-3": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. 
Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(model: torch.nn.Module, config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any]=None, wrap_inputs_fn:\n Callable=None, wrap_outputs_fn: Callable=None) ->NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. 
Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n 'A custom dummy forward function was specified, but the corresponding input wrapping function was not. 
In case a custom dummy forward function is specified for purposes of NNCF graph building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with the input wrapping done in dummy_forward_fn.'\n )\n with training_mode_switcher(model, is_training=False):\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\n 'scopes_without_shape_matching', [])\n ignored_scopes = config.get('ignored_scopes')\n target_scopes = config.get('target_scopes')\n nncf_network = NNCFNetwork(model, input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=\n wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes\n =ignored_scopes, target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching)\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n except RuntimeError as err:\n nncf_logger.warning(\n 'Training pipeline spawned an error while synchronizing distributed training processes:'\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\n 'Desynchronization of distributed processes may occur.')\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n",
"step-4": "from os import path as osp\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\nimport torch\nfrom torch.distributed import barrier\nfrom torch.nn import Module\nfrom nncf.api.compression import CompressionAlgorithmController\nfrom nncf.common.compression import BaseCompressionAlgorithmController as BaseController\nfrom nncf.common.deprecation import warning_deprecated\nfrom nncf.common.logging import nncf_logger\nfrom nncf.common.utils.api_marker import api\nfrom nncf.common.utils.debug import set_debug_log_dir\nfrom nncf.config import NNCFConfig\nfrom nncf.config.extractors import extract_algorithm_names\nfrom nncf.config.telemetry_extractors import CompressionStartedFromConfig\nfrom nncf.telemetry import tracked_function\nfrom nncf.telemetry.events import NNCF_PT_CATEGORY\nfrom nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS\nfrom nncf.torch.algo_selector import NoCompressionAlgorithmBuilder\nfrom nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder\nfrom nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder\nfrom nncf.torch.dynamic_graph.graph_tracer import create_input_infos\nfrom nncf.torch.nncf_network import NNCFNetwork\nfrom nncf.torch.utils import is_dist_avail_and_initialized\nfrom nncf.torch.utils import is_main_process\nfrom nncf.torch.utils import maybe_convert_legacy_names_in_compress_state\nfrom nncf.torch.utils import training_mode_switcher\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. 
The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n 
builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(model: torch.nn.Module, config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any]=None, wrap_inputs_fn:\n Callable=None, wrap_outputs_fn: Callable=None) ->NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. 
Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n 'A custom dummy forward function was specified, but the corresponding input wrapping function was not. In case a custom dummy forward function is specified for purposes of NNCF graph building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with the input wrapping done in dummy_forward_fn.'\n )\n with training_mode_switcher(model, is_training=False):\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\n 'scopes_without_shape_matching', [])\n ignored_scopes = config.get('ignored_scopes')\n target_scopes = config.get('target_scopes')\n nncf_network = NNCFNetwork(model, input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=\n wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes\n =ignored_scopes, target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching)\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n except RuntimeError as err:\n nncf_logger.warning(\n 'Training pipeline spawned an error while synchronizing distributed training processes:'\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\n 'Desynchronization of distributed processes may occur.')\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n",
"step-5": "# Copyright (c) 2023 Intel Corporation\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import path as osp\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\n\nimport torch\nfrom torch.distributed import barrier\nfrom torch.nn import Module\n\nfrom nncf.api.compression import CompressionAlgorithmController\nfrom nncf.common.compression import BaseCompressionAlgorithmController as BaseController\nfrom nncf.common.deprecation import warning_deprecated\nfrom nncf.common.logging import nncf_logger\nfrom nncf.common.utils.api_marker import api\nfrom nncf.common.utils.debug import set_debug_log_dir\nfrom nncf.config import NNCFConfig\nfrom nncf.config.extractors import extract_algorithm_names\nfrom nncf.config.telemetry_extractors import CompressionStartedFromConfig\nfrom nncf.telemetry import tracked_function\nfrom nncf.telemetry.events import NNCF_PT_CATEGORY\nfrom nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS\nfrom nncf.torch.algo_selector import NoCompressionAlgorithmBuilder\nfrom nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder\nfrom nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder\nfrom nncf.torch.dynamic_graph.graph_tracer import create_input_infos\nfrom nncf.torch.nncf_network import NNCFNetwork\n\n# pylint:disable=too-many-branches\nfrom nncf.torch.utils import is_dist_avail_and_initialized\nfrom nncf.torch.utils import is_main_process\nfrom nncf.torch.utils import maybe_convert_legacy_names_in_compress_state\nfrom nncf.torch.utils import training_mode_switcher\n\n\n@api(canonical_alias=\"nncf.torch.create_compressed_model\")\n@tracked_function(\n NNCF_PT_CATEGORY,\n [\n CompressionStartedFromConfig(argname=\"config\"),\n ],\n)\ndef create_compressed_model(\n model: Module,\n config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]] = None,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n dump_graphs=True,\n) -> Tuple[CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. 
Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"The model object has already been compressed.\\n\"\n \"NNCF for PyTorch modifies the model object in-place, and repeat calls to \"\n \"`nncf.torch.create_compressed_model` with the same model object passed as argument \"\n \"will lead to an incorrect attempt to compress the model twice.\\n\"\n \"Make sure that the model object you are passing has not already been compressed (for \"\n \"instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\\n\"\n \"If you are encountering this in a Jupyter notebook context - make sure that when \"\n \"re-running cells involving `nncf.torch.create_compressed_model` the original model object \"\n \"is also re-created (via constructor call).\"\n )\n\n if config.get(\"target_device\") == \"VPU\":\n warning_deprecated(\"VPU device is deprecated and will no longer be supported in the future.\")\n\n set_debug_log_dir(config.get(\"log_dir\", \".\"))\n\n is_legacy_model_state_dict = (\n compression_state is not None\n and BaseController.BUILDER_STATE not in compression_state\n and BaseController.CONTROLLER_STATE not in compression_state\n )\n maybe_convert_legacy_names_in_compress_state(compression_state)\n\n should_init = compression_state is None\n\n nncf_network = create_nncf_network(model, config, dummy_forward_fn, 
wrap_inputs_fn, wrap_outputs_fn)\n\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"original_graph.dot\"))\n builder = create_compression_algorithm_builder(config, should_init)\n\n is_state_loadable = not is_legacy_model_state_dict and compression_state is not None\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE])\n\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n\n # Required to ensure that the model leaving create_compressed_model has correct compressed graph.\n # In particular, this is currently required for correct functioning of RNNs.\n compressed_model.nncf.rebuild_graph()\n\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state # pylint: disable=cyclic-import\n\n state_dict_to_load = compression_state.get(\"state_dict\", compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"compressed_graph.dot\"))\n\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(\n model: torch.nn.Module,\n config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable = None,\n wrap_outputs_fn: Callable = None,\n) -> NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. 
This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n \"A custom dummy forward function was specified, but the corresponding input wrapping function \"\n \"was not. In case a custom dummy forward function is specified for purposes of NNCF graph \"\n \"building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with \"\n \"the input wrapping done in dummy_forward_fn.\"\n )\n\n # Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode\n with training_mode_switcher(model, is_training=False):\n # Compress model that will be deployed for the inference on target device. No need to compress parts of the\n # model that are used on training stage only (e.g. AuxLogits of Inception-v3 model) or unused modules with\n # weights. 
As a consequence, no need to care about spoiling BN statistics, as they're disabled in eval mode.\n\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\"scopes_without_shape_matching\", [])\n ignored_scopes = config.get(\"ignored_scopes\")\n target_scopes = config.get(\"target_scopes\")\n\n nncf_network = NNCFNetwork(\n model,\n input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn,\n wrap_inputs_fn=wrap_inputs_fn,\n wrap_outputs_fn=wrap_outputs_fn,\n ignored_scopes=ignored_scopes,\n target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching,\n )\n\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n # Exception can be raised during running barrier\n # if the backend not in the supported list https://pytorch.org/docs/stable/distributed.html\n except RuntimeError as err:\n nncf_logger.warning(\n \"Training pipeline spawned an error while synchronizing distributed training processes:\"\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\"Desynchronization of distributed processes may occur.\")\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(\n algo_names: List[str], config: NNCFConfig, should_init: bool\n) -> PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init)\n return builder\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
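
The step strings above spell out how dummy_forward_fn and wrap_inputs_fn must cooperate when the compressed model's input tensors arrive inside a container. A minimal sketch of such a pair, assuming a hypothetical model whose forward takes a single dict batch {'image': tensor}; the model, shapes and config values below are illustrative only and not taken from the record:

import torch
from nncf import NNCFConfig
from nncf.torch import create_compressed_model
from nncf.torch import nncf_model_input  # exact import path may differ between NNCF versions


class DictInputModel(torch.nn.Module):
    """Hypothetical model whose forward receives a dict instead of a plain tensor."""

    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)

    def forward(self, batch):
        return self.conv(batch['image'])


def wrap_inputs_fn(args, kwargs):
    # Mark the tensor hidden inside the container so NNCF traces it as a graph input.
    args[0]['image'] = nncf_model_input(args[0]['image'])
    return args, kwargs


def dummy_forward_fn(model):
    # Must wrap the inputs exactly the same way wrap_inputs_fn does.
    batch = {'image': nncf_model_input(torch.zeros(1, 3, 224, 224))}
    return model(batch)


nncf_config = NNCFConfig.from_dict({
    'input_info': {'sample_size': [1, 3, 224, 224]},
    # magnitude_sparsity is chosen here only because it needs no initialization data;
    # algorithms such as quantization usually also require register_default_init_args.
    'compression': {'algorithm': 'magnitude_sparsity'},
})
compression_ctrl, compressed_model = create_compressed_model(
    DictInputModel(), nncf_config,
    dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=wrap_inputs_fn)
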
DEFAULT_SIZE = 512
class DataEncoding:
@staticmethod
def segment_decode(segment):
arr = bytearray(segment)
ack_binary = bytearray([arr[i] for i in range(4)])
tip_binary = bytearray([arr[4]])
len_binary = bytearray([arr[i] for i in (5,6)])
ack = int.from_bytes(ack_binary, byteorder='big', signed=False)
tip = int.from_bytes(tip_binary, byteorder='big', signed=False)
length = int.from_bytes(len_binary, byteorder='big', signed=False)
        data = bytearray(arr[7:7 + length])  # the length field already delimits the useful payload
return {'ack': ack, 'tip': tip, 'len': length, 'data': data}
    # encoding: (segment_number, segment_type, segment_len), segment_data
    # builds the first packet, the one that carries the file name
@staticmethod
def encode_start(transmitter,nume_fisier):
        transmitter.ack = transmitter.ack + 1  # the first ack sent is 1
segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False)
segment_type = b'\x01'
lungime_nume = len(nume_fisier)
segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)
segment = segment_number + segment_type + segment_len
for ch in nume_fisier:
segment += (ord(ch).to_bytes(1, byteorder='big', signed=False))
return segment
    # builds the packets that carry the bytes read from the file
@staticmethod
def encode_data(transmitter,segment_data):
        transmitter.ack = transmitter.ack + 1  # the first ack sent is 1
segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False)
segment_type = b'\x02'
segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)
segment = segment_number + segment_type + segment_len + segment_data
return segment
    # in the header's length field, the value is the number of useful (non-padding) characters
@staticmethod
def encode_end(transmitter,segment_data):
global end_transmission
        transmitter.ack = transmitter.ack + 1  # the first ack sent is 1
segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False)
segment_type = b'\x03'
segment_data_len = len(segment_data)
segment_data = segment_data + b'\x00'*(DEFAULT_SIZE - segment_data_len)
segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=False)
segment = segment_number + segment_type + segment_len + segment_data
return segment
@staticmethod
def encode_error(transmitter,segment_data):
pass
@staticmethod
def encode(transmitter,tip, data):
segment_type = {
'START': DataEncoding.encode_start,
'DATA' : DataEncoding.encode_data,
'END' : DataEncoding.encode_end
}
return segment_type.get(tip, DataEncoding.encode_error)(transmitter,data)
    # read the file as chunks of bytes
@staticmethod
def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):
with open(transmitter.filepath, "rb") as f:
while True:
chunk = f.read(chunk_size)
if chunk:
yield chunk
else:
break
    # encode the byte chunks read from the file
@staticmethod
def encode_bytes(transmitter):
        for b in DataEncoding.bytes_from_file(transmitter):  # bytes_from_file expects the transmitter object, which carries the filepath
if len(b) == DEFAULT_SIZE:
yield DataEncoding.encode(transmitter,'DATA', b)
else:
yield DataEncoding.encode(transmitter,'END', b)
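
# Illustrative usage sketch (assumed, not from the original module): the static methods
# above only need an object carrying `ack` and `filepath`, so a tiny stand-in suffices
# to show an encode/decode round trip of the START segment; names are illustrative only.
if __name__ == "__main__":
    class _FakeTransmitter:
        def __init__(self, filepath):
            self.ack = 0  # running segment number, bumped by every encode_* call
            self.filepath = filepath

    tx = _FakeTransmitter("example.bin")
    start_segment = DataEncoding.encode(tx, 'START', "example.bin")
    decoded = DataEncoding.segment_decode(start_segment)
    # Header layout: 4-byte segment number, 1-byte type, 2-byte payload length, then payload.
    print(decoded['ack'], decoded['tip'], decoded['len'], bytes(decoded['data']).decode())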
|
normal
|
{
"blob_id": "47c5375816ab35e8225e5f3695f7ee2ab5336076",
"index": 4312,
"step-1": "<mask token>\n\n\nclass DataEncoding:\n\n @staticmethod\n def segment_decode(segment):\n arr = bytearray(segment)\n ack_binary = bytearray([arr[i] for i in range(4)])\n tip_binary = bytearray([arr[4]])\n len_binary = bytearray([arr[i] for i in (5, 6)])\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] !=\n b'\\x00'])\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\n <mask token>\n\n @staticmethod\n def encode_data(transmitter, segment_data):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x02'\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_end(transmitter, segment_data):\n global end_transmission\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x03'\n segment_data_len = len(segment_data)\n segment_data = segment_data + b'\\x00' * (DEFAULT_SIZE -\n segment_data_len)\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=\n False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n <mask token>\n\n @staticmethod\n def encode(transmitter, tip, data):\n segment_type = {'START': DataEncoding.encode_start, 'DATA':\n DataEncoding.encode_data, 'END': DataEncoding.encode_end}\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,\n data)\n\n @staticmethod\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\n with open(transmitter.filepath, 'rb') as f:\n while True:\n chunk = f.read(chunk_size)\n if chunk:\n yield chunk\n else:\n break\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DataEncoding:\n\n @staticmethod\n def segment_decode(segment):\n arr = bytearray(segment)\n ack_binary = bytearray([arr[i] for i in range(4)])\n tip_binary = bytearray([arr[4]])\n len_binary = bytearray([arr[i] for i in (5, 6)])\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] !=\n b'\\x00'])\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\n\n @staticmethod\n def encode_start(transmitter, nume_fisier):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x01'\n lungime_nume = len(nume_fisier)\n segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len\n for ch in nume_fisier:\n segment += ord(ch).to_bytes(1, byteorder='big', signed=False)\n return segment\n\n @staticmethod\n def encode_data(transmitter, segment_data):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x02'\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_end(transmitter, segment_data):\n global end_transmission\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x03'\n segment_data_len = len(segment_data)\n segment_data = segment_data + b'\\x00' * (DEFAULT_SIZE -\n segment_data_len)\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=\n False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n <mask token>\n\n @staticmethod\n def encode(transmitter, tip, data):\n segment_type = {'START': DataEncoding.encode_start, 'DATA':\n DataEncoding.encode_data, 'END': DataEncoding.encode_end}\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,\n data)\n\n @staticmethod\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\n with open(transmitter.filepath, 'rb') as f:\n while True:\n chunk = f.read(chunk_size)\n if chunk:\n yield chunk\n else:\n break\n\n @staticmethod\n def encode_bytes(transmitter):\n for b in DataEncoding.bytes_from_file(transmitter.filepath):\n if len(b) == DEFAULT_SIZE:\n yield DataEncoding.encode(transmitter, 'DATA', b)\n else:\n yield DataEncoding.encode(transmitter, 'END', b)\n",
"step-3": "<mask token>\n\n\nclass DataEncoding:\n\n @staticmethod\n def segment_decode(segment):\n arr = bytearray(segment)\n ack_binary = bytearray([arr[i] for i in range(4)])\n tip_binary = bytearray([arr[4]])\n len_binary = bytearray([arr[i] for i in (5, 6)])\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] !=\n b'\\x00'])\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\n\n @staticmethod\n def encode_start(transmitter, nume_fisier):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x01'\n lungime_nume = len(nume_fisier)\n segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len\n for ch in nume_fisier:\n segment += ord(ch).to_bytes(1, byteorder='big', signed=False)\n return segment\n\n @staticmethod\n def encode_data(transmitter, segment_data):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x02'\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_end(transmitter, segment_data):\n global end_transmission\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x03'\n segment_data_len = len(segment_data)\n segment_data = segment_data + b'\\x00' * (DEFAULT_SIZE -\n segment_data_len)\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=\n False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_error(transmitter, segment_data):\n pass\n\n @staticmethod\n def encode(transmitter, tip, data):\n segment_type = {'START': DataEncoding.encode_start, 'DATA':\n DataEncoding.encode_data, 'END': DataEncoding.encode_end}\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,\n data)\n\n @staticmethod\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\n with open(transmitter.filepath, 'rb') as f:\n while True:\n chunk = f.read(chunk_size)\n if chunk:\n yield chunk\n else:\n break\n\n @staticmethod\n def encode_bytes(transmitter):\n for b in DataEncoding.bytes_from_file(transmitter.filepath):\n if len(b) == DEFAULT_SIZE:\n yield DataEncoding.encode(transmitter, 'DATA', b)\n else:\n yield DataEncoding.encode(transmitter, 'END', b)\n",
"step-4": "DEFAULT_SIZE = 512\n\n\nclass DataEncoding:\n\n @staticmethod\n def segment_decode(segment):\n arr = bytearray(segment)\n ack_binary = bytearray([arr[i] for i in range(4)])\n tip_binary = bytearray([arr[4]])\n len_binary = bytearray([arr[i] for i in (5, 6)])\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] !=\n b'\\x00'])\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\n\n @staticmethod\n def encode_start(transmitter, nume_fisier):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x01'\n lungime_nume = len(nume_fisier)\n segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len\n for ch in nume_fisier:\n segment += ord(ch).to_bytes(1, byteorder='big', signed=False)\n return segment\n\n @staticmethod\n def encode_data(transmitter, segment_data):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x02'\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_end(transmitter, segment_data):\n global end_transmission\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x03'\n segment_data_len = len(segment_data)\n segment_data = segment_data + b'\\x00' * (DEFAULT_SIZE -\n segment_data_len)\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=\n False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_error(transmitter, segment_data):\n pass\n\n @staticmethod\n def encode(transmitter, tip, data):\n segment_type = {'START': DataEncoding.encode_start, 'DATA':\n DataEncoding.encode_data, 'END': DataEncoding.encode_end}\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,\n data)\n\n @staticmethod\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\n with open(transmitter.filepath, 'rb') as f:\n while True:\n chunk = f.read(chunk_size)\n if chunk:\n yield chunk\n else:\n break\n\n @staticmethod\n def encode_bytes(transmitter):\n for b in DataEncoding.bytes_from_file(transmitter.filepath):\n if len(b) == DEFAULT_SIZE:\n yield DataEncoding.encode(transmitter, 'DATA', b)\n else:\n yield DataEncoding.encode(transmitter, 'END', b)\n",
"step-5": "DEFAULT_SIZE = 512\r\n\r\nclass DataEncoding:\r\n @staticmethod\r\n def segment_decode(segment):\r\n arr = bytearray(segment)\r\n ack_binary = bytearray([arr[i] for i in range(4)])\r\n tip_binary = bytearray([arr[4]])\r\n len_binary = bytearray([arr[i] for i in (5,6)])\r\n\r\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\r\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\r\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\r\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] != b'\\x00'])\r\n\r\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\r\n\r\n\r\n # codificare: (segment_number, segment_type, segment_len), segment_data\r\n # creeaza primul pachet, cel care contine numele\r\n @staticmethod\r\n def encode_start(transmitter,nume_fisier):\r\n transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1\r\n segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False)\r\n\r\n segment_type = b'\\x01'\r\n lungime_nume = len(nume_fisier)\r\n segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)\r\n segment = segment_number + segment_type + segment_len\r\n\r\n for ch in nume_fisier:\r\n segment += (ord(ch).to_bytes(1, byteorder='big', signed=False))\r\n\r\n return segment\r\n\r\n\r\n # creeaza pachetele care contine bitii din fisier\r\n @staticmethod\r\n def encode_data(transmitter,segment_data):\r\n transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1\r\n segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False)\r\n\r\n segment_type = b'\\x02'\r\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\r\n segment = segment_number + segment_type + segment_len + segment_data\r\n\r\n return segment\r\n\r\n # in campul de segment_code, al doilea octet va fi lungimea caracterelor utile\r\n @staticmethod\r\n def encode_end(transmitter,segment_data):\r\n global end_transmission\r\n transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1\r\n segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False)\r\n\r\n segment_type = b'\\x03'\r\n segment_data_len = len(segment_data)\r\n segment_data = segment_data + b'\\x00'*(DEFAULT_SIZE - segment_data_len)\r\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=False)\r\n segment = segment_number + segment_type + segment_len + segment_data\r\n\r\n return segment\r\n\r\n @staticmethod\r\n def encode_error(transmitter,segment_data):\r\n pass\r\n\r\n\r\n @staticmethod\r\n def encode(transmitter,tip, data):\r\n segment_type = {\r\n 'START': DataEncoding.encode_start,\r\n 'DATA' : DataEncoding.encode_data,\r\n 'END' : DataEncoding.encode_end\r\n }\r\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,data)\r\n\r\n\r\n #citirea fisier ca pachete de octeti\r\n @staticmethod\r\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\r\n with open(transmitter.filepath, \"rb\") as f:\r\n while True:\r\n chunk = f.read(chunk_size)\r\n if chunk:\r\n yield chunk\r\n else:\r\n break\r\n\r\n #codificarea pachetelor de octeti\r\n @staticmethod\r\n def encode_bytes(transmitter):\r\n for b in DataEncoding.bytes_from_file(transmitter.filepath):\r\n if len(b) == DEFAULT_SIZE:\r\n yield DataEncoding.encode(transmitter,'DATA', b)\r\n else:\r\n yield DataEncoding.encode(transmitter,'END', b)\r\n\r\n\r\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<mask token>
class TestFileReadingFunctions(unittest.TestCase):
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(os.path.realpath(
__file__)), 'data')
self.one_word_per_line_path = os.path.join(self.data_dir,
'one_word_per_line.txt')
self.one_sent_per_line_path = os.path.join(self.data_dir,
'one_sent_per_line.txt')
self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')
self.word_cnt_path_list = [self.one_sent_per_line_path, self.
one_word_per_line_path]
self.logger = logging.getLogger('ReadingFunctions Test Logger')
def test_token_cnt(self):
one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4})
one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4, 'e_5': 5, 'f_6': 6})
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path
], separator=None, workers=1, parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path
], separator=None, workers=3, parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path
], separator=None, workers=1, parallel_mode='size')
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path
], separator=None, workers=3, parallel_mode='size')
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=1,
parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=3,
parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=1,
parallel_mode='file')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=3,
parallel_mode='file')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
    <mask token>
def test_load_token_id(self):
token2id, id2token = token_util.load_token_id(self.token2id_path)
self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,
'UNK': 4})
self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])
    <mask token>
<|reserved_special_token_1|>
<mask token>
class TestFileReadingFunctions(unittest.TestCase):
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(os.path.realpath(
__file__)), 'data')
self.one_word_per_line_path = os.path.join(self.data_dir,
'one_word_per_line.txt')
self.one_sent_per_line_path = os.path.join(self.data_dir,
'one_sent_per_line.txt')
self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')
self.word_cnt_path_list = [self.one_sent_per_line_path, self.
one_word_per_line_path]
self.logger = logging.getLogger('ReadingFunctions Test Logger')
def test_token_cnt(self):
one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4})
one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4, 'e_5': 5, 'f_6': 6})
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path
], separator=None, workers=1, parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path
], separator=None, workers=3, parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path
], separator=None, workers=1, parallel_mode='size')
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path
], separator=None, workers=3, parallel_mode='size')
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=1,
parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=3,
parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=1,
parallel_mode='file')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=3,
parallel_mode='file')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
def test_gen_token_id_from_file(self):
one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4})
one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4, 'e_5': 5, 'f_6': 6})
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=-1, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=2, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=-1, max_size=2, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,
max_size=-1, separator=None)
self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'
], delta=2)
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-
1, max_size=3, separator=None)
self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)
def test_load_token_id(self):
token2id, id2token = token_util.load_token_id(self.token2id_path)
self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,
'UNK': 4})
self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])
    <mask token>
<|reserved_special_token_1|>
<mask token>
class TestFileReadingFunctions(unittest.TestCase):
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(os.path.realpath(
__file__)), 'data')
self.one_word_per_line_path = os.path.join(self.data_dir,
'one_word_per_line.txt')
self.one_sent_per_line_path = os.path.join(self.data_dir,
'one_sent_per_line.txt')
self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')
self.word_cnt_path_list = [self.one_sent_per_line_path, self.
one_word_per_line_path]
self.logger = logging.getLogger('ReadingFunctions Test Logger')
def test_token_cnt(self):
one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4})
one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4, 'e_5': 5, 'f_6': 6})
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path
], separator=None, workers=1, parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path
], separator=None, workers=3, parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path
], separator=None, workers=1, parallel_mode='size')
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path
], separator=None, workers=3, parallel_mode='size')
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=1,
parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=3,
parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=1,
parallel_mode='file')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=3,
parallel_mode='file')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
def test_gen_token_id_from_file(self):
one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4})
one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4, 'e_5': 5, 'f_6': 6})
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=-1, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=2, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=-1, max_size=2, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,
max_size=-1, separator=None)
self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'
], delta=2)
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-
1, max_size=3, separator=None)
self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)
def test_load_token_id(self):
token2id, id2token = token_util.load_token_id(self.token2id_path)
self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,
'UNK': 4})
self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])
if __name__ == '__main__':
unittest.main()
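
# Note (an assumption, not verified against the repository): test_load_token_id above
# suggests that data/token2id.txt stores one token per line, with the line index acting
# as the id, so an equivalent fixture could be produced roughly as:
#     with open('data/token2id.txt', 'w') as f:
#         f.write('\n'.join(['a_0', 'b_1', 'c_2', 'd_3', 'UNK']))
# The real fixture may instead store explicit token/id pairs; only token_util defines it.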
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import unittest
import logging
from collections import Counter
from utility import token_util
class TestFileReadingFunctions(unittest.TestCase):
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(os.path.realpath(
__file__)), 'data')
self.one_word_per_line_path = os.path.join(self.data_dir,
'one_word_per_line.txt')
self.one_sent_per_line_path = os.path.join(self.data_dir,
'one_sent_per_line.txt')
self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')
self.word_cnt_path_list = [self.one_sent_per_line_path, self.
one_word_per_line_path]
self.logger = logging.getLogger('ReadingFunctions Test Logger')
def test_token_cnt(self):
one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4})
one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4, 'e_5': 5, 'f_6': 6})
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path
], separator=None, workers=1, parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path
], separator=None, workers=3, parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path
], separator=None, workers=1, parallel_mode='size')
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path
], separator=None, workers=3, parallel_mode='size')
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=1,
parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=3,
parallel_mode='size')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=1,
parallel_mode='file')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,
self.one_sent_per_line_path], separator=None, workers=3,
parallel_mode='file')
self.assertEqual(c, one_word_per_line_counter +
one_sent_per_line_counter)
def test_gen_token_id_from_file(self):
one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4})
one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,
'd_4': 4, 'e_5': 5, 'f_6': 6})
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=-1, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=2, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,
min_cnt=-1, max_size=2, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)
self.assertEqual(res_list, ['f_6', 'e_5'])
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,
max_size=-1, separator=None)
self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'
], delta=2)
res_list = token_util.gen_token_id_from_file([self.
one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-
1, max_size=3, separator=None)
self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)
def test_load_token_id(self):
token2id, id2token = token_util.load_token_id(self.token2id_path)
self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,
'UNK': 4})
self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import unittest
import logging
from collections import Counter
from utility import token_util
class TestFileReadingFunctions(unittest.TestCase):
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
self.one_word_per_line_path = os.path.join(self.data_dir, "one_word_per_line.txt")
self.one_sent_per_line_path = os.path.join(self.data_dir, "one_sent_per_line.txt")
self.token2id_path = os.path.join(self.data_dir, "token2id.txt")
self.word_cnt_path_list = [self.one_sent_per_line_path, self.one_word_per_line_path]
self.logger = logging.getLogger("ReadingFunctions Test Logger")
def test_token_cnt(self):
one_word_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4})
one_sent_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4, "e_5": 5, "f_6": 6})
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=1, parallel_mode="size")
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=3, parallel_mode="size")
self.assertEqual(c, one_word_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="size")
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="size")
self.assertEqual(c, one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="size")
self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="size")
self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="file")
self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)
c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="file")
self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)
def test_gen_token_id_from_file(self):
one_word_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4})
one_sent_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4, "e_5": 5, "f_6": 6})
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=-1, separator=None)
self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3", "b_2", "a_1"])
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=2, max_size=-1, separator=None)
self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3"])
res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=2, separator=None)
self.assertEqual(res_list, ["f_6", "e_5"])
res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)
self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3", "b_2", "a_1"])
res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)
self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3"])
res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)
self.assertEqual(res_list, ["f_6", "e_5"])
res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2, max_size=-1, separator=None)
self.assertAlmostEqual(res_list, ["d_4", "f_6", "c_3", "e_5", "b_2"], delta=2)
res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-1, max_size=3, separator=None)
self.assertAlmostEqual(res_list, ["d_4", "f_6", "c_3"], delta=2)
def test_load_token_id(self):
token2id, id2token = token_util.load_token_id(self.token2id_path)
self.assertEqual(token2id, {"a_0": 0, "b_1": 1, "c_2": 2, "d_3": 3, "UNK": 4})
self.assertEqual(id2token, ["a_0", "b_1", "c_2", "d_3", "UNK"])
if __name__ == "__main__":
unittest.main()
|
flexible
|
{
"blob_id": "7c3798aa9cc5424656572dfaa87f7acb961613eb",
"index": 8715,
"step-1": "<mask token>\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(\n __file__)), 'data')\n self.one_word_per_line_path = os.path.join(self.data_dir,\n 'one_word_per_line.txt')\n self.one_sent_per_line_path = os.path.join(self.data_dir,\n 'one_sent_per_line.txt')\n self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.\n one_word_per_line_path]\n self.logger = logging.getLogger('ReadingFunctions Test Logger')\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n <mask token>\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,\n 'UNK': 4})\n self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(\n __file__)), 'data')\n self.one_word_per_line_path = os.path.join(self.data_dir,\n 'one_word_per_line.txt')\n self.one_sent_per_line_path = os.path.join(self.data_dir,\n 'one_sent_per_line.txt')\n self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.\n one_word_per_line_path]\n self.logger = logging.getLogger('ReadingFunctions Test Logger')\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n\n def test_gen_token_id_from_file(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n 
res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,\n max_size=-1, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'\n ], delta=2)\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-\n 1, max_size=3, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,\n 'UNK': 4})\n self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(\n __file__)), 'data')\n self.one_word_per_line_path = os.path.join(self.data_dir,\n 'one_word_per_line.txt')\n self.one_sent_per_line_path = os.path.join(self.data_dir,\n 'one_sent_per_line.txt')\n self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.\n one_word_per_line_path]\n self.logger = logging.getLogger('ReadingFunctions Test Logger')\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n\n def test_gen_token_id_from_file(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n 
res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,\n max_size=-1, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'\n ], delta=2)\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-\n 1, max_size=3, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,\n 'UNK': 4})\n self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport sys\nimport os\nimport unittest\nimport logging\nfrom collections import Counter\nfrom utility import token_util\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(\n __file__)), 'data')\n self.one_word_per_line_path = os.path.join(self.data_dir,\n 'one_word_per_line.txt')\n self.one_sent_per_line_path = os.path.join(self.data_dir,\n 'one_sent_per_line.txt')\n self.token2id_path = os.path.join(self.data_dir, 'token2id.txt')\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.\n one_word_per_line_path]\n self.logger = logging.getLogger('ReadingFunctions Test Logger')\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=1, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path\n ], separator=None, workers=3, parallel_mode='size')\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='size')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=1,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path,\n self.one_sent_per_line_path], separator=None, workers=3,\n parallel_mode='file')\n self.assertEqual(c, one_word_per_line_counter +\n one_sent_per_line_counter)\n\n def test_gen_token_id_from_file(self):\n one_word_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4})\n one_sent_per_line_counter = Counter({'a_1': 1, 'b_2': 2, 'c_3': 3,\n 'd_4': 4, 'e_5': 5, 'f_6': 6})\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter,\n min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 
'e_5', 'd_4', 'c_3', 'b_2', 'a_1'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5', 'd_4', 'c_3'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, ['f_6', 'e_5'])\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2,\n max_size=-1, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3', 'e_5', 'b_2'\n ], delta=2)\n res_list = token_util.gen_token_id_from_file([self.\n one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-\n 1, max_size=3, separator=None)\n self.assertAlmostEqual(res_list, ['d_4', 'f_6', 'c_3'], delta=2)\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {'a_0': 0, 'b_1': 1, 'c_2': 2, 'd_3': 3,\n 'UNK': 4})\n self.assertEqual(id2token, ['a_0', 'b_1', 'c_2', 'd_3', 'UNK'])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport unittest\nimport logging\nfrom collections import Counter\n\nfrom utility import token_util\n\n\nclass TestFileReadingFunctions(unittest.TestCase):\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data\")\n self.one_word_per_line_path = os.path.join(self.data_dir, \"one_word_per_line.txt\")\n self.one_sent_per_line_path = os.path.join(self.data_dir, \"one_sent_per_line.txt\")\n self.token2id_path = os.path.join(self.data_dir, \"token2id.txt\")\n self.word_cnt_path_list = [self.one_sent_per_line_path, self.one_word_per_line_path]\n\n self.logger = logging.getLogger(\"ReadingFunctions Test Logger\")\n\n def test_token_cnt(self):\n one_word_per_line_counter = Counter({\"a_1\": 1, \"b_2\": 2, \"c_3\": 3, \"d_4\": 4})\n one_sent_per_line_counter = Counter({\"a_1\": 1, \"b_2\": 2, \"c_3\": 3, \"d_4\": 4, \"e_5\": 5, \"f_6\": 6})\n\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=1, parallel_mode=\"size\")\n self.assertEqual(c, one_word_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=3, parallel_mode=\"size\")\n self.assertEqual(c, one_word_per_line_counter)\n\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=1, parallel_mode=\"size\")\n self.assertEqual(c, one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=3, parallel_mode=\"size\")\n self.assertEqual(c, one_sent_per_line_counter)\n\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode=\"size\")\n self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode=\"size\")\n self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)\n\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode=\"file\")\n self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)\n c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode=\"file\")\n self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)\n\n def test_gen_token_id_from_file(self):\n one_word_per_line_counter = Counter({\"a_1\": 1, \"b_2\": 2, \"c_3\": 3, \"d_4\": 4})\n one_sent_per_line_counter = Counter({\"a_1\": 1, \"b_2\": 2, \"c_3\": 3, \"d_4\": 4, \"e_5\": 5, \"f_6\": 6})\n\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\", \"d_4\", \"c_3\", \"b_2\", \"a_1\"])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\", \"d_4\", \"c_3\"])\n res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\"])\n\n res_list = 
token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\", \"d_4\", \"c_3\", \"b_2\", \"a_1\"])\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\", \"d_4\", \"c_3\"])\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)\n self.assertEqual(res_list, [\"f_6\", \"e_5\"])\n\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2, max_size=-1, separator=None)\n self.assertAlmostEqual(res_list, [\"d_4\", \"f_6\", \"c_3\", \"e_5\", \"b_2\"], delta=2)\n res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-1, max_size=3, separator=None)\n self.assertAlmostEqual(res_list, [\"d_4\", \"f_6\", \"c_3\"], delta=2)\n\n def test_load_token_id(self):\n token2id, id2token = token_util.load_token_id(self.token2id_path)\n self.assertEqual(token2id, {\"a_0\": 0, \"b_1\": 1, \"c_2\": 2, \"d_3\": 3, \"UNK\": 4})\n self.assertEqual(id2token, [\"a_0\", \"b_1\", \"c_2\", \"d_3\", \"UNK\"])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('restapp', '0021_auto_20210320_1421')]
operations = [migrations.AddField(model_name='order', name='phone',
field=models.CharField(max_length=13, null=True)), migrations.
AlterField(model_name='order', name='order_no', field=models.
CharField(default='G2QYWH30', max_length=10))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('restapp', '0021_auto_20210320_1421')]
operations = [migrations.AddField(model_name='order', name='phone',
field=models.CharField(max_length=13, null=True)), migrations.
AlterField(model_name='order', name='order_no', field=models.
CharField(default='G2QYWH30', max_length=10))]
<|reserved_special_token_1|>
# Generated by Django 3.1.7 on 2021-03-20 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restapp', '0021_auto_20210320_1421'),
]
operations = [
migrations.AddField(
model_name='order',
name='phone',
field=models.CharField(max_length=13, null=True),
),
migrations.AlterField(
model_name='order',
name='order_no',
field=models.CharField(default='G2QYWH30', max_length=10),
),
]
|
flexible
|
{
"blob_id": "bf160bd2fc924a11d340bd466b4a879d1cdcd86e",
"index": 7639,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('restapp', '0021_auto_20210320_1421')]\n operations = [migrations.AddField(model_name='order', name='phone',\n field=models.CharField(max_length=13, null=True)), migrations.\n AlterField(model_name='order', name='order_no', field=models.\n CharField(default='G2QYWH30', max_length=10))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('restapp', '0021_auto_20210320_1421')]\n operations = [migrations.AddField(model_name='order', name='phone',\n field=models.CharField(max_length=13, null=True)), migrations.\n AlterField(model_name='order', name='order_no', field=models.\n CharField(default='G2QYWH30', max_length=10))]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-03-20 14:31\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('restapp', '0021_auto_20210320_1421'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='phone',\n field=models.CharField(max_length=13, null=True),\n ),\n migrations.AlterField(\n model_name='order',\n name='order_no',\n field=models.CharField(default='G2QYWH30', max_length=10),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import (
StackingClassifier,
RandomForestClassifier
)
import pandas as pd
from sklearn.metrics import f1_score
# feel free to import any sklearn model here
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
def load_data():
"""
Helper function for loading in the data
------
# of training samples: 419
# of testing samples: 150
------
"""
df = pd.read_csv("../../Data/breast_cancer_data/data.csv")
cols = df.columns
X = df[cols[2:-1]].to_numpy()
y = df[cols[1]].to_numpy()
    y = (y == 'M').astype(int) * 2 - 1  # map labels to {-1, +1}; np.int was removed in NumPy 1.24
train_X = X[:-150]
train_y = y[:-150]
test_X = X[-150:]
test_y = y[-150:]
return train_X, train_y, test_X, test_y
def main():
np.random.seed(0)
train_X, train_y, test_X, test_y = load_data()
# Stacking models:
# Create your stacked model using StackingClassifier
base_models = [
('rfc', RandomForestClassifier()),
('svm', SVC()),
('gnb', GaussianNB()),
('knc', KNeighborsClassifier()),
('dtc', DecisionTreeClassifier())
]
# The default final_estimator is LogisticRegression
sc = StackingClassifier(estimators=base_models)
# fit the model on the training data
sc.fit(train_X, train_y)
# predict
y_pred = sc.predict(test_X)
# Get and print f1-score on test data
print(f"f1 score = {f1_score(y_pred, test_y , average = 'weighted')}")
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "cf65966f5daf88bdefc7a8aa2ff80835cff0d0b6",
"index": 4627,
"step-1": "<mask token>\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv('../../Data/breast_cancer_data/data.csv')\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y == 'M').astype(np.int) * 2 - 1\n train_X = X[:-150]\n train_y = y[:-150]\n test_X = X[-150:]\n test_y = y[-150:]\n return train_X, train_y, test_X, test_y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv('../../Data/breast_cancer_data/data.csv')\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y == 'M').astype(np.int) * 2 - 1\n train_X = X[:-150]\n train_y = y[:-150]\n test_X = X[-150:]\n test_y = y[-150:]\n return train_X, train_y, test_X, test_y\n\n\ndef main():\n np.random.seed(0)\n train_X, train_y, test_X, test_y = load_data()\n base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (\n 'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',\n DecisionTreeClassifier())]\n sc = StackingClassifier(estimators=base_models)\n sc.fit(train_X, train_y)\n y_pred = sc.predict(test_X)\n print(f\"f1 score = {f1_score(y_pred, test_y, average='weighted')}\")\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv('../../Data/breast_cancer_data/data.csv')\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y == 'M').astype(np.int) * 2 - 1\n train_X = X[:-150]\n train_y = y[:-150]\n test_X = X[-150:]\n test_y = y[-150:]\n return train_X, train_y, test_X, test_y\n\n\ndef main():\n np.random.seed(0)\n train_X, train_y, test_X, test_y = load_data()\n base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (\n 'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',\n DecisionTreeClassifier())]\n sc = StackingClassifier(estimators=base_models)\n sc.fit(train_X, train_y)\n y_pred = sc.predict(test_X)\n print(f\"f1 score = {f1_score(y_pred, test_y, average='weighted')}\")\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import StackingClassifier, RandomForestClassifier\nimport pandas as pd\nfrom sklearn.metrics import f1_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv('../../Data/breast_cancer_data/data.csv')\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y == 'M').astype(np.int) * 2 - 1\n train_X = X[:-150]\n train_y = y[:-150]\n test_X = X[-150:]\n test_y = y[-150:]\n return train_X, train_y, test_X, test_y\n\n\ndef main():\n np.random.seed(0)\n train_X, train_y, test_X, test_y = load_data()\n base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (\n 'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',\n DecisionTreeClassifier())]\n sc = StackingClassifier(estimators=base_models)\n sc.fit(train_X, train_y)\n y_pred = sc.predict(test_X)\n print(f\"f1 score = {f1_score(y_pred, test_y, average='weighted')}\")\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import (\n StackingClassifier,\n RandomForestClassifier\n) \nimport pandas as pd\nfrom sklearn.metrics import f1_score\n# feel free to import any sklearn model here\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y\n\ndef main():\n np.random.seed(0)\n train_X, train_y, test_X, test_y = load_data()\n \n # Stacking models:\n # Create your stacked model using StackingClassifier\n base_models = [\n ('rfc', RandomForestClassifier()),\n ('svm', SVC()),\n ('gnb', GaussianNB()),\n ('knc', KNeighborsClassifier()),\n ('dtc', DecisionTreeClassifier())\n ]\n \n # The default final_estimator is LogisticRegression\n sc = StackingClassifier(estimators=base_models)\n\n # fit the model on the training data\n sc.fit(train_X, train_y)\n\n # predict\n y_pred = sc.predict(test_X)\n\n # Get and print f1-score on test data\n print(f\"f1 score = {f1_score(y_pred, test_y , average = 'weighted')}\")\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Brokerage(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Brokerage(models.Model):
BrokerageName = models.CharField(max_length=500)
ReviewLink = models.CharField(max_length=1000)
ContactLink = models.CharField(max_length=1000)
TotalAgents = models.IntegerField()
Location = models.CharField(max_length=500)
Desks = models.IntegerField()
YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)
CommisionSplit = models.CharField(max_length=8)
Cap = models.DecimalField(max_digits=12, decimal_places=2)
TrainingPerWeek = models.IntegerField()
Onboarding = models.BooleanField()
Mentorship = models.BooleanField()
Teams_Hiring = models.BooleanField()
Marketing = models.CharField(max_length=500)
TotalListings = models.IntegerField()
ConferenceRooms = models.BooleanField()
OfficeLeaders = models.CharField(max_length=500)
<|reserved_special_token_1|>
from django.db import models
class Brokerage(models.Model):
BrokerageName = models.CharField(max_length=500)
ReviewLink = models.CharField(max_length=1000)
ContactLink = models.CharField(max_length=1000)
TotalAgents = models.IntegerField()
Location = models.CharField(max_length=500)
Desks = models.IntegerField()
YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)
CommisionSplit = models.CharField(max_length=8)
Cap = models.DecimalField(max_digits=12, decimal_places=2)
TrainingPerWeek = models.IntegerField()
Onboarding = models.BooleanField()
Mentorship = models.BooleanField()
Teams_Hiring = models.BooleanField()
Marketing = models.CharField(max_length=500)
TotalListings = models.IntegerField()
ConferenceRooms = models.BooleanField()
OfficeLeaders = models.CharField(max_length=500)
<|reserved_special_token_1|>
from django.db import models
class Brokerage(models.Model):
BrokerageName = models.CharField(max_length=500)
#To-Do Fix additional settings for ImagesFields/FileFields
#BrokerageLogo = ImageField
ReviewLink = models.CharField(max_length=1000)
ContactLink = models.CharField(max_length=1000)
TotalAgents = models.IntegerField()
Location = models.CharField(max_length=500)
Desks = models.IntegerField()
YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)
CommisionSplit = models.CharField (max_length=8)
#To-Do set a getter for Cap that returns none
Cap = models.DecimalField(max_digits=12, decimal_places=2)
TrainingPerWeek = models.IntegerField()
Onboarding = models.BooleanField()
Mentorship = models.BooleanField()
Teams_Hiring = models.BooleanField()
Marketing = models.CharField(max_length=500)
TotalListings = models.IntegerField()
ConferenceRooms = models.BooleanField()
OfficeLeaders = models.CharField (max_length=500)
#OfficeLeaderPhoto = models.ImageField
|
flexible
|
{
"blob_id": "174f744b641ee20272713fa2fe1991cb2c76830a",
"index": 99,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Brokerage(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Brokerage(models.Model):\n BrokerageName = models.CharField(max_length=500)\n ReviewLink = models.CharField(max_length=1000)\n ContactLink = models.CharField(max_length=1000)\n TotalAgents = models.IntegerField()\n Location = models.CharField(max_length=500)\n Desks = models.IntegerField()\n YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)\n CommisionSplit = models.CharField(max_length=8)\n Cap = models.DecimalField(max_digits=12, decimal_places=2)\n TrainingPerWeek = models.IntegerField()\n Onboarding = models.BooleanField()\n Mentorship = models.BooleanField()\n Teams_Hiring = models.BooleanField()\n Marketing = models.CharField(max_length=500)\n TotalListings = models.IntegerField()\n ConferenceRooms = models.BooleanField()\n OfficeLeaders = models.CharField(max_length=500)\n",
"step-4": "from django.db import models\n\n\nclass Brokerage(models.Model):\n BrokerageName = models.CharField(max_length=500)\n ReviewLink = models.CharField(max_length=1000)\n ContactLink = models.CharField(max_length=1000)\n TotalAgents = models.IntegerField()\n Location = models.CharField(max_length=500)\n Desks = models.IntegerField()\n YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)\n CommisionSplit = models.CharField(max_length=8)\n Cap = models.DecimalField(max_digits=12, decimal_places=2)\n TrainingPerWeek = models.IntegerField()\n Onboarding = models.BooleanField()\n Mentorship = models.BooleanField()\n Teams_Hiring = models.BooleanField()\n Marketing = models.CharField(max_length=500)\n TotalListings = models.IntegerField()\n ConferenceRooms = models.BooleanField()\n OfficeLeaders = models.CharField(max_length=500)\n",
"step-5": "from django.db import models\n\nclass Brokerage(models.Model):\n\tBrokerageName = models.CharField(max_length=500)\n\t#To-Do Fix additional settings for ImagesFields/FileFields\n\t#BrokerageLogo = ImageField\n\tReviewLink = models.CharField(max_length=1000)\n\tContactLink = models.CharField(max_length=1000)\n\tTotalAgents = models.IntegerField()\n\tLocation = models.CharField(max_length=500)\n\tDesks = models.IntegerField()\n\tYearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)\n\tCommisionSplit = models.CharField (max_length=8)\n\t#To-Do set a getter for Cap that returns none\n\tCap = models.DecimalField(max_digits=12, decimal_places=2)\n\tTrainingPerWeek = models.IntegerField()\n\tOnboarding = models.BooleanField()\n\tMentorship = models.BooleanField()\n\tTeams_Hiring = models.BooleanField()\n\tMarketing = models.CharField(max_length=500)\n\tTotalListings = models.IntegerField()\n\tConferenceRooms = models.BooleanField()\n\tOfficeLeaders = models.CharField (max_length=500)\n\t#OfficeLeaderPhoto = models.ImageField\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@ddt
class ApiTest(unittest.TestCase):
@classmethod
def setUpClass(cls) ->None:
cls.keyword = Keyword()
cls.cookie = None
cls.confData = LoadIni('config.ini')
logger.info('----------用例开始执行----------')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@ddt
class ApiTest(unittest.TestCase):
@classmethod
def setUpClass(cls) ->None:
cls.keyword = Keyword()
cls.cookie = None
cls.confData = LoadIni('config.ini')
logger.info('----------用例开始执行----------')
@file_data('../data/data.yaml')
def test_1_login(self, username, password):
s = requests.Session()
loginUrl = self.confData.getConfig('urlConfig', 'login')
data = {'uname': username, 'upass': password, 'encode': 1}
res = s.post(url=loginUrl, data=data)
logger.info(res.text)
cookie = dict(res.cookies)
sess = jsonpath.jsonpath(cookie, '$..{0}'.format('PHPSESSION'))
phpSession = 'PHP_SESSION=' + sess[0]
ApiTest.cookie = phpSession
logger.info('用例执行成功')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@ddt
class ApiTest(unittest.TestCase):
@classmethod
def setUpClass(cls) ->None:
cls.keyword = Keyword()
cls.cookie = None
cls.confData = LoadIni('config.ini')
logger.info('----------用例开始执行----------')
@file_data('../data/data.yaml')
def test_1_login(self, username, password):
s = requests.Session()
loginUrl = self.confData.getConfig('urlConfig', 'login')
data = {'uname': username, 'upass': password, 'encode': 1}
res = s.post(url=loginUrl, data=data)
logger.info(res.text)
cookie = dict(res.cookies)
sess = jsonpath.jsonpath(cookie, '$..{0}'.format('PHPSESSION'))
phpSession = 'PHP_SESSION=' + sess[0]
ApiTest.cookie = phpSession
logger.info('用例执行成功')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import unittest
import jsonpath
import requests
from apiunittest.lib.loadIni import LoadIni
from apiunittest.keyword.keyword import Keyword
from apiunittest.lib.log import logger
from ddt import ddt, file_data
@ddt
class ApiTest(unittest.TestCase):
@classmethod
def setUpClass(cls) ->None:
cls.keyword = Keyword()
cls.cookie = None
cls.confData = LoadIni('config.ini')
logger.info('----------用例开始执行----------')
@file_data('../data/data.yaml')
def test_1_login(self, username, password):
s = requests.Session()
loginUrl = self.confData.getConfig('urlConfig', 'login')
data = {'uname': username, 'upass': password, 'encode': 1}
res = s.post(url=loginUrl, data=data)
logger.info(res.text)
cookie = dict(res.cookies)
sess = jsonpath.jsonpath(cookie, '$..{0}'.format('PHPSESSION'))
phpSession = 'PHP_SESSION=' + sess[0]
ApiTest.cookie = phpSession
logger.info('用例执行成功')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
"""
测试用例
"""
import unittest
import jsonpath
import requests
from apiunittest.lib.loadIni import LoadIni
from apiunittest.keyword.keyword import Keyword
from apiunittest.lib.log import logger
from ddt import ddt, file_data
@ddt
class ApiTest(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.keyword = Keyword()
cls.cookie = None
cls.confData = LoadIni('config.ini')
logger.info('----------用例开始执行----------')
# 登录
@file_data('../data/data.yaml')
def test_1_login(self, username, password):
s = requests.Session()
loginUrl = self.confData.getConfig('urlConfig', 'login')
data = {
'uname': username,
'upass': password,
'encode': 1
}
res = s.post(url=loginUrl, data=data)
logger.info(res.text)
cookie = dict(res.cookies)
sess = jsonpath.jsonpath(cookie, '$..{0}'.format('PHPSESSION'))
phpSession = 'PHP_SESSION=' + sess[0]
ApiTest.cookie = phpSession
logger.info('用例执行成功')
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "b28bada020ac593783ac62994bb45311ebb78813",
"index": 9055,
"step-1": "<mask token>\n\n\n@ddt\nclass ApiTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.keyword = Keyword()\n cls.cookie = None\n cls.confData = LoadIni('config.ini')\n logger.info('----------用例开始执行----------')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@ddt\nclass ApiTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.keyword = Keyword()\n cls.cookie = None\n cls.confData = LoadIni('config.ini')\n logger.info('----------用例开始执行----------')\n\n @file_data('../data/data.yaml')\n def test_1_login(self, username, password):\n s = requests.Session()\n loginUrl = self.confData.getConfig('urlConfig', 'login')\n data = {'uname': username, 'upass': password, 'encode': 1}\n res = s.post(url=loginUrl, data=data)\n logger.info(res.text)\n cookie = dict(res.cookies)\n sess = jsonpath.jsonpath(cookie, '$..{0}'.format('PHPSESSION'))\n phpSession = 'PHP_SESSION=' + sess[0]\n ApiTest.cookie = phpSession\n logger.info('用例执行成功')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@ddt\nclass ApiTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.keyword = Keyword()\n cls.cookie = None\n cls.confData = LoadIni('config.ini')\n logger.info('----------用例开始执行----------')\n\n @file_data('../data/data.yaml')\n def test_1_login(self, username, password):\n s = requests.Session()\n loginUrl = self.confData.getConfig('urlConfig', 'login')\n data = {'uname': username, 'upass': password, 'encode': 1}\n res = s.post(url=loginUrl, data=data)\n logger.info(res.text)\n cookie = dict(res.cookies)\n sess = jsonpath.jsonpath(cookie, '$..{0}'.format('PHPSESSION'))\n phpSession = 'PHP_SESSION=' + sess[0]\n ApiTest.cookie = phpSession\n logger.info('用例执行成功')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nimport jsonpath\nimport requests\nfrom apiunittest.lib.loadIni import LoadIni\nfrom apiunittest.keyword.keyword import Keyword\nfrom apiunittest.lib.log import logger\nfrom ddt import ddt, file_data\n\n\n@ddt\nclass ApiTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.keyword = Keyword()\n cls.cookie = None\n cls.confData = LoadIni('config.ini')\n logger.info('----------用例开始执行----------')\n\n @file_data('../data/data.yaml')\n def test_1_login(self, username, password):\n s = requests.Session()\n loginUrl = self.confData.getConfig('urlConfig', 'login')\n data = {'uname': username, 'upass': password, 'encode': 1}\n res = s.post(url=loginUrl, data=data)\n logger.info(res.text)\n cookie = dict(res.cookies)\n sess = jsonpath.jsonpath(cookie, '$..{0}'.format('PHPSESSION'))\n phpSession = 'PHP_SESSION=' + sess[0]\n ApiTest.cookie = phpSession\n logger.info('用例执行成功')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "\"\"\"\r\n 测试用例\r\n\"\"\"\r\nimport unittest\r\nimport jsonpath\r\nimport requests\r\nfrom apiunittest.lib.loadIni import LoadIni\r\nfrom apiunittest.keyword.keyword import Keyword\r\nfrom apiunittest.lib.log import logger\r\nfrom ddt import ddt, file_data\r\n\r\n\r\n@ddt\r\nclass ApiTest(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(cls) -> None:\r\n cls.keyword = Keyword()\r\n cls.cookie = None\r\n cls.confData = LoadIni('config.ini')\r\n logger.info('----------用例开始执行----------')\r\n\r\n # 登录\r\n @file_data('../data/data.yaml')\r\n def test_1_login(self, username, password):\r\n s = requests.Session()\r\n loginUrl = self.confData.getConfig('urlConfig', 'login')\r\n\r\n data = {\r\n 'uname': username,\r\n 'upass': password,\r\n 'encode': 1\r\n }\r\n res = s.post(url=loginUrl, data=data)\r\n logger.info(res.text)\r\n cookie = dict(res.cookies)\r\n sess = jsonpath.jsonpath(cookie, '$..{0}'.format('PHPSESSION'))\r\n phpSession = 'PHP_SESSION=' + sess[0]\r\n ApiTest.cookie = phpSession\r\n logger.info('用例执行成功')\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('main_app', '0002_notebook_smathphone')]
operations = [migrations.RenameModel(old_name='Smathphone', new_name=
'Smartphone')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('main_app', '0002_notebook_smathphone')]
operations = [migrations.RenameModel(old_name='Smathphone', new_name=
'Smartphone')]
<|reserved_special_token_1|>
# Generated by Django 3.2.3 on 2021-05-23 19:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main_app', '0002_notebook_smathphone'),
]
operations = [
migrations.RenameModel(
old_name='Smathphone',
new_name='Smartphone',
),
]
|
flexible
|
{
"blob_id": "7e11a33d82926ed544640a0192e905d373f575da",
"index": 2766,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main_app', '0002_notebook_smathphone')]\n operations = [migrations.RenameModel(old_name='Smathphone', new_name=\n 'Smartphone')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main_app', '0002_notebook_smathphone')]\n operations = [migrations.RenameModel(old_name='Smathphone', new_name=\n 'Smartphone')]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-05-23 19:41\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main_app', '0002_notebook_smathphone'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Smathphone',\n new_name='Smartphone',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding:utf-8 -*-
# Author: washing
# DateTime: 2022/5/18 10:28
# File: 0668.py
# Desc: CV
from bisect import bisect_left

class Solution:
    def findKthNumber(self, m: int, n: int, k: int) -> int:
        # key(x) counts multiplication-table entries <= x; bisect_left returns the
        # smallest x whose count reaches k (the `key` argument needs Python 3.10+).
        return bisect_left(range(m * n), k, key=lambda x: x // n * n + sum(x // i for i in range(x // n + 1, m + 1)))
|
normal
|
{
"blob_id": "ec9efeca7eef7b8ee25c1e089e675bdb1e53413b",
"index": 417,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def findKthNumber(self, m: int, n: int, k: int) ->int:\n return bisect_left(range(m * n), k, key=lambda x: x // n * n + sum(\n x // i for i in range(x // n + 1, m + 1)))\n",
"step-4": "# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2022/5/18 10:28\r\n# File: 0668.py\r\n# Desc: CV\r\n\r\nclass Solution:\r\n def findKthNumber(self, m: int, n: int, k: int) -> int:\r\n return bisect_left(range(m * n), k, key=lambda x: x // n * n + sum(x // i for i in range(x // n + 1, m + 1)))\r\n\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
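The one-liner above solves LeetCode 668 (k-th smallest value in an m x n multiplication table) by binary-searching on the answer: for a candidate x, the key counts how many table entries are <= x (all n entries for rows i <= x // n, plus x // i for each remaining row), and bisect_left returns the smallest x whose count reaches k. An expanded sketch of the same idea, with the bisect import spelled out; note that the key= parameter of bisect_left requires Python 3.10+:

from bisect import bisect_left

def find_kth_number(m: int, n: int, k: int) -> int:
    # Count entries of the m x n multiplication table that are <= x.
    def count_le(x: int) -> int:
        full_rows = min(x // n, m)                      # rows where all n entries are <= x
        rest = sum(x // i for i in range(full_rows + 1, m + 1))
        return full_rows * n + rest

    # Smallest x in [0, m*n] whose count reaches k; that x is always a table entry.
    return bisect_left(range(m * n), k, key=count_le)

print(find_kth_number(3, 3, 5))  # 3 (table entries sorted: 1 2 2 3 3 4 6 6 9)
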
import json
import sys
from pkg_resources import resource_string
# Load a package data file resource as a string. This
_conf = json.loads(resource_string(__name__, 'conf.json'))
# Load a data file specified in "package_data" setup option for this pkg.
_pkg_data = resource_string(__name__, 'data/pkg1.dat')
# Load a data file included in "data_files" setup option.
# FIXME
try:
_sys_data = open(sys.prefix + '/data/data1.dat').read()
except Exception as exc:
print(exc)
_sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'
def hello():
print(_conf['greeting'])
print(_pkg_data)
print(_sys_data)
if __name__ == '__main__':
hello()
|
normal
|
{
"blob_id": "4689ee7f7178cef16ac1f5375481a9ee8a48f924",
"index": 3780,
"step-1": "<mask token>\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\n<mask token>\n",
"step-2": "<mask token>\ntry:\n _sys_data = open(sys.prefix + '/data/data1.dat').read()\nexcept Exception as exc:\n print(exc)\n _sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-3": "<mask token>\n_conf = json.loads(resource_string(__name__, 'conf.json'))\n_pkg_data = resource_string(__name__, 'data/pkg1.dat')\ntry:\n _sys_data = open(sys.prefix + '/data/data1.dat').read()\nexcept Exception as exc:\n print(exc)\n _sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-4": "import json\nimport sys\nfrom pkg_resources import resource_string\n_conf = json.loads(resource_string(__name__, 'conf.json'))\n_pkg_data = resource_string(__name__, 'data/pkg1.dat')\ntry:\n _sys_data = open(sys.prefix + '/data/data1.dat').read()\nexcept Exception as exc:\n print(exc)\n _sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-5": "import json\nimport sys\nfrom pkg_resources import resource_string\n\n# Load a package data file resource as a string. This\n_conf = json.loads(resource_string(__name__, 'conf.json'))\n\n# Load a data file specified in \"package_data\" setup option for this pkg.\n_pkg_data = resource_string(__name__, 'data/pkg1.dat')\n\n# Load a data file included in \"data_files\" setup option.\n# FIXME\ntry:\n _sys_data = open(sys.prefix + '/data/data1.dat').read()\nexcept Exception as exc:\n print(exc)\n _sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
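The record above reads package resources through pkg_resources.resource_string plus a sys.prefix-relative data file. pkg_resources ships with setuptools and its API is deprecated; a minimal modern sketch of the package-data part using importlib.resources.files (Python 3.9+) is shown below. "mypkg" is a stand-in package name used only for illustration:

import json
from importlib import resources

# Equivalent of resource_string(__name__, 'conf.json') for a package called "mypkg".
_conf = json.loads((resources.files("mypkg") / "conf.json").read_text())

# Equivalent of resource_string(__name__, 'data/pkg1.dat'); read_bytes() keeps it binary.
_pkg_data = (resources.files("mypkg") / "data" / "pkg1.dat").read_bytes()

The sys.prefix lookup for data/data1.dat is a separate concern (files installed via the data_files option) and has no direct importlib.resources equivalent, which is what the FIXME in the record hints at.
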
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
content = f.read(num + '.txt').decode('utf-8')
print(content)
comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))
match = re.search('Next nothing is (\\d+)', content)
if match == None:
break
num = match.group(1)
print(''.join(comments))
<|reserved_special_token_0|>
print(url)
print('http://www.pythonchallenge.com/pc/def/oxygen.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
f = zipfile.ZipFile('channel.zip')
num = '90052'
comments = []
while True:
content = f.read(num + '.txt').decode('utf-8')
print(content)
comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))
match = re.search('Next nothing is (\\d+)', content)
if match == None:
break
num = match.group(1)
print(''.join(comments))
url = 'http://www.pythonchallenge.com/pc/def/hockey.html'
print(url)
print('http://www.pythonchallenge.com/pc/def/oxygen.html')
<|reserved_special_token_1|>
import zipfile, re
f = zipfile.ZipFile('channel.zip')
num = '90052'
comments = []
while True:
content = f.read(num + '.txt').decode('utf-8')
print(content)
comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))
match = re.search('Next nothing is (\\d+)', content)
if match == None:
break
num = match.group(1)
print(''.join(comments))
url = 'http://www.pythonchallenge.com/pc/def/hockey.html'
print(url)
print('http://www.pythonchallenge.com/pc/def/oxygen.html')
<|reserved_special_token_1|>
import zipfile, re
f = zipfile.ZipFile("channel.zip")
num = '90052'
comments = []
while True:
content = f.read(num + ".txt").decode("utf-8")
print(content)
comments.append(f.getinfo(num + ".txt").comment.decode("utf-8"))
match = re.search("Next nothing is (\d+)", content)
if match == None:
break
num = match.group(1)
print("".join(comments))
url = "http://www.pythonchallenge.com/pc/def/hockey.html"
print(url)
# look at the letters that make the ascii art : they are : O makes h, x makes o, g makes k, e makes e, n makes y
print("http://www.pythonchallenge.com/pc/def/oxygen.html")
|
flexible
|
{
"blob_id": "b883e63c70f3dfeac3294989fab93c1331b6329c",
"index": 7990,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n content = f.read(num + '.txt').decode('utf-8')\n print(content)\n comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))\n match = re.search('Next nothing is (\\\\d+)', content)\n if match == None:\n break\n num = match.group(1)\nprint(''.join(comments))\n<mask token>\nprint(url)\nprint('http://www.pythonchallenge.com/pc/def/oxygen.html')\n",
"step-3": "<mask token>\nf = zipfile.ZipFile('channel.zip')\nnum = '90052'\ncomments = []\nwhile True:\n content = f.read(num + '.txt').decode('utf-8')\n print(content)\n comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))\n match = re.search('Next nothing is (\\\\d+)', content)\n if match == None:\n break\n num = match.group(1)\nprint(''.join(comments))\nurl = 'http://www.pythonchallenge.com/pc/def/hockey.html'\nprint(url)\nprint('http://www.pythonchallenge.com/pc/def/oxygen.html')\n",
"step-4": "import zipfile, re\nf = zipfile.ZipFile('channel.zip')\nnum = '90052'\ncomments = []\nwhile True:\n content = f.read(num + '.txt').decode('utf-8')\n print(content)\n comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))\n match = re.search('Next nothing is (\\\\d+)', content)\n if match == None:\n break\n num = match.group(1)\nprint(''.join(comments))\nurl = 'http://www.pythonchallenge.com/pc/def/hockey.html'\nprint(url)\nprint('http://www.pythonchallenge.com/pc/def/oxygen.html')\n",
"step-5": "import zipfile, re\n\nf = zipfile.ZipFile(\"channel.zip\")\nnum = '90052'\ncomments = []\n\nwhile True:\n content = f.read(num + \".txt\").decode(\"utf-8\")\n print(content)\n comments.append(f.getinfo(num + \".txt\").comment.decode(\"utf-8\"))\n match = re.search(\"Next nothing is (\\d+)\", content)\n if match == None:\n break\n num = match.group(1)\nprint(\"\".join(comments))\n\nurl = \"http://www.pythonchallenge.com/pc/def/hockey.html\"\nprint(url)\n# look at the letters that make the ascii art : they are : O makes h, x makes o, g makes k, e makes e, n makes y\n\nprint(\"http://www.pythonchallenge.com/pc/def/oxygen.html\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""------------------------------------------------------------------------
MODULE
FContactRegulatoryInfoBase -
DESCRIPTION:
This file provides the custom instance of RegulatoryInfo on the Contact which has all the RegulatoryInfo related methods
VERSION: 1.0.25(0.25.7)
RESTRICTIONS/ LIMITATIONS:
1. Any modifications to the scripts/ encrypted modules/ clear text code within the core is not supported.
2. This module is not customizable
3. The component may not work as expected with any modifications done to this module at user end
--------------------------------------------------------------------------"""
import string
import acm
import FIntegrationUtils
import FRegulatoryLogger
import ael
import FRegulatoryUtils
import FRegulatoryInfoException
logger = 'FContactRegulatoryInfoBase'
VALUE_NOT_SET = ()
class FContactRegulatoryInfoBase(object):
def __init__(self, contact = None):
"""class that maintains all data related to the regulatory on the FContact"""
try:
self.__contact = contact
if not self.__contact:
FRegulatoryLogger.ERROR(logger, "The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object")
return None
self.__reg_date_of_birth = None
self.__reg_first_name = None
self.__reg_last_name = None
self.__reg_national_id = None
self.__reg_crm_id = None
self.__crm_id_source = None
self.__reg_exchange_id = None
self.__reg_unique_name = None
self.__client_type = None
self.__is_general_partner = None
if contact:
self.__refresh(contact)
self.__integration_utils = FIntegrationUtils.FIntegrationUtils()
except Exception as e :
FRegulatoryLogger.ERROR(logger, str(e))
def __refresh(self, contact):
self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)
self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)
self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)
self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)
self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)
self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)
self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)
try:
self.__reg_unique_name = self.__contact.UniqueName()
except:
self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)
def Contact(self):
"""returns the contact for which this wrapper has all the addinfo/column values"""
return self.__contact
def DateOfBirth(self, reg_date_of_birth = VALUE_NOT_SET):
"""Date of birth of the concerned natural person"""
ael_reg_dob = None
if reg_date_of_birth != VALUE_NOT_SET:
try:
ael_reg_dob = ael.date_from_string(reg_date_of_birth)
except:
if reg_date_of_birth not in ['', None]:
msg = "The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo"%reg_date_of_birth
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
if ael_reg_dob:
self.__reg_date_of_birth = reg_date_of_birth
else:
self.__reg_date_of_birth = None
try:
self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)
except:
pass
else:
return self.__reg_date_of_birth
def FirstName(self, reg_first_name = VALUE_NOT_SET):
"""First name of the concerned natural person"""
if reg_first_name != VALUE_NOT_SET:
self.__reg_first_name = reg_first_name
try:
self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)
except:
pass
else:
if not self.__reg_first_name:
self.__reg_first_name = None
return self.__reg_first_name
def LastName(self, reg_last_name = VALUE_NOT_SET):
"""Last name of the concerned natural person"""
if reg_last_name != VALUE_NOT_SET:
self.__reg_last_name = reg_last_name
try:
self.__contact.AdditionalInfo().LastName(self.__reg_last_name)
except:
pass
else:
if not self.__reg_last_name:
self.__reg_last_name = None
return self.__reg_last_name
def NationalId(self, reg_national_id = VALUE_NOT_SET):
"""NationalId of the concerned natural person"""
if reg_national_id != VALUE_NOT_SET:
self.__reg_national_id = reg_national_id
try:
self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)
except:
pass
else:
if not self.__reg_national_id:
self.__reg_national_id = None
return self.__reg_national_id
def CrmId(self, crm_id = VALUE_NOT_SET):
"""CrmId of the concerned natural person"""
if crm_id != VALUE_NOT_SET:
self.__reg_crm_id = crm_id
try:
self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)
except:
pass
else:
if not self.__reg_crm_id:
self.__reg_crm_id = None
return self.__reg_crm_id
def ExchangeId(self, exchange_id = VALUE_NOT_SET):
"""The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged."""
if exchange_id != VALUE_NOT_SET:
if str(exchange_id).isdigit():
self.__reg_exchange_id = int(exchange_id)
elif str(exchange_id) in ['None', '']:
self.__reg_exchange_id = None
else:
msg = "The ExchangeId provided <%s> is not of the expected integer format"%str(exchange_id)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
try:
self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)
except:
pass
else:
if not self.__reg_exchange_id:
self.__reg_exchange_id = None
return self.__reg_exchange_id
def UniqueName(self, unique_name = VALUE_NOT_SET):
"""An optional unique name, if specified there can only be one contact with this name for each party."""
if unique_name != VALUE_NOT_SET:
try:
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:
self.__contact.UniqueName(unique_name)
else:
is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)
if is_unique:
try:
self.__contact.AdditionalInfo().UniqueName(unique_name)
except:
pass
else:
msg = "The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name."%(unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)
FRegulatoryLogger.ERROR(logger, msg)
raise FRegulatoryInfoException.FRegInfoInvalidData(msg)
self.__reg_unique_name = unique_name
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))
else:
if not self.__reg_unique_name:
self.__reg_unique_name = None
return self.__reg_unique_name
def ClientType(self):
"""returns the ClientType based on where the CrmId is found on the linked objects"""
self.__client_type = FRegulatoryUtils.getClientType(self.__contact)
return self.__client_type
def JointAccount(self):
"""Another trader that jointly owns the account with this trader"""
joint_accounts = []
if self.IsGeneralPartner():
for contact in self.__contact.Party().Contacts():
if contact.AdditionalInfo().RegGeneralPartner():
joint_accounts.append(contact)
else:
FRegulatoryLogger.WARN(logger, "<%s> is not a General Partner. Hence JointAccount is None"%self.__contact.Fullname())
joint_accounts = None
return joint_accounts
def IsGeneralPartner(self, is_general_partner = VALUE_NOT_SET):
"""General partner has responsibility for the actions of the business, can legally bind
the business and is personally liable for all the business's debts and obligations."""
if is_general_partner != VALUE_NOT_SET:
self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')
FRegulatoryLogger.DEBUG(logger, "The IsGeneralPartner is being set to <%s>."%(str(self.__is_general_partner)))
try:
self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)
except:
pass
else:
if str(self.__is_general_partner) == "None":
FRegulatoryLogger.DEBUG(logger, "The IsGeneralPartner is None. Hence defaulting it to False")
self.__is_general_partner = False
return self.__is_general_partner
def __setattr__(self, attr, val):
if attr.startswith('_'):
super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)
else:
if hasattr(self, attr):
getattr(self, attr)(val)
def Commit(self):
"""Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact"""
try:
acm.BeginTransaction()
self.__contact.Commit()
if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:
self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)
self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)
self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)
self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)
self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)
self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)
self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)
self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)
acm.CommitTransaction()
except Exception as e:
FRegulatoryLogger.ERROR(logger, str(e))
FRegulatoryLogger.ERROR(logger, "ABORTING TRANSACTION***********")
acm.AbortTransaction()
def Delete(self):
"""Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS"""
FRegulatoryUtils.Delete(self.__contact, "Contact")
FRegulatoryLogger.DEBUG(logger, "Deleted all AdditionalInfos on Contact related to Regulatory Reporting")
def Attributes(self):
"""returns the attributes on the FContactRegulatoryInfoBase instance"""
return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)
def RegulatoryInfo(self):
"""returns the FContactRegulatoryInfoBase instance for the given contact"""
conactRegInfo = FContactRegulatoryInfo(self)
return conactRegInfo
def Select(query):
"""Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query"""
party = None
if query.find('and party') != -1:#it means there is an additional condition added
pos = query.find('and party')
party_name = query[(pos + len('and party')):]
query = query[0:pos]
party_name = party_name.replace('=', '').replace("'", '')
party_name = party_name.strip()
party = acm.FParty[party_name]
return_result = FRegulatoryUtils.Select(query, "FContact", party)
return return_result
|
normal
|
{
"blob_id": "d4e62950f10efeb27d19c3d9c672969342ef8c7c",
"index": 3095,
"step-1": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n <mask token>\n <mask token>\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n <mask token>\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n 
self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n <mask token>\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n <mask token>\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n <mask token>\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n 
self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n <mask token>\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. 
Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n <mask token>\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the 
concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts():\n if contact.AdditionalInfo().RegGeneralPartner():\n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \n '<%s> is not a General Partner. 
Hence JointAccount is None' %\n self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override(\n ) < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth',\n self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName',\n self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName',\n self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId',\n self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId',\n self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info(\n 'RegContExchangeId', self.__contact, self.__reg_exchange_id\n )\n self.__integration_utils.set_additional_info('UniqueName',\n self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info(\n 'RegGeneralPartner', self.__contact, self.\n __is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, 'ABORTING TRANSACTION***********')\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass FContactRegulatoryInfoBase(object):\n\n def __init__(self, contact=None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger,\n 'The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object'\n )\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value(\n 'dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName',\n self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName',\n self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value(\n 'nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value(\n 'regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value(\n 'regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value(\n 'regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value(\n 'uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth=VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = (\n 'The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo'\n % reg_date_of_birth)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.\n __reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name=VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name\n )\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name=VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id=VALUE_NOT_SET):\n \"\"\"NationalId of the 
concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.\n __reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id=VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.\n __reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id=VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = (\n 'The ExchangeId provided <%s> is not of the expected integer format'\n % str(exchange_id))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.\n __reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name=VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if (FIntegrationUtils.FIntegrationUtils.\n get_acm_version_override() >= 2017.2):\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(\n self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(\n unique_name)\n except:\n pass\n else:\n msg = (\n 'The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. Another contact <%s> already has this unique name.'\n % (unique_name, self.__contact.Fullname(),\n self.__contact.Party().Name(), contact_name))\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts():\n if contact.AdditionalInfo().RegGeneralPartner():\n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \n '<%s> is not a General Partner. 
Hence JointAccount is None' %\n self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts\n\n def IsGeneralPartner(self, is_general_partner=VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(\n is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \n 'The IsGeneralPartner is being set to <%s>.' % str(self.\n __is_general_partner))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.\n __is_general_partner)\n except:\n pass\n else:\n if str(self.__is_general_partner) == 'None':\n FRegulatoryLogger.DEBUG(logger,\n 'The IsGeneralPartner is None. Hence defaulting it to False'\n )\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n elif hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override(\n ) < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth',\n self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName',\n self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName',\n self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId',\n self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId',\n self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info(\n 'RegContExchangeId', self.__contact, self.__reg_exchange_id\n )\n self.__integration_utils.set_additional_info('UniqueName',\n self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info(\n 'RegGeneralPartner', self.__contact, self.\n __is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, 'ABORTING TRANSACTION***********')\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, 'Contact')\n FRegulatoryLogger.DEBUG(logger,\n 'Deleted all AdditionalInfos on Contact related to Regulatory Reporting'\n )\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\n\ndef RegulatoryInfo(self):\n \"\"\"returns the FContactRegulatoryInfoBase instance for the given contact\"\"\"\n conactRegInfo = FContactRegulatoryInfo(self)\n return conactRegInfo\n\n\ndef Select(query):\n \"\"\"Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query\"\"\"\n party = None\n if query.find('and party') != -1:\n pos = query.find('and party')\n party_name = query[pos + len('and party'):]\n query = query[0:pos]\n party_name = party_name.replace('=', '').replace(\"'\", '')\n party_name = 
party_name.strip()\n party = acm.FParty[party_name]\n return_result = FRegulatoryUtils.Select(query, 'FContact', party)\n return return_result\n",
"step-5": "\"\"\"------------------------------------------------------------------------\nMODULE\n FContactRegulatoryInfoBase -\nDESCRIPTION:\n This file provides the custom instance of RegulatoryInfo on the Contact which has all the RegulatoryInfo related methods\nVERSION: 1.0.25(0.25.7)\nRESTRICTIONS/ LIMITATIONS:\n 1. Any modifications to the scripts/ encrypted modules/ clear text code within the core is not supported.\n 2. This module is not customizable\n 3. The component may not work as expected with any modifications done to this module at user end\n--------------------------------------------------------------------------\"\"\"\nimport string\nimport acm\nimport FIntegrationUtils\nimport FRegulatoryLogger\nimport ael\nimport FRegulatoryUtils\nimport FRegulatoryInfoException\nlogger = 'FContactRegulatoryInfoBase'\nVALUE_NOT_SET = ()\n\nclass FContactRegulatoryInfoBase(object):\n def __init__(self, contact = None):\n \"\"\"class that maintains all data related to the regulatory on the FContact\"\"\"\n try:\n self.__contact = contact\n if not self.__contact:\n FRegulatoryLogger.ERROR(logger, \"The name on the contact is the unique identifier of the contact. Kindly provide a valid acm.FContact object\")\n return None\n self.__reg_date_of_birth = None\n self.__reg_first_name = None\n self.__reg_last_name = None\n self.__reg_national_id = None\n self.__reg_crm_id = None\n self.__crm_id_source = None\n self.__reg_exchange_id = None\n self.__reg_unique_name = None\n self.__client_type = None\n self.__is_general_partner = None\n if contact:\n self.__refresh(contact)\n self.__integration_utils = FIntegrationUtils.FIntegrationUtils()\n except Exception as e :\n FRegulatoryLogger.ERROR(logger, str(e))\n\n def __refresh(self, contact):\n self.__reg_date_of_birth = FRegulatoryUtils.get_addinfo_value('dateOfBirth', self.__contact)\n self.__reg_first_name = FRegulatoryUtils.get_addinfo_value('firstName', self.__contact)\n self.__reg_last_name = FRegulatoryUtils.get_addinfo_value('lastName', self.__contact)\n self.__reg_national_id = FRegulatoryUtils.get_addinfo_value('nationalId', self.__contact)\n self.__is_general_partner = FRegulatoryUtils.get_addinfo_value('regGeneralPartner', self.__contact)\n self.__reg_crm_id = FRegulatoryUtils.get_addinfo_value('regContactCrmId', self.__contact)\n self.__reg_exchange_id = FRegulatoryUtils.get_addinfo_value('regContExchangeId', self.__contact)\n try:\n self.__reg_unique_name = self.__contact.UniqueName()\n except:\n self.__reg_unique_name = FRegulatoryUtils.get_addinfo_value('uniqueName', self.__contact)\n\n def Contact(self):\n \"\"\"returns the contact for which this wrapper has all the addinfo/column values\"\"\"\n return self.__contact\n\n def DateOfBirth(self, reg_date_of_birth = VALUE_NOT_SET):\n \"\"\"Date of birth of the concerned natural person\"\"\"\n ael_reg_dob = None\n if reg_date_of_birth != VALUE_NOT_SET:\n try:\n ael_reg_dob = ael.date_from_string(reg_date_of_birth)\n except:\n if reg_date_of_birth not in ['', None]:\n msg = \"The value <%s> provided for DateOfBirth is invalid and hence will not be set of the DateOfBirth AdditionalInfo\"%reg_date_of_birth\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n if ael_reg_dob:\n self.__reg_date_of_birth = reg_date_of_birth\n else:\n self.__reg_date_of_birth = None\n try:\n self.__contact.AdditionalInfo().DateOfBirth(self.__reg_date_of_birth)\n except:\n pass\n else:\n return self.__reg_date_of_birth\n\n def FirstName(self, reg_first_name = 
VALUE_NOT_SET):\n \"\"\"First name of the concerned natural person\"\"\"\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name\n\n def LastName(self, reg_last_name = VALUE_NOT_SET):\n \"\"\"Last name of the concerned natural person\"\"\"\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name\n\n def NationalId(self, reg_national_id = VALUE_NOT_SET):\n \"\"\"NationalId of the concerned natural person\"\"\"\n if reg_national_id != VALUE_NOT_SET:\n self.__reg_national_id = reg_national_id\n try:\n self.__contact.AdditionalInfo().NationalId(self.__reg_national_id)\n except:\n pass\n else:\n if not self.__reg_national_id:\n self.__reg_national_id = None\n return self.__reg_national_id\n\n def CrmId(self, crm_id = VALUE_NOT_SET):\n \"\"\"CrmId of the concerned natural person\"\"\"\n if crm_id != VALUE_NOT_SET:\n self.__reg_crm_id = crm_id\n try:\n self.__contact.AdditionalInfo().RegContactCrmId(self.__reg_crm_id)\n except:\n pass\n else:\n if not self.__reg_crm_id:\n self.__reg_crm_id = None\n return self.__reg_crm_id\n\n def ExchangeId(self, exchange_id = VALUE_NOT_SET):\n \"\"\"The identifier used towards/by an exchange to identify a person or legal entity, before the actual national id or the LEI is divulged.\"\"\"\n if exchange_id != VALUE_NOT_SET:\n if str(exchange_id).isdigit():\n self.__reg_exchange_id = int(exchange_id)\n elif str(exchange_id) in ['None', '']:\n self.__reg_exchange_id = None\n else:\n msg = \"The ExchangeId provided <%s> is not of the expected integer format\"%str(exchange_id)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n try:\n self.__contact.AdditionalInfo().RegContExchangeId(self.__reg_exchange_id)\n except:\n pass\n else:\n if not self.__reg_exchange_id:\n self.__reg_exchange_id = None\n return self.__reg_exchange_id\n\n def UniqueName(self, unique_name = VALUE_NOT_SET):\n \"\"\"An optional unique name, if specified there can only be one contact with this name for each party.\"\"\"\n if unique_name != VALUE_NOT_SET:\n try:\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() >= 2017.2:\n self.__contact.UniqueName(unique_name)\n else:\n is_unique, contact_name = FRegulatoryUtils.is_unique_name(self.__contact, unique_name)\n if is_unique:\n try:\n self.__contact.AdditionalInfo().UniqueName(unique_name)\n except:\n pass\n else:\n msg = \"The uniqueName <%s> provided for contact <%s> on party <%s> is not unique. 
Another contact <%s> already has this unique name.\"%(unique_name, self.__contact.Fullname(), self.__contact.Party().Name(), contact_name)\n FRegulatoryLogger.ERROR(logger, msg)\n raise FRegulatoryInfoException.FRegInfoInvalidData(msg)\n self.__reg_unique_name = unique_name\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n raise FRegulatoryInfoException.FRegInfoInvalidData(str(e))\n else:\n if not self.__reg_unique_name:\n self.__reg_unique_name = None\n return self.__reg_unique_name\n\n def ClientType(self):\n \"\"\"returns the ClientType based on where the CrmId is found on the linked objects\"\"\"\n self.__client_type = FRegulatoryUtils.getClientType(self.__contact)\n return self.__client_type\n\n def JointAccount(self):\n \"\"\"Another trader that jointly owns the account with this trader\"\"\"\n joint_accounts = []\n if self.IsGeneralPartner():\n for contact in self.__contact.Party().Contacts(): \n if contact.AdditionalInfo().RegGeneralPartner(): \n joint_accounts.append(contact)\n else:\n FRegulatoryLogger.WARN(logger, \"<%s> is not a General Partner. Hence JointAccount is None\"%self.__contact.Fullname())\n joint_accounts = None\n return joint_accounts \n\n def IsGeneralPartner(self, is_general_partner = VALUE_NOT_SET):\n \"\"\"General partner has responsibility for the actions of the business, can legally bind\n the business and is personally liable for all the business's debts and obligations.\"\"\"\n if is_general_partner != VALUE_NOT_SET:\n self.__is_general_partner = FRegulatoryUtils.get_bool(is_general_partner, 'IsGeneralPartner')\n FRegulatoryLogger.DEBUG(logger, \"The IsGeneralPartner is being set to <%s>.\"%(str(self.__is_general_partner)))\n try:\n self.__contact.AdditionalInfo().RegGeneralPartner(self.__is_general_partner)\n except:\n pass\n\n else:\n if str(self.__is_general_partner) == \"None\":\n FRegulatoryLogger.DEBUG(logger, \"The IsGeneralPartner is None. 
Hence defaulting it to False\")\n self.__is_general_partner = False\n return self.__is_general_partner\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n super(FContactRegulatoryInfoBase, self).__setattr__(attr, val)\n else:\n if hasattr(self, attr):\n getattr(self, attr)(val)\n\n def Commit(self):\n \"\"\"Committing this instance will automatically commit all the RegulatorySupport related attributes on the contact\"\"\"\n try:\n acm.BeginTransaction()\n self.__contact.Commit()\n if FIntegrationUtils.FIntegrationUtils.get_acm_version_override() < 2015.4:\n self.__integration_utils.set_additional_info('DateOfBirth', self.__contact, self.__reg_date_of_birth)\n self.__integration_utils.set_additional_info('FirstName', self.__contact, self.__reg_first_name)\n self.__integration_utils.set_additional_info('LastName', self.__contact, self.__reg_last_name)\n self.__integration_utils.set_additional_info('NationalId', self.__contact, self.__reg_national_id)\n self.__integration_utils.set_additional_info('RegContactCrmId', self.__contact, self.__reg_crm_id)\n self.__integration_utils.set_additional_info('RegContExchangeId', self.__contact, self.__reg_exchange_id)\n self.__integration_utils.set_additional_info('UniqueName', self.__contact, self.__reg_unique_name)\n self.__integration_utils.set_additional_info('RegGeneralPartner', self.__contact, self.__is_general_partner)\n acm.CommitTransaction()\n except Exception as e:\n FRegulatoryLogger.ERROR(logger, str(e))\n FRegulatoryLogger.ERROR(logger, \"ABORTING TRANSACTION***********\")\n acm.AbortTransaction()\n\n def Delete(self):\n \"\"\"Deleting this instance automatically deletes all the attributes related to the reporting on the instrument or on the ContactRegulatoryInfo in the ADS\"\"\"\n FRegulatoryUtils.Delete(self.__contact, \"Contact\")\n FRegulatoryLogger.DEBUG(logger, \"Deleted all AdditionalInfos on Contact related to Regulatory Reporting\")\n\n def Attributes(self):\n \"\"\"returns the attributes on the FContactRegulatoryInfoBase instance\"\"\"\n return FRegulatoryUtils.log_attributes('FContactRegulatoryInfo', self)\n\ndef RegulatoryInfo(self):\n \"\"\"returns the FContactRegulatoryInfoBase instance for the given contact\"\"\"\n conactRegInfo = FContactRegulatoryInfo(self)\n return conactRegInfo\n\ndef Select(query):\n \"\"\"Return a collection of FContactRegulatoryInfoBase instances matching constraint specified in the Select query\"\"\"\n party = None\n if query.find('and party') != -1:#it means there is an additional condition added\n pos = query.find('and party')\n party_name = query[(pos + len('and party')):]\n query = query[0:pos]\n party_name = party_name.replace('=', '').replace(\"'\", '')\n party_name = party_name.strip()\n party = acm.FParty[party_name]\n return_result = FRegulatoryUtils.Select(query, \"FContact\", party)\n return return_result\n\n",
"step-ids": [
13,
15,
18,
20,
23
]
}
|
[
13,
15,
18,
20,
23
] |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PredictionQueryToken(Model):
"""PredictionQueryToken.
:param session:
:type session: str
:param continuation:
:type continuation: str
:param max_count:
:type max_count: int
:param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'
:type order_by: str or
~azure.cognitiveservices.vision.customvision.training.models.enum
:param tags:
:type tags:
list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]
:param iteration_id:
:type iteration_id: str
:param start_time:
:type start_time: datetime
:param end_time:
:type end_time: datetime
:param application:
:type application: str
"""
_attribute_map = {
'session': {'key': 'Session', 'type': 'str'},
'continuation': {'key': 'Continuation', 'type': 'str'},
'max_count': {'key': 'MaxCount', 'type': 'int'},
'order_by': {'key': 'OrderBy', 'type': 'str'},
'tags': {'key': 'Tags', 'type': '[PredictionQueryTag]'},
'iteration_id': {'key': 'IterationId', 'type': 'str'},
'start_time': {'key': 'StartTime', 'type': 'iso-8601'},
'end_time': {'key': 'EndTime', 'type': 'iso-8601'},
'application': {'key': 'Application', 'type': 'str'},
}
def __init__(self, session=None, continuation=None, max_count=None, order_by=None, tags=None, iteration_id=None, start_time=None, end_time=None, application=None):
super(PredictionQueryToken, self).__init__()
self.session = session
self.continuation = continuation
self.max_count = max_count
self.order_by = order_by
self.tags = tags
self.iteration_id = iteration_id
self.start_time = start_time
self.end_time = end_time
self.application = application
|
normal
|
{
"blob_id": "0719448e7eb8d48e636be1332c904beebf27e02d",
"index": 4163,
"step-1": "<mask token>\n\n\nclass PredictionQueryToken(Model):\n <mask token>\n <mask token>\n\n def __init__(self, session=None, continuation=None, max_count=None,\n order_by=None, tags=None, iteration_id=None, start_time=None,\n end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-2": "<mask token>\n\n\nclass PredictionQueryToken(Model):\n <mask token>\n _attribute_map = {'session': {'key': 'Session', 'type': 'str'},\n 'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':\n {'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',\n 'type': 'str'}, 'tags': {'key': 'Tags', 'type':\n '[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',\n 'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':\n 'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},\n 'application': {'key': 'Application', 'type': 'str'}}\n\n def __init__(self, session=None, continuation=None, max_count=None,\n order_by=None, tags=None, iteration_id=None, start_time=None,\n end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-3": "<mask token>\n\n\nclass PredictionQueryToken(Model):\n \"\"\"PredictionQueryToken.\n\n :param session:\n :type session: str\n :param continuation:\n :type continuation: str\n :param max_count:\n :type max_count: int\n :param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'\n :type order_by: str or\n ~azure.cognitiveservices.vision.customvision.training.models.enum\n :param tags:\n :type tags:\n list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]\n :param iteration_id:\n :type iteration_id: str\n :param start_time:\n :type start_time: datetime\n :param end_time:\n :type end_time: datetime\n :param application:\n :type application: str\n \"\"\"\n _attribute_map = {'session': {'key': 'Session', 'type': 'str'},\n 'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':\n {'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',\n 'type': 'str'}, 'tags': {'key': 'Tags', 'type':\n '[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',\n 'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':\n 'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},\n 'application': {'key': 'Application', 'type': 'str'}}\n\n def __init__(self, session=None, continuation=None, max_count=None,\n order_by=None, tags=None, iteration_id=None, start_time=None,\n end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-4": "from msrest.serialization import Model\n\n\nclass PredictionQueryToken(Model):\n \"\"\"PredictionQueryToken.\n\n :param session:\n :type session: str\n :param continuation:\n :type continuation: str\n :param max_count:\n :type max_count: int\n :param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'\n :type order_by: str or\n ~azure.cognitiveservices.vision.customvision.training.models.enum\n :param tags:\n :type tags:\n list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]\n :param iteration_id:\n :type iteration_id: str\n :param start_time:\n :type start_time: datetime\n :param end_time:\n :type end_time: datetime\n :param application:\n :type application: str\n \"\"\"\n _attribute_map = {'session': {'key': 'Session', 'type': 'str'},\n 'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':\n {'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',\n 'type': 'str'}, 'tags': {'key': 'Tags', 'type':\n '[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',\n 'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':\n 'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},\n 'application': {'key': 'Application', 'type': 'str'}}\n\n def __init__(self, session=None, continuation=None, max_count=None,\n order_by=None, tags=None, iteration_id=None, start_time=None,\n end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-5": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass PredictionQueryToken(Model):\n \"\"\"PredictionQueryToken.\n\n :param session:\n :type session: str\n :param continuation:\n :type continuation: str\n :param max_count:\n :type max_count: int\n :param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'\n :type order_by: str or\n ~azure.cognitiveservices.vision.customvision.training.models.enum\n :param tags:\n :type tags:\n list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]\n :param iteration_id:\n :type iteration_id: str\n :param start_time:\n :type start_time: datetime\n :param end_time:\n :type end_time: datetime\n :param application:\n :type application: str\n \"\"\"\n\n _attribute_map = {\n 'session': {'key': 'Session', 'type': 'str'},\n 'continuation': {'key': 'Continuation', 'type': 'str'},\n 'max_count': {'key': 'MaxCount', 'type': 'int'},\n 'order_by': {'key': 'OrderBy', 'type': 'str'},\n 'tags': {'key': 'Tags', 'type': '[PredictionQueryTag]'},\n 'iteration_id': {'key': 'IterationId', 'type': 'str'},\n 'start_time': {'key': 'StartTime', 'type': 'iso-8601'},\n 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},\n 'application': {'key': 'Application', 'type': 'str'},\n }\n\n def __init__(self, session=None, continuation=None, max_count=None, order_by=None, tags=None, iteration_id=None, start_time=None, end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PhoneSerializer(serializers.ModelSerializer):
class Meta:
model = Phones
fields = 'id', 'number', 'area_code', 'country_code'
<|reserved_special_token_1|>
from rest_framework import serializers
from api.models.Phones import Phones
class PhoneSerializer(serializers.ModelSerializer):
class Meta:
model = Phones
fields = 'id', 'number', 'area_code', 'country_code'
<|reserved_special_token_1|>
from rest_framework import serializers
from api.models.Phones import Phones
class PhoneSerializer(serializers.ModelSerializer):
class Meta:
model = Phones
fields = (
'id', 'number', 'area_code', 'country_code'
)
|
flexible
|
{
"blob_id": "e3ba6395a8d7272fc7e5a8be37e6b0b18c355e14",
"index": 9272,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PhoneSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Phones\n fields = 'id', 'number', 'area_code', 'country_code'\n",
"step-3": "from rest_framework import serializers\nfrom api.models.Phones import Phones\n\n\nclass PhoneSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Phones\n fields = 'id', 'number', 'area_code', 'country_code'\n",
"step-4": "from rest_framework import serializers\n\nfrom api.models.Phones import Phones\n\n\nclass PhoneSerializer(serializers.ModelSerializer):\n class Meta:\n model = Phones\n fields = (\n 'id', 'number', 'area_code', 'country_code'\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding=utf-8
from datetime import datetime, timedelta
from flask import current_app as app
from flask_script import Command
from main import db
from models.payment import Payment
from models.product import ProductGroup, Product, PriceTier, Price, ProductView, ProductViewProduct
from models.purchase import Purchase
def create_product_groups():
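    """Create the ticket ProductGroups, allocations, Products, PriceTiers and Prices, and attach them to the 'main' ProductView."""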
top_level_groups = [
        # name, expires, capacity
('admissions', datetime(2018, 9, 3), app.config.get('MAXIMUM_ADMISSIONS')),
('parking', datetime(2018, 9, 3), None),
('campervan', datetime(2018, 9, 3), None),
('merchandise', datetime(2018, 8, 12), None),
]
for name, expires, capacity in top_level_groups:
if ProductGroup.get_by_name(name):
continue
pg = ProductGroup(name=name, type=name, capacity_max=capacity, expires=expires)
db.session.add(pg)
db.session.flush()
allocations = [
# name, capacity
('vendors', 100),
('sponsors', 200),
('speakers', 100),
('general', 800),
]
admissions = ProductGroup.get_by_name('admissions')
for name, capacity in allocations:
if ProductGroup.get_by_name(name):
continue
ProductGroup(name=name, capacity_max=capacity, parent=admissions)
view = ProductView.get_by_name('main')
if not view:
view = ProductView('main', 'tickets')
db.session.add(view)
db.session.flush()
general = ProductGroup.get_by_name('general')
products = [
# name, display name, transferable, badge, capacity, description, (std cap, gbp eur), (early cap, gbp, eur), (late cap, gbp, eur)
('full', 'Full Camp Ticket', True, True, None, 'Full ticket',
((1500, 115, 135), (250, 105, 125), (None, 125, 145))
),
('full-s', 'Full Camp Ticket (Supporter)', True, True, None, 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.',
((None, 150, 180),)
),
('full-sg', 'Full Camp Ticket (Gold Supporter)', True, True, None, 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.',
((None, 200, 240),)
),
('u18', 'Under-18', True, False, 150, 'For visitors born after August 30th, 2000. All under-18s must be accompanied by an adult.',
((None, 55, 63),)
),
('u12', 'Under-12', True, False, 50, 'For children born after August 30th, 2006. All children must be accompanied by an adult.',
((None, 0, 0),)
),
]
order = 0
for name, display_name, has_xfer, has_badge, capacity, description, prices in products:
if Product.get_by_name('general', name):
continue
product = Product(name=name, display_name=display_name, capacity_max=capacity,
description=description, parent=general,
attributes={'is_transferable': has_xfer,
'has_badge': has_badge})
for index, (price_cap, gbp, eur) in enumerate(prices):
if len(prices) == 1 or index == 0:
tier_name = name + '-std'
active = True
elif index == 1:
tier_name = name + '-early-bird'
active = False
elif index == 2:
tier_name = name + '-late'
active = False
if PriceTier.get_by_name('general', 'name', tier_name):
continue
pt = PriceTier(name=tier_name, capacity_max=price_cap, personal_limit=10, parent=product, active=active)
Price(currency='GBP', price_int=gbp * 100, price_tier=pt)
Price(currency='EUR', price_int=eur * 100, price_tier=pt)
ProductViewProduct(view, product, order)
order += 1
db.session.flush()
misc = [
# name, display_name, cap, personal_limit, gbp, eur, description
('parking', 'Parking Ticket', 1700, 4, 15, 21, "We're trying to keep cars to a minimum. Please take public transport or car-share if you can."),
('campervan', 'Caravan/\u200cCampervan Ticket', 60, 2, 30, 42, "If you bring a caravan, you won't need a separate parking ticket for the towing car."),
]
for name, display_name, cap, personal_limit, gbp, eur, description in misc:
if Product.get_by_name(name, name):
continue
group = ProductGroup.get_by_name(name)
product = Product(name=name, display_name=display_name, description=description, parent=group)
pt = PriceTier(name=name, personal_limit=personal_limit, parent=product)
db.session.add(pt)
db.session.add(Price(currency='GBP', price_int=gbp * 100, price_tier=pt))
db.session.add(Price(currency='EUR', price_int=eur * 100, price_tier=pt))
ProductViewProduct(view, product, order)
order += 1
db.session.commit()
# ('t-shirt', 'T-Shirt', 200, 10, 10, 12, "Pre-order the official Electromagnetic Field t-shirt. T-shirts will be available to collect during the event."),
class CreateTickets(Command):
def run(self):
create_product_groups()
class CancelReservedTickets(Command):
def run(self):
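        """Cancel reserved purchases that have been idle for more than three days, along with any payments that were started but never completed."""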
# Payments where someone started the process but didn't complete
payments = Purchase.query.filter(
Purchase.state == 'reserved',
Purchase.modified < datetime.utcnow() - timedelta(days=3),
~Purchase.payment_id.is_(None),
).join(Payment).with_entities(Payment).group_by(Payment)
for payment in payments:
payment.lock()
app.logger.info('Cancelling payment %s', payment.id)
assert payment.state == 'new' and payment.provider in {'gocardless', 'stripe'}
payment.cancel()
# Purchases that were added to baskets but not checked out
purchases = Purchase.query.filter(
Purchase.state == 'reserved',
Purchase.modified < datetime.utcnow() - timedelta(days=3),
Purchase.payment_id.is_(None),
)
for purchase in purchases:
app.logger.info('Cancelling purchase %s', purchase.id)
purchase.cancel()
db.session.commit()
class SendTransferReminder(Command):
def run(self):
pass
# users_to_email = User.query.join(Ticket, TicketType).filter(
# TicketType.admits == 'full',
# Ticket.paid == True, # noqa: E712
# Ticket.transfer_reminder_sent == False,
# ).group_by(User).having(func.count() > 1)
# for user in users_to_email:
# msg = Message("Your Electromagnetic Field Tickets",
# sender=app.config['TICKETS_EMAIL'],
# recipients=[user.email])
# msg.body = render_template("emails/transfer-reminder.txt", user=user)
# app.logger.info('Emailing %s transfer reminder', user.email)
# mail.send(msg)
# for ticket in user.tickets:
# ticket.transfer_reminder_sent = True
# db.session.commit()
class SendTickets(Command):
def run(self):
pass
# paid_items = Ticket.query.filter_by(paid=True).join(TicketType).filter(or_(
# TicketType.admits.in_(['full', 'kid', 'car', 'campervan']),
# TicketType.fixed_id.in_(range(14, 24))))
# users = (paid_items.filter(Ticket.emailed == False).join(User) # noqa: E712
# .group_by(User).with_entities(User).order_by(User.id))
# for user in users:
# user_tickets = Ticket.query.filter_by(paid=True).join(TicketType, User).filter(
# TicketType.admits.in_(['full', 'kid', 'car', 'campervan']),
# User.id == user.id)
# plural = (user_tickets.count() != 1 and 's' or '')
# msg = Message("Your Electromagnetic Field Ticket%s" % plural,
# sender=app.config['TICKETS_EMAIL'],
# recipients=[user.email])
# msg.body = render_template("emails/receipt.txt", user=user)
# attach_tickets(msg, user)
# app.logger.info('Emailing %s receipt for %s tickets', user.email, user_tickets.count())
# mail.send(msg)
# db.session.commit()
|
normal
|
{
"blob_id": "1de46ee2818b4cb2ae68ef5870581c341f8d9b04",
"index": 4020,
"step-1": "<mask token>\n\n\nclass CancelReservedTickets(Command):\n\n def run(self):\n payments = Purchase.query.filter(Purchase.state == 'reserved', \n Purchase.modified < datetime.utcnow() - timedelta(days=3), ~\n Purchase.payment_id.is_(None)).join(Payment).with_entities(Payment\n ).group_by(Payment)\n for payment in payments:\n payment.lock()\n app.logger.info('Cancelling payment %s', payment.id)\n assert payment.state == 'new' and payment.provider in {'gocardless'\n , 'stripe'}\n payment.cancel()\n purchases = Purchase.query.filter(Purchase.state == 'reserved', \n Purchase.modified < datetime.utcnow() - timedelta(days=3),\n Purchase.payment_id.is_(None))\n for purchase in purchases:\n app.logger.info('Cancelling purchase %s', purchase.id)\n purchase.cancel()\n db.session.commit()\n\n\nclass SendTransferReminder(Command):\n\n def run(self):\n pass\n\n\nclass SendTickets(Command):\n\n def run(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass CreateTickets(Command):\n <mask token>\n\n\nclass CancelReservedTickets(Command):\n\n def run(self):\n payments = Purchase.query.filter(Purchase.state == 'reserved', \n Purchase.modified < datetime.utcnow() - timedelta(days=3), ~\n Purchase.payment_id.is_(None)).join(Payment).with_entities(Payment\n ).group_by(Payment)\n for payment in payments:\n payment.lock()\n app.logger.info('Cancelling payment %s', payment.id)\n assert payment.state == 'new' and payment.provider in {'gocardless'\n , 'stripe'}\n payment.cancel()\n purchases = Purchase.query.filter(Purchase.state == 'reserved', \n Purchase.modified < datetime.utcnow() - timedelta(days=3),\n Purchase.payment_id.is_(None))\n for purchase in purchases:\n app.logger.info('Cancelling purchase %s', purchase.id)\n purchase.cancel()\n db.session.commit()\n\n\nclass SendTransferReminder(Command):\n\n def run(self):\n pass\n\n\nclass SendTickets(Command):\n\n def run(self):\n pass\n",
"step-3": "<mask token>\n\n\ndef create_product_groups():\n top_level_groups = [('admissions', datetime(2018, 9, 3), app.config.get\n ('MAXIMUM_ADMISSIONS')), ('parking', datetime(2018, 9, 3), None), (\n 'campervan', datetime(2018, 9, 3), None), ('merchandise', datetime(\n 2018, 8, 12), None)]\n for name, expires, capacity in top_level_groups:\n if ProductGroup.get_by_name(name):\n continue\n pg = ProductGroup(name=name, type=name, capacity_max=capacity,\n expires=expires)\n db.session.add(pg)\n db.session.flush()\n allocations = [('vendors', 100), ('sponsors', 200), ('speakers', 100),\n ('general', 800)]\n admissions = ProductGroup.get_by_name('admissions')\n for name, capacity in allocations:\n if ProductGroup.get_by_name(name):\n continue\n ProductGroup(name=name, capacity_max=capacity, parent=admissions)\n view = ProductView.get_by_name('main')\n if not view:\n view = ProductView('main', 'tickets')\n db.session.add(view)\n db.session.flush()\n general = ProductGroup.get_by_name('general')\n products = [('full', 'Full Camp Ticket', True, True, None,\n 'Full ticket', ((1500, 115, 135), (250, 105, 125), (None, 125, 145)\n )), ('full-s', 'Full Camp Ticket (Supporter)', True, True, None,\n 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.'\n , ((None, 150, 180),)), ('full-sg',\n 'Full Camp Ticket (Gold Supporter)', True, True, None,\n 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.'\n , ((None, 200, 240),)), ('u18', 'Under-18', True, False, 150,\n 'For visitors born after August 30th, 2000. All under-18s must be accompanied by an adult.'\n , ((None, 55, 63),)), ('u12', 'Under-12', True, False, 50,\n 'For children born after August 30th, 2006. All children must be accompanied by an adult.'\n , ((None, 0, 0),))]\n order = 0\n for name, display_name, has_xfer, has_badge, capacity, description, prices in products:\n if Product.get_by_name('general', name):\n continue\n product = Product(name=name, display_name=display_name,\n capacity_max=capacity, description=description, parent=general,\n attributes={'is_transferable': has_xfer, 'has_badge': has_badge})\n for index, (price_cap, gbp, eur) in enumerate(prices):\n if len(prices) == 1 or index == 0:\n tier_name = name + '-std'\n active = True\n elif index == 1:\n tier_name = name + '-early-bird'\n active = False\n elif index == 2:\n tier_name = name + '-late'\n active = False\n if PriceTier.get_by_name('general', 'name', tier_name):\n continue\n pt = PriceTier(name=tier_name, capacity_max=price_cap,\n personal_limit=10, parent=product, active=active)\n Price(currency='GBP', price_int=gbp * 100, price_tier=pt)\n Price(currency='EUR', price_int=eur * 100, price_tier=pt)\n ProductViewProduct(view, product, order)\n order += 1\n db.session.flush()\n misc = [('parking', 'Parking Ticket', 1700, 4, 15, 21,\n \"We're trying to keep cars to a minimum. 
Please take public transport or car-share if you can.\"\n ), ('campervan', 'Caravan/\\u200cCampervan Ticket', 60, 2, 30, 42,\n \"If you bring a caravan, you won't need a separate parking ticket for the towing car.\"\n )]\n for name, display_name, cap, personal_limit, gbp, eur, description in misc:\n if Product.get_by_name(name, name):\n continue\n group = ProductGroup.get_by_name(name)\n product = Product(name=name, display_name=display_name, description\n =description, parent=group)\n pt = PriceTier(name=name, personal_limit=personal_limit, parent=product\n )\n db.session.add(pt)\n db.session.add(Price(currency='GBP', price_int=gbp * 100,\n price_tier=pt))\n db.session.add(Price(currency='EUR', price_int=eur * 100,\n price_tier=pt))\n ProductViewProduct(view, product, order)\n order += 1\n db.session.commit()\n\n\nclass CreateTickets(Command):\n\n def run(self):\n create_product_groups()\n\n\nclass CancelReservedTickets(Command):\n\n def run(self):\n payments = Purchase.query.filter(Purchase.state == 'reserved', \n Purchase.modified < datetime.utcnow() - timedelta(days=3), ~\n Purchase.payment_id.is_(None)).join(Payment).with_entities(Payment\n ).group_by(Payment)\n for payment in payments:\n payment.lock()\n app.logger.info('Cancelling payment %s', payment.id)\n assert payment.state == 'new' and payment.provider in {'gocardless'\n , 'stripe'}\n payment.cancel()\n purchases = Purchase.query.filter(Purchase.state == 'reserved', \n Purchase.modified < datetime.utcnow() - timedelta(days=3),\n Purchase.payment_id.is_(None))\n for purchase in purchases:\n app.logger.info('Cancelling purchase %s', purchase.id)\n purchase.cancel()\n db.session.commit()\n\n\nclass SendTransferReminder(Command):\n\n def run(self):\n pass\n\n\nclass SendTickets(Command):\n\n def run(self):\n pass\n",
"step-4": "from datetime import datetime, timedelta\nfrom flask import current_app as app\nfrom flask_script import Command\nfrom main import db\nfrom models.payment import Payment\nfrom models.product import ProductGroup, Product, PriceTier, Price, ProductView, ProductViewProduct\nfrom models.purchase import Purchase\n\n\ndef create_product_groups():\n top_level_groups = [('admissions', datetime(2018, 9, 3), app.config.get\n ('MAXIMUM_ADMISSIONS')), ('parking', datetime(2018, 9, 3), None), (\n 'campervan', datetime(2018, 9, 3), None), ('merchandise', datetime(\n 2018, 8, 12), None)]\n for name, expires, capacity in top_level_groups:\n if ProductGroup.get_by_name(name):\n continue\n pg = ProductGroup(name=name, type=name, capacity_max=capacity,\n expires=expires)\n db.session.add(pg)\n db.session.flush()\n allocations = [('vendors', 100), ('sponsors', 200), ('speakers', 100),\n ('general', 800)]\n admissions = ProductGroup.get_by_name('admissions')\n for name, capacity in allocations:\n if ProductGroup.get_by_name(name):\n continue\n ProductGroup(name=name, capacity_max=capacity, parent=admissions)\n view = ProductView.get_by_name('main')\n if not view:\n view = ProductView('main', 'tickets')\n db.session.add(view)\n db.session.flush()\n general = ProductGroup.get_by_name('general')\n products = [('full', 'Full Camp Ticket', True, True, None,\n 'Full ticket', ((1500, 115, 135), (250, 105, 125), (None, 125, 145)\n )), ('full-s', 'Full Camp Ticket (Supporter)', True, True, None,\n 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.'\n , ((None, 150, 180),)), ('full-sg',\n 'Full Camp Ticket (Gold Supporter)', True, True, None,\n 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.'\n , ((None, 200, 240),)), ('u18', 'Under-18', True, False, 150,\n 'For visitors born after August 30th, 2000. All under-18s must be accompanied by an adult.'\n , ((None, 55, 63),)), ('u12', 'Under-12', True, False, 50,\n 'For children born after August 30th, 2006. All children must be accompanied by an adult.'\n , ((None, 0, 0),))]\n order = 0\n for name, display_name, has_xfer, has_badge, capacity, description, prices in products:\n if Product.get_by_name('general', name):\n continue\n product = Product(name=name, display_name=display_name,\n capacity_max=capacity, description=description, parent=general,\n attributes={'is_transferable': has_xfer, 'has_badge': has_badge})\n for index, (price_cap, gbp, eur) in enumerate(prices):\n if len(prices) == 1 or index == 0:\n tier_name = name + '-std'\n active = True\n elif index == 1:\n tier_name = name + '-early-bird'\n active = False\n elif index == 2:\n tier_name = name + '-late'\n active = False\n if PriceTier.get_by_name('general', 'name', tier_name):\n continue\n pt = PriceTier(name=tier_name, capacity_max=price_cap,\n personal_limit=10, parent=product, active=active)\n Price(currency='GBP', price_int=gbp * 100, price_tier=pt)\n Price(currency='EUR', price_int=eur * 100, price_tier=pt)\n ProductViewProduct(view, product, order)\n order += 1\n db.session.flush()\n misc = [('parking', 'Parking Ticket', 1700, 4, 15, 21,\n \"We're trying to keep cars to a minimum. 
Please take public transport or car-share if you can.\"\n ), ('campervan', 'Caravan/\\u200cCampervan Ticket', 60, 2, 30, 42,\n \"If you bring a caravan, you won't need a separate parking ticket for the towing car.\"\n )]\n for name, display_name, cap, personal_limit, gbp, eur, description in misc:\n if Product.get_by_name(name, name):\n continue\n group = ProductGroup.get_by_name(name)\n product = Product(name=name, display_name=display_name, description\n =description, parent=group)\n pt = PriceTier(name=name, personal_limit=personal_limit, parent=product\n )\n db.session.add(pt)\n db.session.add(Price(currency='GBP', price_int=gbp * 100,\n price_tier=pt))\n db.session.add(Price(currency='EUR', price_int=eur * 100,\n price_tier=pt))\n ProductViewProduct(view, product, order)\n order += 1\n db.session.commit()\n\n\nclass CreateTickets(Command):\n\n def run(self):\n create_product_groups()\n\n\nclass CancelReservedTickets(Command):\n\n def run(self):\n payments = Purchase.query.filter(Purchase.state == 'reserved', \n Purchase.modified < datetime.utcnow() - timedelta(days=3), ~\n Purchase.payment_id.is_(None)).join(Payment).with_entities(Payment\n ).group_by(Payment)\n for payment in payments:\n payment.lock()\n app.logger.info('Cancelling payment %s', payment.id)\n assert payment.state == 'new' and payment.provider in {'gocardless'\n , 'stripe'}\n payment.cancel()\n purchases = Purchase.query.filter(Purchase.state == 'reserved', \n Purchase.modified < datetime.utcnow() - timedelta(days=3),\n Purchase.payment_id.is_(None))\n for purchase in purchases:\n app.logger.info('Cancelling purchase %s', purchase.id)\n purchase.cancel()\n db.session.commit()\n\n\nclass SendTransferReminder(Command):\n\n def run(self):\n pass\n\n\nclass SendTickets(Command):\n\n def run(self):\n pass\n",
"step-5": "# coding=utf-8\n\nfrom datetime import datetime, timedelta\n\nfrom flask import current_app as app\nfrom flask_script import Command\nfrom main import db\nfrom models.payment import Payment\nfrom models.product import ProductGroup, Product, PriceTier, Price, ProductView, ProductViewProduct\nfrom models.purchase import Purchase\n\n\ndef create_product_groups():\n top_level_groups = [\n # name, capacity, expires\n ('admissions', datetime(2018, 9, 3), app.config.get('MAXIMUM_ADMISSIONS')),\n ('parking', datetime(2018, 9, 3), None),\n ('campervan', datetime(2018, 9, 3), None),\n ('merchandise', datetime(2018, 8, 12), None),\n ]\n for name, expires, capacity in top_level_groups:\n if ProductGroup.get_by_name(name):\n continue\n pg = ProductGroup(name=name, type=name, capacity_max=capacity, expires=expires)\n db.session.add(pg)\n\n db.session.flush()\n\n allocations = [\n # name, capacity\n ('vendors', 100),\n ('sponsors', 200),\n ('speakers', 100),\n ('general', 800),\n ]\n\n admissions = ProductGroup.get_by_name('admissions')\n for name, capacity in allocations:\n if ProductGroup.get_by_name(name):\n continue\n ProductGroup(name=name, capacity_max=capacity, parent=admissions)\n\n view = ProductView.get_by_name('main')\n if not view:\n view = ProductView('main', 'tickets')\n db.session.add(view)\n\n db.session.flush()\n\n general = ProductGroup.get_by_name('general')\n\n products = [\n # name, display name, transferable, badge, capacity, description, (std cap, gbp eur), (early cap, gbp, eur), (late cap, gbp, eur)\n ('full', 'Full Camp Ticket', True, True, None, 'Full ticket',\n ((1500, 115, 135), (250, 105, 125), (None, 125, 145))\n ),\n ('full-s', 'Full Camp Ticket (Supporter)', True, True, None, 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.',\n ((None, 150, 180),)\n ),\n ('full-sg', 'Full Camp Ticket (Gold Supporter)', True, True, None, 'Support this non-profit event by paying a bit more. All money will go towards making EMF more awesome.',\n ((None, 200, 240),)\n ),\n ('u18', 'Under-18', True, False, 150, 'For visitors born after August 30th, 2000. All under-18s must be accompanied by an adult.',\n ((None, 55, 63),)\n ),\n ('u12', 'Under-12', True, False, 50, 'For children born after August 30th, 2006. 
All children must be accompanied by an adult.',\n ((None, 0, 0),)\n ),\n ]\n\n order = 0\n\n for name, display_name, has_xfer, has_badge, capacity, description, prices in products:\n if Product.get_by_name('general', name):\n continue\n product = Product(name=name, display_name=display_name, capacity_max=capacity,\n description=description, parent=general,\n attributes={'is_transferable': has_xfer,\n 'has_badge': has_badge})\n\n for index, (price_cap, gbp, eur) in enumerate(prices):\n if len(prices) == 1 or index == 0:\n tier_name = name + '-std'\n active = True\n\n elif index == 1:\n tier_name = name + '-early-bird'\n active = False\n\n elif index == 2:\n tier_name = name + '-late'\n active = False\n\n if PriceTier.get_by_name('general', 'name', tier_name):\n continue\n\n pt = PriceTier(name=tier_name, capacity_max=price_cap, personal_limit=10, parent=product, active=active)\n Price(currency='GBP', price_int=gbp * 100, price_tier=pt)\n Price(currency='EUR', price_int=eur * 100, price_tier=pt)\n\n ProductViewProduct(view, product, order)\n order += 1\n\n db.session.flush()\n\n misc = [\n # name, display_name, cap, personal_limit, gbp, eur, description\n ('parking', 'Parking Ticket', 1700, 4, 15, 21, \"We're trying to keep cars to a minimum. Please take public transport or car-share if you can.\"),\n ('campervan', 'Caravan/\\u200cCampervan Ticket', 60, 2, 30, 42, \"If you bring a caravan, you won't need a separate parking ticket for the towing car.\"),\n ]\n\n for name, display_name, cap, personal_limit, gbp, eur, description in misc:\n if Product.get_by_name(name, name):\n continue\n\n group = ProductGroup.get_by_name(name)\n product = Product(name=name, display_name=display_name, description=description, parent=group)\n pt = PriceTier(name=name, personal_limit=personal_limit, parent=product)\n db.session.add(pt)\n db.session.add(Price(currency='GBP', price_int=gbp * 100, price_tier=pt))\n db.session.add(Price(currency='EUR', price_int=eur * 100, price_tier=pt))\n\n ProductViewProduct(view, product, order)\n order += 1\n\n db.session.commit()\n\n # ('t-shirt', 'T-Shirt', 200, 10, 10, 12, \"Pre-order the official Electromagnetic Field t-shirt. 
T-shirts will be available to collect during the event.\"),\n\nclass CreateTickets(Command):\n def run(self):\n create_product_groups()\n\n\nclass CancelReservedTickets(Command):\n def run(self):\n # Payments where someone started the process but didn't complete\n payments = Purchase.query.filter(\n Purchase.state == 'reserved',\n Purchase.modified < datetime.utcnow() - timedelta(days=3),\n ~Purchase.payment_id.is_(None),\n ).join(Payment).with_entities(Payment).group_by(Payment)\n\n for payment in payments:\n payment.lock()\n app.logger.info('Cancelling payment %s', payment.id)\n assert payment.state == 'new' and payment.provider in {'gocardless', 'stripe'}\n payment.cancel()\n\n # Purchases that were added to baskets but not checked out\n purchases = Purchase.query.filter(\n Purchase.state == 'reserved',\n Purchase.modified < datetime.utcnow() - timedelta(days=3),\n Purchase.payment_id.is_(None),\n )\n for purchase in purchases:\n app.logger.info('Cancelling purchase %s', purchase.id)\n purchase.cancel()\n\n db.session.commit()\n\nclass SendTransferReminder(Command):\n\n def run(self):\n pass\n # users_to_email = User.query.join(Ticket, TicketType).filter(\n # TicketType.admits == 'full',\n # Ticket.paid == True, # noqa: E712\n # Ticket.transfer_reminder_sent == False,\n # ).group_by(User).having(func.count() > 1)\n\n # for user in users_to_email:\n # msg = Message(\"Your Electromagnetic Field Tickets\",\n # sender=app.config['TICKETS_EMAIL'],\n # recipients=[user.email])\n\n # msg.body = render_template(\"emails/transfer-reminder.txt\", user=user)\n\n # app.logger.info('Emailing %s transfer reminder', user.email)\n # mail.send(msg)\n\n # for ticket in user.tickets:\n # ticket.transfer_reminder_sent = True\n # db.session.commit()\n\n\nclass SendTickets(Command):\n\n def run(self):\n pass\n # paid_items = Ticket.query.filter_by(paid=True).join(TicketType).filter(or_(\n # TicketType.admits.in_(['full', 'kid', 'car', 'campervan']),\n # TicketType.fixed_id.in_(range(14, 24))))\n\n # users = (paid_items.filter(Ticket.emailed == False).join(User) # noqa: E712\n # .group_by(User).with_entities(User).order_by(User.id))\n\n # for user in users:\n # user_tickets = Ticket.query.filter_by(paid=True).join(TicketType, User).filter(\n # TicketType.admits.in_(['full', 'kid', 'car', 'campervan']),\n # User.id == user.id)\n\n # plural = (user_tickets.count() != 1 and 's' or '')\n\n # msg = Message(\"Your Electromagnetic Field Ticket%s\" % plural,\n # sender=app.config['TICKETS_EMAIL'],\n # recipients=[user.email])\n\n # msg.body = render_template(\"emails/receipt.txt\", user=user)\n\n # attach_tickets(msg, user)\n\n # app.logger.info('Emailing %s receipt for %s tickets', user.email, user_tickets.count())\n # mail.send(msg)\n\n # db.session.commit()\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
<|reserved_special_token_1|>
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# @Author: huerke
# @Date: 2016-09-03 10:55:54
# @Last Modified by: huerke
# @Last Modified time: 2016-09-03 15:54:50
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
|
flexible
|
{
"blob_id": "021cbd1bd22f9ec48db2e52b2a98be169bbfdbbd",
"index": 5979,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@main.app_errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@main.app_errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@main.app_errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n",
"step-4": "from flask import render_template\nfrom . import main\n\n\n@main.app_errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@main.app_errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Author: huerke\n# @Date: 2016-09-03 10:55:54\n# @Last Modified by: huerke\n# @Last Modified time: 2016-09-03 15:54:50\nfrom flask import render_template\nfrom . import main\n\n\n@main.app_errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@main.app_errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from os import environ as env
import json
import utils
import utils.aws as aws
import utils.handlers as handlers
def put_record_to_logstream(event: utils.LambdaEvent) -> str:
"""Put a record of source Lambda execution in LogWatch Logs."""
log_group_name = env["REPORT_LOG_GROUP_NAME"]
utils.Log.info("Fetching requestPayload and responsePayload")
req, res = event["requestPayload"], event["responsePayload"]
utils.Log.info("Fetching requestPayload content")
sns_payload = req["Records"][0]["Sns"]
message_id = sns_payload["MessageId"]
message = json.loads(sns_payload["Message"])
url, title = message["url"], message["title"]
try:
body = json.loads(res["body"])
except json.JSONDecodeError as error:
raise utils.HandledError("Failed decoding payload: %s" % error)
name, timestamp = body["name"], body["timestamp"]
if res["statusCode"] != 200:
raise utils.HandledError("Source lambda '%s' failed with status code %d, "
"ignoring report" % (name, res["statusCode"]))
return aws.send_event_to_logstream(log_group=log_group_name,
log_stream=name,
message={
"url": url,
"MessageId": message_id,
"title": title,
"timestamp": timestamp,
})
def handler(event, context) -> utils.Response:
"""Lambda entry point."""
return handlers.EventHandler(
name="send_report",
event=utils.LambdaEvent(event),
context=utils.LambdaContext(context),
action=put_record_to_logstream,
).response
|
normal
|
{
"blob_id": "01d545e77c211201332a637a493d27608721aad5",
"index": 7004,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef put_record_to_logstream(event: utils.LambdaEvent) ->str:\n \"\"\"Put a record of source Lambda execution in LogWatch Logs.\"\"\"\n log_group_name = env['REPORT_LOG_GROUP_NAME']\n utils.Log.info('Fetching requestPayload and responsePayload')\n req, res = event['requestPayload'], event['responsePayload']\n utils.Log.info('Fetching requestPayload content')\n sns_payload = req['Records'][0]['Sns']\n message_id = sns_payload['MessageId']\n message = json.loads(sns_payload['Message'])\n url, title = message['url'], message['title']\n try:\n body = json.loads(res['body'])\n except json.JSONDecodeError as error:\n raise utils.HandledError('Failed decoding payload: %s' % error)\n name, timestamp = body['name'], body['timestamp']\n if res['statusCode'] != 200:\n raise utils.HandledError(\n \"Source lambda '%s' failed with status code %d, ignoring report\" %\n (name, res['statusCode']))\n return aws.send_event_to_logstream(log_group=log_group_name, log_stream\n =name, message={'url': url, 'MessageId': message_id, 'title': title,\n 'timestamp': timestamp})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef put_record_to_logstream(event: utils.LambdaEvent) ->str:\n \"\"\"Put a record of source Lambda execution in LogWatch Logs.\"\"\"\n log_group_name = env['REPORT_LOG_GROUP_NAME']\n utils.Log.info('Fetching requestPayload and responsePayload')\n req, res = event['requestPayload'], event['responsePayload']\n utils.Log.info('Fetching requestPayload content')\n sns_payload = req['Records'][0]['Sns']\n message_id = sns_payload['MessageId']\n message = json.loads(sns_payload['Message'])\n url, title = message['url'], message['title']\n try:\n body = json.loads(res['body'])\n except json.JSONDecodeError as error:\n raise utils.HandledError('Failed decoding payload: %s' % error)\n name, timestamp = body['name'], body['timestamp']\n if res['statusCode'] != 200:\n raise utils.HandledError(\n \"Source lambda '%s' failed with status code %d, ignoring report\" %\n (name, res['statusCode']))\n return aws.send_event_to_logstream(log_group=log_group_name, log_stream\n =name, message={'url': url, 'MessageId': message_id, 'title': title,\n 'timestamp': timestamp})\n\n\ndef handler(event, context) ->utils.Response:\n \"\"\"Lambda entry point.\"\"\"\n return handlers.EventHandler(name='send_report', event=utils.\n LambdaEvent(event), context=utils.LambdaContext(context), action=\n put_record_to_logstream).response\n",
"step-4": "from os import environ as env\nimport json\nimport utils\nimport utils.aws as aws\nimport utils.handlers as handlers\n\n\ndef put_record_to_logstream(event: utils.LambdaEvent) ->str:\n \"\"\"Put a record of source Lambda execution in LogWatch Logs.\"\"\"\n log_group_name = env['REPORT_LOG_GROUP_NAME']\n utils.Log.info('Fetching requestPayload and responsePayload')\n req, res = event['requestPayload'], event['responsePayload']\n utils.Log.info('Fetching requestPayload content')\n sns_payload = req['Records'][0]['Sns']\n message_id = sns_payload['MessageId']\n message = json.loads(sns_payload['Message'])\n url, title = message['url'], message['title']\n try:\n body = json.loads(res['body'])\n except json.JSONDecodeError as error:\n raise utils.HandledError('Failed decoding payload: %s' % error)\n name, timestamp = body['name'], body['timestamp']\n if res['statusCode'] != 200:\n raise utils.HandledError(\n \"Source lambda '%s' failed with status code %d, ignoring report\" %\n (name, res['statusCode']))\n return aws.send_event_to_logstream(log_group=log_group_name, log_stream\n =name, message={'url': url, 'MessageId': message_id, 'title': title,\n 'timestamp': timestamp})\n\n\ndef handler(event, context) ->utils.Response:\n \"\"\"Lambda entry point.\"\"\"\n return handlers.EventHandler(name='send_report', event=utils.\n LambdaEvent(event), context=utils.LambdaContext(context), action=\n put_record_to_logstream).response\n",
"step-5": "from os import environ as env\nimport json\n\nimport utils\nimport utils.aws as aws\nimport utils.handlers as handlers\n\n\ndef put_record_to_logstream(event: utils.LambdaEvent) -> str:\n \"\"\"Put a record of source Lambda execution in LogWatch Logs.\"\"\"\n log_group_name = env[\"REPORT_LOG_GROUP_NAME\"]\n\n utils.Log.info(\"Fetching requestPayload and responsePayload\")\n req, res = event[\"requestPayload\"], event[\"responsePayload\"]\n\n utils.Log.info(\"Fetching requestPayload content\")\n sns_payload = req[\"Records\"][0][\"Sns\"]\n\n message_id = sns_payload[\"MessageId\"]\n message = json.loads(sns_payload[\"Message\"])\n url, title = message[\"url\"], message[\"title\"]\n\n try:\n body = json.loads(res[\"body\"])\n\n except json.JSONDecodeError as error:\n raise utils.HandledError(\"Failed decoding payload: %s\" % error)\n\n name, timestamp = body[\"name\"], body[\"timestamp\"]\n\n if res[\"statusCode\"] != 200:\n raise utils.HandledError(\"Source lambda '%s' failed with status code %d, \"\n \"ignoring report\" % (name, res[\"statusCode\"]))\n\n return aws.send_event_to_logstream(log_group=log_group_name,\n log_stream=name,\n message={\n \"url\": url,\n \"MessageId\": message_id,\n \"title\": title,\n \"timestamp\": timestamp,\n })\n\n\ndef handler(event, context) -> utils.Response:\n \"\"\"Lambda entry point.\"\"\"\n return handlers.EventHandler(\n name=\"send_report\",\n event=utils.LambdaEvent(event),\n context=utils.LambdaContext(context),\n action=put_record_to_logstream,\n ).response\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import botocore
class s3Obj:
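    """Basic metadata for a single S3 object, plus its ACL permissions once fetched via getACL."""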
def __init__(self, name, bucket_name, size, last_modified, storage_class):
self.name = name
self.size = size
self.last_modified = last_modified
self.storage_class = storage_class
self.bucket_name = bucket_name
self.acl = []
def getACL(self, client_s3):
'''
get ACL info and update the object
'''
try:
response = client_s3.get_object_acl(Bucket=self.bucket_name, Key=self.name)
for permission in response['Grants']:
self.acl.append(permission['Permission'])
except botocore.exceptions.ClientError as e:
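            # re-raise the ClientError unchanged so the caller can handle it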
raise
|
normal
|
{
"blob_id": "b3f376f4aec81cae853f996a74062e32bb4a8fa3",
"index": 2569,
"step-1": "<mask token>\n\n\nclass s3Obj:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass s3Obj:\n <mask token>\n\n def getACL(self, client_s3):\n \"\"\"\n get ACL info and update the object\n \"\"\"\n try:\n response = client_s3.get_object_acl(Bucket=self.bucket_name,\n Key=self.name)\n for permission in response['Grants']:\n self.acl.append(permission['Permission'])\n except botocore.exceptions.ClientError as e:\n raise\n",
"step-3": "<mask token>\n\n\nclass s3Obj:\n\n def __init__(self, name, bucket_name, size, last_modified, storage_class):\n self.name = name\n self.size = size\n self.last_modified = last_modified\n self.storage_class = storage_class\n self.bucket_name = bucket_name\n self.acl = []\n\n def getACL(self, client_s3):\n \"\"\"\n get ACL info and update the object\n \"\"\"\n try:\n response = client_s3.get_object_acl(Bucket=self.bucket_name,\n Key=self.name)\n for permission in response['Grants']:\n self.acl.append(permission['Permission'])\n except botocore.exceptions.ClientError as e:\n raise\n",
"step-4": "import botocore\n\n\nclass s3Obj:\n\n def __init__(self, name, bucket_name, size, last_modified, storage_class):\n self.name = name\n self.size = size\n self.last_modified = last_modified\n self.storage_class = storage_class\n self.bucket_name = bucket_name\n self.acl = []\n\n def getACL(self, client_s3):\n \"\"\"\n get ACL info and update the object\n \"\"\"\n try:\n response = client_s3.get_object_acl(Bucket=self.bucket_name,\n Key=self.name)\n for permission in response['Grants']:\n self.acl.append(permission['Permission'])\n except botocore.exceptions.ClientError as e:\n raise\n",
"step-5": "import botocore\n\nclass s3Obj:\n \n def __init__(self, name, bucket_name, size, last_modified, storage_class):\n \n self.name = name\n self.size = size\n self.last_modified = last_modified\n self.storage_class = storage_class\n self.bucket_name = bucket_name\n self.acl = []\n\n def getACL(self, client_s3):\n '''\n get ACL info and update the object\n '''\n \n try:\n \n response = client_s3.get_object_acl(Bucket=self.bucket_name, Key=self.name)\n \n for permission in response['Grants']:\n \n self.acl.append(permission['Permission'])\n \n except botocore.exceptions.ClientError as e:\n \n raise",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,
t_m2p, Q, E, hkl_projection, Nbuffer=100000)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
beam_neutrons_path = (
'/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'
)
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = (
'/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'
)
psi = -0.005846744654920276
hkl2Q = array([[-0.65520642, 0.93819023, 0.0], [0.66340068, 0.4633009, -
0.80916512], [-0.66340068, -0.4633009, -0.80916512]])
pp = array([-0.88585691, 2.86622706, -0.61241657])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],
pp[0]))
t_m2p = 0.007188343409318038
Q = array([4.75696626, -3.03446862, 0.64836415])
E = 8.449417182910302
hkl_projection = array([0.70608101, 0.61545409, 0.14251389])
urc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,
t_m2p, Q, E, hkl_projection, Nbuffer=100000)
<|reserved_special_token_1|>
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc
beam_neutrons_path = (
'/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'
)
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = (
'/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'
)
psi = -0.005846744654920276
hkl2Q = array([[-0.65520642, 0.93819023, 0.0], [0.66340068, 0.4633009, -
0.80916512], [-0.66340068, -0.4633009, -0.80916512]])
pp = array([-0.88585691, 2.86622706, -0.61241657])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],
pp[0]))
t_m2p = 0.007188343409318038
Q = array([4.75696626, -3.03446862, 0.64836415])
E = 8.449417182910302
hkl_projection = array([0.70608101, 0.61545409, 0.14251389])
urc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,
t_m2p, Q, E, hkl_projection, Nbuffer=100000)
<|reserved_special_token_1|>
#!/usr/bin/env python
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc
beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'
psi = -0.005846744654920276
hkl2Q = array([[-0.65520642, 0.93819023, 0. ],
[ 0.66340068, 0.4633009 , -0.80916512],
[-0.66340068, -0.4633009 , -0.80916512]])
pp = array([-0.88585691, 2.86622706, -0.61241657])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))
t_m2p = 0.0071883434093180376
Q = array([ 4.75696626, -3.03446862, 0.64836415])
E = 8.4494171829103024
hkl_projection = array([ 0.70608101, 0.61545409, 0.14251389])
urc.run(
beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,
Q, E, hkl_projection, Nbuffer=100000)
|
flexible
|
{
"blob_id": "de286b94e09db477e3d920a9eff1a299474baf20",
"index": 2614,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n",
"step-3": "<mask token>\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\n )\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0.0], [0.66340068, 0.4633009, -\n 0.80916512], [-0.66340068, -0.4633009, -0.80916512]])\npp = array([-0.88585691, 2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.007188343409318038\nQ = array([4.75696626, -3.03446862, 0.64836415])\nE = 8.449417182910302\nhkl_projection = array([0.70608101, 0.61545409, 0.14251389])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n",
"step-4": "import mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\n )\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0.0], [0.66340068, 0.4633009, -\n 0.80916512], [-0.66340068, -0.4633009, -0.80916512]])\npp = array([-0.88585691, 2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.007188343409318038\nQ = array([4.75696626, -3.03446862, 0.64836415])\nE = 8.449417182910302\nhkl_projection = array([0.70608101, 0.61545409, 0.14251389])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n",
"step-5": "#!/usr/bin/env python\nimport mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0. ],\n [ 0.66340068, 0.4633009 , -0.80916512],\n [-0.66340068, -0.4633009 , -0.80916512]])\npp = array([-0.88585691, 2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))\nt_m2p = 0.0071883434093180376\nQ = array([ 4.75696626, -3.03446862, 0.64836415])\nE = 8.4494171829103024\nhkl_projection = array([ 0.70608101, 0.61545409, 0.14251389])\nurc.run(\n beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,\n Q, E, hkl_projection, Nbuffer=100000)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_square_sum(num):
_sum = 0
while num > 0:
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def find_happy_number(num):
slow, fast = num, num
while True:
slow = find_square_sum(slow)
fast = find_square_sum(find_square_sum(fast))
if slow == fast:
break
return slow == 1
def find_square_sum(num):
_sum = 0
while num > 0:
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def find_happy_number(num):
slow, fast = num, num
while True:
slow = find_square_sum(slow)
fast = find_square_sum(find_square_sum(fast))
if slow == fast:
break
return slow == 1
def find_square_sum(num):
_sum = 0
while num > 0:
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
print(find_happy_number(23))
print(find_happy_number(12))
<|reserved_special_token_1|>
def find_happy_number(num):
slow, fast = num, num
while True:
slow = find_square_sum(slow) # move one step
fast = find_square_sum(find_square_sum(fast)) # move two steps
if slow == fast: # found the cycle
break
return slow == 1 # see if the cycle is stuck on the number '1'
def find_square_sum(num):
_sum = 0
while (num > 0):
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
print(find_happy_number(23))
print(find_happy_number(12))
|
flexible
|
{
"blob_id": "60b5e515c7275bfa0f79e22f54302a578c2f7b79",
"index": 728,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_square_sum(num):\n _sum = 0\n while num > 0:\n digit = num % 10\n _sum += digit * digit\n num //= 10\n return _sum\n\n\n<mask token>\n",
"step-3": "def find_happy_number(num):\n slow, fast = num, num\n while True:\n slow = find_square_sum(slow)\n fast = find_square_sum(find_square_sum(fast))\n if slow == fast:\n break\n return slow == 1\n\n\ndef find_square_sum(num):\n _sum = 0\n while num > 0:\n digit = num % 10\n _sum += digit * digit\n num //= 10\n return _sum\n\n\n<mask token>\n",
"step-4": "def find_happy_number(num):\n slow, fast = num, num\n while True:\n slow = find_square_sum(slow)\n fast = find_square_sum(find_square_sum(fast))\n if slow == fast:\n break\n return slow == 1\n\n\ndef find_square_sum(num):\n _sum = 0\n while num > 0:\n digit = num % 10\n _sum += digit * digit\n num //= 10\n return _sum\n\n\nprint(find_happy_number(23))\nprint(find_happy_number(12))\n",
"step-5": "def find_happy_number(num):\n slow, fast = num, num\n while True:\n slow = find_square_sum(slow) # move one step\n fast = find_square_sum(find_square_sum(fast)) # move two steps\n if slow == fast: # found the cycle\n break\n return slow == 1 # see if the cycle is stuck on the number '1'\n\n\ndef find_square_sum(num):\n _sum = 0\n while (num > 0):\n digit = num % 10\n _sum += digit * digit\n num //= 10\n return _sum\n\nprint(find_happy_number(23)) \nprint(find_happy_number(12))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Overview file
#import python classes
import numpy as np
import random as rn
import math
import matplotlib.pyplot as plt
import pylab
from mpl_toolkits.mplot3d import Axes3D
#import self produced classes
import forcemodule as fm
import init_sys
# independent parameters
dt = 0.004
N=2048
lpnum = 1000
density = 0.85
temp = 0.8
# Loading initial conditions
mom = init_sys.init_mom(N, temp)
pos, l = init_sys.init_pos(N, density)
forces = init_sys.init_forc(N)
pot = init_sys.init_pot(N)
print N, 'N'
# Iteration Verlet method
forces, pot = fm.calc_forces(pos,forces,pot,l,[N])
formersummom = 0
for lp in range(lpnum):
mom = mom + forces*0.5*dt
    pos = (pos + mom*dt) % l # % l means modulo of l, hence it adds/subtracts n*l until 0<pos<l
forces, pot = fm.calc_forces(pos,forces,pot,l,[N])
mom = mom + forces*0.5*dt
Ken = np.sum(mom*mom*0.5, axis=1)
toten = sum(Ken) - sum(pot)
print toten, np.sum(mom)
'''
fig = pylab.figure()
ax = Axes3D(fig)
ax.scatter(pos[:,0],pos[:,1],pos[:,2],c='b')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
'''
# Plotting the positions
|
normal
|
{
"blob_id": "63c214d9e831356345ba2eee68634af36964dcff",
"index": 550,
"step-1": "# Overview file\n\n#import python classes\nimport numpy as np\nimport random as rn\nimport math\nimport matplotlib.pyplot as plt\nimport pylab\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\n#import self produced classes\nimport forcemodule as fm\nimport init_sys\n\n\n# independent parameters\ndt = 0.004\nN=2048\nlpnum = 1000\ndensity = 0.85\ntemp = 0.8\n\n\n# Loading initial conditions\nmom = init_sys.init_mom(N, temp) \npos, l = init_sys.init_pos(N, density) \nforces = init_sys.init_forc(N)\npot = init_sys.init_pot(N)\n\nprint N, 'N'\n\n\n\n\n# Iteration Verlet method\n\nforces, pot = fm.calc_forces(pos,forces,pot,l,[N])\nformersummom = 0\nfor lp in range(lpnum):\n mom = mom + forces*0.5*dt\n pos = (pos + mom*dt) % l # % l means modulo of l, hence it adds/subtracts n*l untill 0<pos<l\n forces, pot = fm.calc_forces(pos,forces,pot,l,[N])\n mom = mom + forces*0.5*dt\n Ken = np.sum(mom*mom*0.5, axis=1)\n toten = sum(Ken) - sum(pot)\n print toten, np.sum(mom)\n'''\n fig = pylab.figure()\n ax = Axes3D(fig) \n ax.scatter(pos[:,0],pos[:,1],pos[:,2],c='b')\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()\n '''\n\n\n\n\n# Plotting the positions\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding:utf-8 -*-
from odoo import api, fields, _, models
Type_employee = [('j', 'Journalier'), ('m', 'Mensuel')]
class HrCnpsSettings(models.Model):
_name = "hr.cnps.setting"
_description = "settings of CNPS"
name = fields.Char("Libellé", required=True)
active = fields.Boolean("Actif", default=True)
sequence = fields.Integer('Sequence', default=10)
amount_min = fields.Float("Montant Min")
amount_max = fields.Float('Montant Max')
type = fields.Selection(Type_employee, 'Type', required=False, default=False)
class HrCnpsCotisationLineTemplate(models.Model):
_name = "hr.cnps.cotisation.line.template"
_description = "hr cnps cotisation line template"
name = fields.Char("Designation", required=True)
company_id = fields.Many2one("res.company", "Société", required=True, default=lambda self: self.env.user.company_id.id)
taux = fields.Float("Taux")
sequence = fields.Integer("Sequence", default=10)
active = fields.Boolean("Actif", default=True)
type = fields.Selection([('cnps', 'Régime de retraite'), ('other', 'Autres régimes')], 'Type')
account_id = fields.Many2one('account.account', 'Compte comptable associé', required=False, domain="[('company_id', '=', company_id)]")
|
normal
|
{
"blob_id": "4f7b689c06383673b510092932b051c644306b84",
"index": 3500,
"step-1": "<mask token>\n\n\nclass HrCnpsSettings(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass HrCnpsCotisationLineTemplate(models.Model):\n _name = 'hr.cnps.cotisation.line.template'\n _description = 'hr cnps cotisation line template'\n name = fields.Char('Designation', required=True)\n company_id = fields.Many2one('res.company', 'Société', required=True,\n default=lambda self: self.env.user.company_id.id)\n taux = fields.Float('Taux')\n sequence = fields.Integer('Sequence', default=10)\n active = fields.Boolean('Actif', default=True)\n type = fields.Selection([('cnps', 'Régime de retraite'), ('other',\n 'Autres régimes')], 'Type')\n account_id = fields.Many2one('account.account',\n 'Compte comptable associé', required=False, domain=\n \"[('company_id', '=', company_id)]\")\n",
"step-2": "<mask token>\n\n\nclass HrCnpsSettings(models.Model):\n _name = 'hr.cnps.setting'\n _description = 'settings of CNPS'\n name = fields.Char('Libellé', required=True)\n active = fields.Boolean('Actif', default=True)\n sequence = fields.Integer('Sequence', default=10)\n amount_min = fields.Float('Montant Min')\n amount_max = fields.Float('Montant Max')\n type = fields.Selection(Type_employee, 'Type', required=False, default=\n False)\n\n\nclass HrCnpsCotisationLineTemplate(models.Model):\n _name = 'hr.cnps.cotisation.line.template'\n _description = 'hr cnps cotisation line template'\n name = fields.Char('Designation', required=True)\n company_id = fields.Many2one('res.company', 'Société', required=True,\n default=lambda self: self.env.user.company_id.id)\n taux = fields.Float('Taux')\n sequence = fields.Integer('Sequence', default=10)\n active = fields.Boolean('Actif', default=True)\n type = fields.Selection([('cnps', 'Régime de retraite'), ('other',\n 'Autres régimes')], 'Type')\n account_id = fields.Many2one('account.account',\n 'Compte comptable associé', required=False, domain=\n \"[('company_id', '=', company_id)]\")\n",
"step-3": "<mask token>\nType_employee = [('j', 'Journalier'), ('m', 'Mensuel')]\n\n\nclass HrCnpsSettings(models.Model):\n _name = 'hr.cnps.setting'\n _description = 'settings of CNPS'\n name = fields.Char('Libellé', required=True)\n active = fields.Boolean('Actif', default=True)\n sequence = fields.Integer('Sequence', default=10)\n amount_min = fields.Float('Montant Min')\n amount_max = fields.Float('Montant Max')\n type = fields.Selection(Type_employee, 'Type', required=False, default=\n False)\n\n\nclass HrCnpsCotisationLineTemplate(models.Model):\n _name = 'hr.cnps.cotisation.line.template'\n _description = 'hr cnps cotisation line template'\n name = fields.Char('Designation', required=True)\n company_id = fields.Many2one('res.company', 'Société', required=True,\n default=lambda self: self.env.user.company_id.id)\n taux = fields.Float('Taux')\n sequence = fields.Integer('Sequence', default=10)\n active = fields.Boolean('Actif', default=True)\n type = fields.Selection([('cnps', 'Régime de retraite'), ('other',\n 'Autres régimes')], 'Type')\n account_id = fields.Many2one('account.account',\n 'Compte comptable associé', required=False, domain=\n \"[('company_id', '=', company_id)]\")\n",
"step-4": "from odoo import api, fields, _, models\nType_employee = [('j', 'Journalier'), ('m', 'Mensuel')]\n\n\nclass HrCnpsSettings(models.Model):\n _name = 'hr.cnps.setting'\n _description = 'settings of CNPS'\n name = fields.Char('Libellé', required=True)\n active = fields.Boolean('Actif', default=True)\n sequence = fields.Integer('Sequence', default=10)\n amount_min = fields.Float('Montant Min')\n amount_max = fields.Float('Montant Max')\n type = fields.Selection(Type_employee, 'Type', required=False, default=\n False)\n\n\nclass HrCnpsCotisationLineTemplate(models.Model):\n _name = 'hr.cnps.cotisation.line.template'\n _description = 'hr cnps cotisation line template'\n name = fields.Char('Designation', required=True)\n company_id = fields.Many2one('res.company', 'Société', required=True,\n default=lambda self: self.env.user.company_id.id)\n taux = fields.Float('Taux')\n sequence = fields.Integer('Sequence', default=10)\n active = fields.Boolean('Actif', default=True)\n type = fields.Selection([('cnps', 'Régime de retraite'), ('other',\n 'Autres régimes')], 'Type')\n account_id = fields.Many2one('account.account',\n 'Compte comptable associé', required=False, domain=\n \"[('company_id', '=', company_id)]\")\n",
"step-5": "# -*- coding:utf-8 -*-\r\n\r\n\r\nfrom odoo import api, fields, _, models\r\n\r\nType_employee = [('j', 'Journalier'), ('m', 'Mensuel')]\r\n\r\nclass HrCnpsSettings(models.Model):\r\n _name = \"hr.cnps.setting\"\r\n _description = \"settings of CNPS\"\r\n\r\n name = fields.Char(\"Libellé\", required=True)\r\n active = fields.Boolean(\"Actif\", default=True)\r\n sequence = fields.Integer('Sequence', default=10)\r\n amount_min = fields.Float(\"Montant Min\")\r\n amount_max = fields.Float('Montant Max')\r\n type = fields.Selection(Type_employee, 'Type', required=False, default=False)\r\n\r\n\r\nclass HrCnpsCotisationLineTemplate(models.Model):\r\n _name = \"hr.cnps.cotisation.line.template\"\r\n _description = \"hr cnps cotisation line template\"\r\n\r\n name = fields.Char(\"Designation\", required=True)\r\n company_id = fields.Many2one(\"res.company\", \"Société\", required=True, default=lambda self: self.env.user.company_id.id)\r\n taux = fields.Float(\"Taux\")\r\n sequence = fields.Integer(\"Sequence\", default=10)\r\n active = fields.Boolean(\"Actif\", default=True)\r\n type = fields.Selection([('cnps', 'Régime de retraite'), ('other', 'Autres régimes')], 'Type')\r\n account_id = fields.Many2one('account.account', 'Compte comptable associé', required=False, domain=\"[('company_id', '=', company_id)]\")",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/python
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import seaborn as sns
import os
from pathlib import Path
#import math
#functions related to elasticity of WLC
def f1(l,lp, kbt):
return kbt/lp*(1./(4*(1.-l)*(1.-l))-.25+l)
def derf1(l,lp,kbt):
return kbt/lp*(.5/((1.-l)*(1.-l)*(1.-l))+1.)
def xdefWLC(kbt, l, p, f):
l0=.9999
lnew=l0-(f1(l0,p,kbt)-f)/derf1(l0,p,kbt)
if abs(f)<1.e-5: return 0.0
while abs(l0-lnew)>1.e-5:
l0=lnew
lnew=l0-(f1(l0,p,kbt)-f)/derf1(l0,p,kbt)
xdef=l*lnew
return xdef
def intfdexWLC (kbt, l, p, f):
l0=xdefWLC(kbt,l,p,f)
return kbt*l/(4.*p)*(1./(1.-l0/l)+2*l0*l0/(l*l)-l0/l)
def stretching_energy(K,l,p,f,fmin):
total= intfdexWLC(K,l,p,f)-intfdexWLC(K,l,p,fmin)
return total
forces = [ 5 ,6 ,7 , 8, 9, 10, 11, 12]
delimiter = " "
p = 0.75
K = 4.114
l = 0.66
#value of mu(pN/nm), N, etc
Deltamu=2.0*K #chemical potential of 1 ATP 3*K
Deltabp=2.0*K #free-energy of 1 bp opening
n_atp=1. #Number of bp opened by 1-step of the motor
force = forces[7]
#frequency bps
k0=1000000.
#montecarlo step = 1e-7
dt=0.5/(k0*5)
fmin=0.0
'''
print (DeltaG)
print(n_atp*Deltamu)
print(Deltabp)
print(2.*force*xdefWLC(K,l,p,force))
print(2.*stretching_energy(K,l,p,force,fmin))
#time.sleep(30.5)
print("pre")
print(k0*np.exp(-DeltaG/(2.*K))*dt)
print("pun")
print(k0*np.exp(DeltaG/(2.*K))*dt)
'''
for i in range(1,2):
force=forces[i]
fmin=0.0
DeltaG=n_atp*Deltamu-Deltabp+2.*force*xdefWLC(K,l,p,force)-2.*stretching_energy(K,l,p,force,fmin)
Ateo=(n_atp*Deltamu/(2*xdefWLC(K,l,p,force))-Deltabp/(2*xdefWLC(K,l,p,force))+force-1.*stretching_energy(K,l,p,force,fmin)/xdefWLC(K,l,p,force))/K
print(Ateo)
for j in range (0,1):
		#CHECK parameter dt is the same as for the files processed in hist
Dt=10*dt*2**j
#print(Dt)
dn=10*2**j
n=0
folder='/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'
filein='%s%s_%dhist.txt' %(folder,force,dn)
#File_in Dx, p(Dx)
foldout='/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'
#reading input file
data=np.loadtxt(filein)
data2=np.asarray(data)
dx=data2[:,0]
pdx=data2[:,1]
#computing the average <Dx>
dxav=0.
pdxav=0.
		## The probabilities must be renormalized again (discarding the 0 changed the integral)
for k in range(0,len(dx)):
dxav+=dx[k]*pdx[k]
pdxav+=pdx[k]
dxav/=pdxav
pdx/=pdxav
print(force)
print(Dt)
print(dxav)
print("\n")
epsilon=0.08
delta=0.4
y=[]
x=[]
for k in range(0,len(dx)):
if (dx[k]<=0.) & (dx[k]<=-delta):
for l in range(0,len(dx)):
#comprovem si x[l]+x[k]<epsilon --> computem
if (dx[l]>=0.) & (dx[l]>=delta):
if np.absolute(dx[k]+dx[l]) <= epsilon:
x.append(dx[l]/dxav)
y.append((np.log(pdx[l]/pdx[k]))/dxav)
'''
print(x)
#print("\n")
#print(y)
print(len(x))
print("\n")
print(len(y))
#plt.plot(x,y, 'o')
plt.plot(x,y)
plt.show()
'''
#writing to an output file for each DT
fileout='%s%s_%dFT.txt' %(foldout,force,dn)
fout=open(fileout,"w")
#fout.write()
for k in range(0,len(x)):
fout.write('%.12f %.12f\n'%(x[k],y[k]))
fout.close()
|
normal
|
{
"blob_id": "9817600759bc01e89f6c48bdc2d256651aedf74d",
"index": 1788,
"step-1": "<mask token>\n\n\ndef derf1(l, lp, kbt):\n return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)\n\n\ndef xdefWLC(kbt, l, p, f):\n l0 = 0.9999\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n if abs(f) < 1e-05:\n return 0.0\n while abs(l0 - lnew) > 1e-05:\n l0 = lnew\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n xdef = l * lnew\n return xdef\n\n\ndef intfdexWLC(kbt, l, p, f):\n l0 = xdefWLC(kbt, l, p, f)\n return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *\n l) - l0 / l)\n\n\ndef stretching_energy(K, l, p, f, fmin):\n total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)\n return total\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f1(l, lp, kbt):\n return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)\n\n\ndef derf1(l, lp, kbt):\n return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)\n\n\ndef xdefWLC(kbt, l, p, f):\n l0 = 0.9999\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n if abs(f) < 1e-05:\n return 0.0\n while abs(l0 - lnew) > 1e-05:\n l0 = lnew\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n xdef = l * lnew\n return xdef\n\n\ndef intfdexWLC(kbt, l, p, f):\n l0 = xdefWLC(kbt, l, p, f)\n return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *\n l) - l0 / l)\n\n\ndef stretching_energy(K, l, p, f, fmin):\n total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)\n return total\n\n\n<mask token>\nfor i in range(1, 2):\n force = forces[i]\n fmin = 0.0\n DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force\n ) - 2.0 * stretching_energy(K, l, p, force, fmin)\n Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *\n xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,\n force, fmin) / xdefWLC(K, l, p, force)) / K\n print(Ateo)\n for j in range(0, 1):\n Dt = 10 * dt * 2 ** j\n dn = 10 * 2 ** j\n n = 0\n folder = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'\n )\n filein = '%s%s_%dhist.txt' % (folder, force, dn)\n foldout = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'\n )\n data = np.loadtxt(filein)\n data2 = np.asarray(data)\n dx = data2[:, 0]\n pdx = data2[:, 1]\n dxav = 0.0\n pdxav = 0.0\n for k in range(0, len(dx)):\n dxav += dx[k] * pdx[k]\n pdxav += pdx[k]\n dxav /= pdxav\n pdx /= pdxav\n print(force)\n print(Dt)\n print(dxav)\n print('\\n')\n epsilon = 0.08\n delta = 0.4\n y = []\n x = []\n for k in range(0, len(dx)):\n if (dx[k] <= 0.0) & (dx[k] <= -delta):\n for l in range(0, len(dx)):\n if (dx[l] >= 0.0) & (dx[l] >= delta):\n if np.absolute(dx[k] + dx[l]) <= epsilon:\n x.append(dx[l] / dxav)\n y.append(np.log(pdx[l] / pdx[k]) / dxav)\n \"\"\"\t\t\n\t\tprint(x)\n\t\t#print(\"\n\")\n\t\t#print(y)\n\t\tprint(len(x))\n\t\tprint(\"\n\")\n\t\tprint(len(y))\n\t\t#plt.plot(x,y, 'o')\n\t\tplt.plot(x,y)\n\t\tplt.show()\n\t\t\"\"\"\n fileout = '%s%s_%dFT.txt' % (foldout, force, dn)\n fout = open(fileout, 'w')\n for k in range(0, len(x)):\n fout.write('%.12f %.12f\\n' % (x[k], y[k]))\n fout.close()\n",
"step-3": "<mask token>\n\n\ndef f1(l, lp, kbt):\n return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)\n\n\ndef derf1(l, lp, kbt):\n return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)\n\n\ndef xdefWLC(kbt, l, p, f):\n l0 = 0.9999\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n if abs(f) < 1e-05:\n return 0.0\n while abs(l0 - lnew) > 1e-05:\n l0 = lnew\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n xdef = l * lnew\n return xdef\n\n\ndef intfdexWLC(kbt, l, p, f):\n l0 = xdefWLC(kbt, l, p, f)\n return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *\n l) - l0 / l)\n\n\ndef stretching_energy(K, l, p, f, fmin):\n total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)\n return total\n\n\nforces = [5, 6, 7, 8, 9, 10, 11, 12]\ndelimiter = ' '\np = 0.75\nK = 4.114\nl = 0.66\nDeltamu = 2.0 * K\nDeltabp = 2.0 * K\nn_atp = 1.0\nforce = forces[7]\nk0 = 1000000.0\ndt = 0.5 / (k0 * 5)\nfmin = 0.0\n<mask token>\nfor i in range(1, 2):\n force = forces[i]\n fmin = 0.0\n DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force\n ) - 2.0 * stretching_energy(K, l, p, force, fmin)\n Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *\n xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,\n force, fmin) / xdefWLC(K, l, p, force)) / K\n print(Ateo)\n for j in range(0, 1):\n Dt = 10 * dt * 2 ** j\n dn = 10 * 2 ** j\n n = 0\n folder = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'\n )\n filein = '%s%s_%dhist.txt' % (folder, force, dn)\n foldout = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'\n )\n data = np.loadtxt(filein)\n data2 = np.asarray(data)\n dx = data2[:, 0]\n pdx = data2[:, 1]\n dxav = 0.0\n pdxav = 0.0\n for k in range(0, len(dx)):\n dxav += dx[k] * pdx[k]\n pdxav += pdx[k]\n dxav /= pdxav\n pdx /= pdxav\n print(force)\n print(Dt)\n print(dxav)\n print('\\n')\n epsilon = 0.08\n delta = 0.4\n y = []\n x = []\n for k in range(0, len(dx)):\n if (dx[k] <= 0.0) & (dx[k] <= -delta):\n for l in range(0, len(dx)):\n if (dx[l] >= 0.0) & (dx[l] >= delta):\n if np.absolute(dx[k] + dx[l]) <= epsilon:\n x.append(dx[l] / dxav)\n y.append(np.log(pdx[l] / pdx[k]) / dxav)\n \"\"\"\t\t\n\t\tprint(x)\n\t\t#print(\"\n\")\n\t\t#print(y)\n\t\tprint(len(x))\n\t\tprint(\"\n\")\n\t\tprint(len(y))\n\t\t#plt.plot(x,y, 'o')\n\t\tplt.plot(x,y)\n\t\tplt.show()\n\t\t\"\"\"\n fileout = '%s%s_%dFT.txt' % (foldout, force, dn)\n fout = open(fileout, 'w')\n for k in range(0, len(x)):\n fout.write('%.12f %.12f\\n' % (x[k], y[k]))\n fout.close()\n",
"step-4": "import glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport seaborn as sns\nimport os\nfrom pathlib import Path\n\n\ndef f1(l, lp, kbt):\n return kbt / lp * (1.0 / (4 * (1.0 - l) * (1.0 - l)) - 0.25 + l)\n\n\ndef derf1(l, lp, kbt):\n return kbt / lp * (0.5 / ((1.0 - l) * (1.0 - l) * (1.0 - l)) + 1.0)\n\n\ndef xdefWLC(kbt, l, p, f):\n l0 = 0.9999\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n if abs(f) < 1e-05:\n return 0.0\n while abs(l0 - lnew) > 1e-05:\n l0 = lnew\n lnew = l0 - (f1(l0, p, kbt) - f) / derf1(l0, p, kbt)\n xdef = l * lnew\n return xdef\n\n\ndef intfdexWLC(kbt, l, p, f):\n l0 = xdefWLC(kbt, l, p, f)\n return kbt * l / (4.0 * p) * (1.0 / (1.0 - l0 / l) + 2 * l0 * l0 / (l *\n l) - l0 / l)\n\n\ndef stretching_energy(K, l, p, f, fmin):\n total = intfdexWLC(K, l, p, f) - intfdexWLC(K, l, p, fmin)\n return total\n\n\nforces = [5, 6, 7, 8, 9, 10, 11, 12]\ndelimiter = ' '\np = 0.75\nK = 4.114\nl = 0.66\nDeltamu = 2.0 * K\nDeltabp = 2.0 * K\nn_atp = 1.0\nforce = forces[7]\nk0 = 1000000.0\ndt = 0.5 / (k0 * 5)\nfmin = 0.0\n<mask token>\nfor i in range(1, 2):\n force = forces[i]\n fmin = 0.0\n DeltaG = n_atp * Deltamu - Deltabp + 2.0 * force * xdefWLC(K, l, p, force\n ) - 2.0 * stretching_energy(K, l, p, force, fmin)\n Ateo = (n_atp * Deltamu / (2 * xdefWLC(K, l, p, force)) - Deltabp / (2 *\n xdefWLC(K, l, p, force)) + force - 1.0 * stretching_energy(K, l, p,\n force, fmin) / xdefWLC(K, l, p, force)) / K\n print(Ateo)\n for j in range(0, 1):\n Dt = 10 * dt * 2 ** j\n dn = 10 * 2 ** j\n n = 0\n folder = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'\n )\n filein = '%s%s_%dhist.txt' % (folder, force, dn)\n foldout = (\n '/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'\n )\n data = np.loadtxt(filein)\n data2 = np.asarray(data)\n dx = data2[:, 0]\n pdx = data2[:, 1]\n dxav = 0.0\n pdxav = 0.0\n for k in range(0, len(dx)):\n dxav += dx[k] * pdx[k]\n pdxav += pdx[k]\n dxav /= pdxav\n pdx /= pdxav\n print(force)\n print(Dt)\n print(dxav)\n print('\\n')\n epsilon = 0.08\n delta = 0.4\n y = []\n x = []\n for k in range(0, len(dx)):\n if (dx[k] <= 0.0) & (dx[k] <= -delta):\n for l in range(0, len(dx)):\n if (dx[l] >= 0.0) & (dx[l] >= delta):\n if np.absolute(dx[k] + dx[l]) <= epsilon:\n x.append(dx[l] / dxav)\n y.append(np.log(pdx[l] / pdx[k]) / dxav)\n \"\"\"\t\t\n\t\tprint(x)\n\t\t#print(\"\n\")\n\t\t#print(y)\n\t\tprint(len(x))\n\t\tprint(\"\n\")\n\t\tprint(len(y))\n\t\t#plt.plot(x,y, 'o')\n\t\tplt.plot(x,y)\n\t\tplt.show()\n\t\t\"\"\"\n fileout = '%s%s_%dFT.txt' % (foldout, force, dn)\n fout = open(fileout, 'w')\n for k in range(0, len(x)):\n fout.write('%.12f %.12f\\n' % (x[k], y[k]))\n fout.close()\n",
"step-5": "#!/usr/bin/python\n\nimport glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport seaborn as sns\nimport os\nfrom pathlib import Path\n#import math\n#functions related to elasticity of WLC\ndef f1(l,lp, kbt):\n return kbt/lp*(1./(4*(1.-l)*(1.-l))-.25+l)\n\ndef derf1(l,lp,kbt):\n return kbt/lp*(.5/((1.-l)*(1.-l)*(1.-l))+1.)\n\ndef xdefWLC(kbt, l, p, f):\n l0=.9999\n lnew=l0-(f1(l0,p,kbt)-f)/derf1(l0,p,kbt)\n\n if abs(f)<1.e-5: return 0.0\n while abs(l0-lnew)>1.e-5:\n l0=lnew\n lnew=l0-(f1(l0,p,kbt)-f)/derf1(l0,p,kbt)\n \n xdef=l*lnew \n return xdef\n\ndef intfdexWLC (kbt, l, p, f):\n l0=xdefWLC(kbt,l,p,f)\n return kbt*l/(4.*p)*(1./(1.-l0/l)+2*l0*l0/(l*l)-l0/l)\n\ndef stretching_energy(K,l,p,f,fmin):\n total= intfdexWLC(K,l,p,f)-intfdexWLC(K,l,p,fmin)\n return total\n\nforces = [ 5 ,6 ,7 , 8, 9, 10, 11, 12]\ndelimiter = \" \"\n\n\np = 0.75 \nK = 4.114\nl = 0.66\n \n#value of mu(pN/nm), N, etc\nDeltamu=2.0*K #chemical potential of 1 ATP 3*K\nDeltabp=2.0*K #free-energy of 1 bp opening\nn_atp=1. #Number of bp opened by 1-step of the motor\n\nforce = forces[7]\n\n#frequency bps\nk0=1000000.\n#montecarlo step = 1e-7\ndt=0.5/(k0*5)\nfmin=0.0\n\n'''\nprint (DeltaG)\nprint(n_atp*Deltamu)\nprint(Deltabp)\nprint(2.*force*xdefWLC(K,l,p,force))\nprint(2.*stretching_energy(K,l,p,force,fmin))\n#time.sleep(30.5)\nprint(\"pre\")\nprint(k0*np.exp(-DeltaG/(2.*K))*dt)\nprint(\"pun\")\nprint(k0*np.exp(DeltaG/(2.*K))*dt)\n'''\n\n\n\nfor i in range(1,2):\n\tforce=forces[i]\n\tfmin=0.0\n\tDeltaG=n_atp*Deltamu-Deltabp+2.*force*xdefWLC(K,l,p,force)-2.*stretching_energy(K,l,p,force,fmin)\n\tAteo=(n_atp*Deltamu/(2*xdefWLC(K,l,p,force))-Deltabp/(2*xdefWLC(K,l,p,force))+force-1.*stretching_energy(K,l,p,force,fmin)/xdefWLC(K,l,p,force))/K\n\tprint(Ateo)\n\tfor j in range (0,1):\n\t\t#CHECK parameter dt is the same than for the files processed in hist\n\t\tDt=10*dt*2**j\n\t\t#print(Dt)\n\t\tdn=10*2**j\n\t\tn=0\n\t\tfolder='/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/DATA/'\n\t\tfilein='%s%s_%dhist.txt' %(folder,force,dn)\n\t\t#File_in Dx, p(Dx)\n\t\tfoldout='/home/xavi/Documents/Helicase/Simulations/Simulation/Simulation_221018_4/FT/'\n\t\t#reading input file\n\t\tdata=np.loadtxt(filein)\n\t\tdata2=np.asarray(data)\n\t\tdx=data2[:,0]\n\t\tpdx=data2[:,1]\n\t\t\n\t\t#computing the average <Dx>\n\t\tdxav=0.\n\t\tpdxav=0.\n\t\t##Cal renormalitzar de nou les probabilitats (al descartar el 0 la integral ha canviat)\n\t\tfor k in range(0,len(dx)):\n\t\t\tdxav+=dx[k]*pdx[k]\n\t\t\tpdxav+=pdx[k]\n\t\tdxav/=pdxav\n\t\tpdx/=pdxav\n\t\t\n\t\tprint(force)\n\t\tprint(Dt)\n\t\tprint(dxav)\n\t\tprint(\"\\n\")\n\t\t\n\t\tepsilon=0.08\n\t\tdelta=0.4\n\t\ty=[]\n\t\tx=[]\n\t\tfor k in range(0,len(dx)):\n\t\t\tif (dx[k]<=0.) & (dx[k]<=-delta):\n\t\t\t\tfor l in range(0,len(dx)):\n\t\t\t\t#comprovem si x[l]+x[k]<epsilon --> computem \n\t\t\t\t\tif (dx[l]>=0.) & (dx[l]>=delta):\n\t\t\t\t\t\tif np.absolute(dx[k]+dx[l]) <= epsilon:\n\t\t\t\t\t\t\tx.append(dx[l]/dxav)\n\t\t\t\t\t\t\ty.append((np.log(pdx[l]/pdx[k]))/dxav)\n\t\t'''\t\t\n\t\tprint(x)\n\t\t#print(\"\\n\")\n\t\t#print(y)\n\t\tprint(len(x))\n\t\tprint(\"\\n\")\n\t\tprint(len(y))\n\t\t#plt.plot(x,y, 'o')\n\t\tplt.plot(x,y)\n\t\tplt.show()\n\t\t'''\n\t\t#writing to an output file for each DT\t\t\n\t\tfileout='%s%s_%dFT.txt' %(foldout,force,dn)\n\t\tfout=open(fileout,\"w\")\n\t\t#fout.write()\n\t\tfor k in range(0,len(x)):\n\t\t\tfout.write('%.12f %.12f\\n'%(x[k],y[k]))\t\t\n\t\tfout.close()\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'),
path('', api_photos_view, name='users')]
<|reserved_special_token_1|>
from django.urls import path
from photo.api.views import api_photo_detail_view, api_photos_view
urlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'),
path('', api_photos_view, name='users')]
|
flexible
|
{
"blob_id": "ab4145ccc0b360dcca9b9aa6ebe919bdddac65a2",
"index": 3962,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'),\n path('', api_photos_view, name='users')]\n",
"step-3": "from django.urls import path\nfrom photo.api.views import api_photo_detail_view, api_photos_view\nurlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'),\n path('', api_photos_view, name='users')]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
Client component of the Quartjes connector. Use the ClientConnector to create
a connection to the Quartjes server.
Usage
-----
Create an instance of this object with the host and port to connect to.
Call the start() method to establish the connection.
Now the database and the stock_exchange variable can be used to communicate
with the server.
If you do not wish to connect to a server, but run a local server instead,
create the object without any arguments.
Example
-------
>>> conn = ClientConnector("192.168.1.1")
>>> conn.start()
>>> conn.database.get_drinks()
Available server methods
------------------------
Currently two server objects are made available upon connection. Please see the
documentation for the server object for available methods and events:
* database: :class:`quartjes.controllers.database.Database`
* stock_exchange: :class:`quartjes.controllers.stock_exchange.StockExchange`
Advanced
--------
Use the method get_service_interface to retrieve additional interfaces to a server side
service.
As long as the connector is running, it will keep trying to reconnect any
lost connections using an exponential back-off.
ClientConnector class
---------------------
"""
__author__ = "Rob van der Most"
__docformat__ = "restructuredtext en"
from quartjes.connector.protocol import QuartjesClientFactory
from twisted.internet import reactor, threads
from threading import Thread
from quartjes.connector.services import ServiceInterface
import quartjes.controllers.database
import quartjes.controllers.stock_exchange2
class ClientConnector(object):
"""
Client side endpoint of the Quartjes connector.
Parameters
----------
host : string
Host to connect to. If no host is specified, a local server is started.
port : int
Port to connect to.
Attributes
----------
host
port
factory
database
stock_exchange
"""
def __init__(self, host=None, port=None):
self._host = host
if port:
self._port = port
else:
from quartjes.connector.server import default_port
self._port = default_port
self._factory = QuartjesClientFactory()
self._database = None
self._stock_exchange = None
self._connection = None
@property
def host(self):
"""
Hostname to connect to.
Can only be changed when there is no active connection.
"""
return self._host
@host.setter
def host(self, value):
        assert not self.is_connected(), "Host should not be changed while connected."
self._host = value
@property
def port(self):
"""
Port to connect to.
Can only be changed when there is no active connection.
"""
return self._port
@port.setter
def port(self, value):
        assert not self.is_connected(), "Port should not be changed while connected."
self._port = value
@property
def factory(self):
"""
The protocol factory used by the client to connect to the server.
You normally should not need to access this. It is for advanced options.
"""
return self._factory
@property
def database(self):
"""
Reference to the currently running
:class:`Database <quartjes.controllers.database.Database>`.
This can be a proxy to the database on the server or a local database.
"""
return self._database
@property
def stock_exchange(self):
"""
Reference to the currently running
:class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`.
This can be a proxy to the stock exchange on the server or a local stock exchange.
"""
return self._stock_exchange
def start(self):
"""
Start the connector and create a connection to the server. Starts a
reactor loop in a separate thread.
"""
if not self._host:
print("No host selected, starting local instance.")
self._database = quartjes.controllers.database.default_database()
self._stock_exchange = quartjes.controllers.stock_exchange2.StockExchange2()
else:
reactor.callLater(0, self._connect) #@UndefinedVariable
if not reactor.running: #@UndefinedVariable
self._reactor_thread = ClientConnector._ReactorThread()
self._reactor_thread.start()
self._factory.wait_for_connection()
self._database = self.get_service_interface("database")
self._stock_exchange = self.get_service_interface("stock_exchange")
def stop(self):
"""
Stop the connector, closing the connection.
The Reactor loop remains active as the reactor cannot be restarted.
"""
if self._host:
#threads.blockingCallFromThread(reactor, self._factory.stopTrying)
threads.blockingCallFromThread(reactor, self._disconnect)
else:
self._database = None
self._stock_exchange.stop()
self._stock_exchange = None
def get_service_interface(self, service_name):
"""
Construct a service interface for the service with the given name. Use
the service interface to send requests to the corresponding service
on the Quartjes server.
Parameters
----------
service_name : string
Name of the service on the server to which you want a remote
interface.
Returns
-------
service_interface : :class:`quartjes.connector.services.ServiceInterface`
An interface to the service.
Please note that the existence of the service on the server is not
verified until an actual method call has been done.
"""
return ServiceInterface(self._factory, service_name)
def is_connected(self):
"""
Determine whether the connection to the server is active.
A local service is also considered connected.
Returns
-------
connected : boolean
True if connected, False if not.
"""
if not self._host:
if self._database:
return True
else:
return False
else:
return self._factory.is_connected()
def _connect(self):
"""
Internal method called from the reactor to start a new connection.
"""
#print("Connecting...")
self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable
def _disconnect(self):
"""
Internal method called from the reactor to shut down a connection.
"""
self._factory.stopTrying()
self._connection.disconnect()
class _ReactorThread(Thread):
"""
Thread for running the reactor loop. This thread runs as a daemon, so
if the main thread and any non daemon threads end, the reactor also
stops running allowing the application to exit.
"""
def __init__(self):
Thread.__init__(self, name="ReactorThread")
self.daemon = True
def run(self):
reactor.run(installSignalHandlers=0) #@UndefinedVariable
def tk_event_listener(F):
"""
Make a method able to receive events from the connector while running in
the TK mainloop.
"""
def listener(self, *pargs, **kwargs):
self._event_queue.put((F, self, pargs, kwargs))
return listener
def tk_prepare_instance_for_events(instance):
"""
Prepare a class to receive events from outside the tk mainloop.
Call this from the TK mainloop before any events are going to be received.
Decorate methods to call using tk_event_listener
"""
def listener():
try:
while 1:
(method, self, pargs, kwargs) = instance._event_queue.get_nowait()
method(self, *pargs, **kwargs)
except Queue.Empty:
pass
instance.after(100, listener)
import Queue
instance._event_queue = Queue.Queue()
instance.after(100, listener)
|
normal
|
{
"blob_id": "a8f200e0ae1252df4ad6560e5756347cd0e4c8ba",
"index": 5034,
"step-1": "<mask token>\n\n\nclass ClientConnector(object):\n <mask token>\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n <mask token>\n <mask token>\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n <mask token>\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n__author__ = 'Rob van der Most'\n__docformat__ = 'restructuredtext en'\n<mask token>\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n return listener\n\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n\n def listener():\n try:\n while 1:\n method, self, pargs, kwargs = instance._event_queue.get_nowait(\n )\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n",
"step-4": "<mask token>\n__author__ = 'Rob van der Most'\n__docformat__ = 'restructuredtext en'\nfrom quartjes.connector.protocol import QuartjesClientFactory\nfrom twisted.internet import reactor, threads\nfrom threading import Thread\nfrom quartjes.connector.services import ServiceInterface\nimport quartjes.controllers.database\nimport quartjes.controllers.stock_exchange2\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n return listener\n\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n\n def listener():\n try:\n while 1:\n method, self, pargs, kwargs = instance._event_queue.get_nowait(\n )\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n",
"step-5": "\"\"\"\nClient component of the Quartjes connector. Use the ClientConnector to create\na connection to the Quartjes server.\n\nUsage\n-----\nCreate an instance of this object with the host and port to connect to.\nCall the start() method to establish the connection.\nNow the database and the stock_exchange variable can be used to communicate\nwith the server.\n\nIf you do not wish to connect to a server, but run a local server instead,\ncreate the object without any arguments.\n\nExample\n-------\n>>> conn = ClientConnector(\"192.168.1.1\")\n>>> conn.start()\n>>> conn.database.get_drinks()\n\nAvailable server methods\n------------------------\n\nCurrently two server objects are made available upon connection. Please see the\ndocumentation for the server object for available methods and events:\n\n* database: :class:`quartjes.controllers.database.Database`\n* stock_exchange: :class:`quartjes.controllers.stock_exchange.StockExchange`\n\nAdvanced\n--------\n\nUse the method get_service_interface to retrieve additional interfaces to a server side\nservice.\n\nAs long as the connector is running, it will keep trying to reconnect any\nlost connections using an exponential back-off.\n\nClientConnector class\n---------------------\n\n\"\"\"\n__author__ = \"Rob van der Most\"\n__docformat__ = \"restructuredtext en\"\n\nfrom quartjes.connector.protocol import QuartjesClientFactory\nfrom twisted.internet import reactor, threads\nfrom threading import Thread\nfrom quartjes.connector.services import ServiceInterface\nimport quartjes.controllers.database\nimport quartjes.controllers.stock_exchange2\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n \n @host.setter\n def host(self, value):\n assert not self.is_connected(), \"Host should not be changed will connected.\"\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n \n @port.setter\n def port(self, value):\n assert not self.is_connected(), \"Port should not be changed will connected.\"\n self._port = value\n \n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n \n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n \n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. 
\n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n \n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print(\"No host selected, starting local instance.\")\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = quartjes.controllers.stock_exchange2.StockExchange2()\n else:\n reactor.callLater(0, self._connect) #@UndefinedVariable\n if not reactor.running: #@UndefinedVariable\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n\n self._database = self.get_service_interface(\"database\")\n self._stock_exchange = self.get_service_interface(\"stock_exchange\")\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n #threads.blockingCallFromThread(reactor, self._factory.stopTrying)\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. 
This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n def __init__(self):\n Thread.__init__(self, name=\"ReactorThread\")\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0) #@UndefinedVariable\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n \n return listener\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n def listener():\n try:\n while 1:\n (method, self, pargs, kwargs) = instance._event_queue.get_nowait()\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n \n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n \n",
"step-ids": [
12,
16,
19,
20,
21
]
}
|
[
12,
16,
19,
20,
21
] |
a=[i for i in range(10)]
del a[0]
print a
del a[-1]
print a
del a[1]
print a
del a[0:2]
print a
del a[1:3:1]
print a
#test del all
del a[:]
print a
a.append(1)
print a
# Make sure that del's work correctly in sub-scopes:
x = 1
def f1():
x = range(5)
def f2():
del x[1]
return f2
f1()()
|
normal
|
{
"blob_id": "d0e5cfc7b619c2eaec19248619d7d59e41503c89",
"index": 4302,
"step-1": "a=[i for i in range(10)]\ndel a[0]\nprint a\ndel a[-1]\nprint a\ndel a[1]\nprint a\n\ndel a[0:2] \nprint a \ndel a[1:3:1]\nprint a\n#test del all\ndel a[:]\nprint a\na.append(1)\nprint a\n\n# Make sure that del's work correctly in sub-scopes:\nx = 1\ndef f1():\n x = range(5)\n def f2():\n del x[1]\n return f2\nf1()()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def lambda_handler(event, context):
products = list(Product.scan(Product.do_crawl == True))
for product in products:
product.search_lowest_price()
print('{} product(s) crawled'.format(len(products)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if not DEBUG:
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
<|reserved_special_token_0|>
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
products = list(Product.scan(Product.do_crawl == True))
for product in products:
product.search_lowest_price()
print('{} product(s) crawled'.format(len(products)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DEBUG = False
if not DEBUG:
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
products = list(Product.scan(Product.do_crawl == True))
for product in products:
product.search_lowest_price()
print('{} product(s) crawled'.format(len(products)))
<|reserved_special_token_1|>
import os
import datetime
import traceback
import json
import requests
import logging
from model import Product
from naver_api import naver_client_id, naver_client_secret
DEBUG = False
if not DEBUG:
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
products = list(Product.scan(Product.do_crawl == True))
for product in products:
product.search_lowest_price()
print('{} product(s) crawled'.format(len(products)))
<|reserved_special_token_1|>
import os
import datetime
import traceback
import json
import requests
import logging
from model import Product
from naver_api import naver_client_id, naver_client_secret
DEBUG = False
if not DEBUG:
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
# print(naver_client_id)
# print(naver_client_secret)
products = list(Product.scan(Product.do_crawl==True))
for product in products:
product.search_lowest_price()
print('{} product(s) crawled'.format(len(products)))
|
flexible
|
{
"blob_id": "76905171602cbeb53903a4b0259685288da3a083",
"index": 6365,
"step-1": "<mask token>\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-2": "<mask token>\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\n<mask token>\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-3": "<mask token>\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-4": "import os\nimport datetime\nimport traceback\nimport json\nimport requests\nimport logging\nfrom model import Product\nfrom naver_api import naver_client_id, naver_client_secret\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-5": "import os\nimport datetime\nimport traceback\nimport json\nimport requests\nimport logging\n\nfrom model import Product\nfrom naver_api import naver_client_id, naver_client_secret\n\n\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n # print(naver_client_id)\n # print(naver_client_secret)\n\n products = list(Product.scan(Product.do_crawl==True))\n\n for product in products:\n product.search_lowest_price()\n\n print('{} product(s) crawled'.format(len(products)))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
if any(str.isdigit(c) for c in word):
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return gnumber, gweight, ' '.join(gname)
def cleanName(name):
return re.sub('[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset='noSpace', keep='first', inplace=True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],
columns=['number', 'weight', 'short name'])
return new_df
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
<|reserved_special_token_0|>
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
if any(str.isdigit(c) for c in word):
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return gnumber, gweight, ' '.join(gname)
def cleanName(name):
return re.sub('[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset='noSpace', keep='first', inplace=True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],
columns=['number', 'weight', 'short name'])
return new_df
<|reserved_special_token_0|>
df.drop_duplicates(subset='name', keep='first', inplace=True)
<|reserved_special_token_0|>
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
<|reserved_special_token_0|>
print(formatName('10mg Dextrose in Water Parenteral Solution for ..'))
splitMedicine(nonTiengViet_df)
<|reserved_special_token_0|>
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
print(nonTiengViet_df.describe)
print(nonTiengViet_df.tail(5))
nonTiengViet_df.to_json('PreProcessData.json')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
data_path = (
'/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'
)
weights = ['mg', 'ml', '%']
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
if any(str.isdigit(c) for c in word):
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return gnumber, gweight, ' '.join(gname)
def cleanName(name):
return re.sub('[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset='noSpace', keep='first', inplace=True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],
columns=['number', 'weight', 'short name'])
return new_df
df = pd.read_json(data_path, orient='records')
df.drop_duplicates(subset='name', keep='first', inplace=True)
df.index = range(len(df.index))
nonTiengViet_df = df.loc[df['name'].str.contains('[^\\x00-\\x7F]+') == False]
nonTiengViet_df['noSpace'] = nonTiengViet_df.name
rm_character = ['-', '"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
nonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)
nonTiengViet_df.index = range(len(nonTiengViet_df.index))
print(formatName('10mg Dextrose in Water Parenteral Solution for ..'))
splitMedicine(nonTiengViet_df)
new_df = splitMedicine(nonTiengViet_df)
nonTiengViet_df['shortname'] = new_df['short name']
nonTiengViet_df['number'] = new_df['number']
nonTiengViet_df['weight'] = new_df['weight']
nonTiengViet_df['noSpace'] = nonTiengViet_df.shortname
rm_character = ['-', '"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
print(nonTiengViet_df.describe)
print(nonTiengViet_df.tail(5))
nonTiengViet_df.to_json('PreProcessData.json')
<|reserved_special_token_1|>
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import warnings
import re
warnings.filterwarnings('ignore')
data_path = (
'/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'
)
weights = ['mg', 'ml', '%']
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
if any(str.isdigit(c) for c in word):
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return gnumber, gweight, ' '.join(gname)
def cleanName(name):
return re.sub('[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset='noSpace', keep='first', inplace=True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],
columns=['number', 'weight', 'short name'])
return new_df
df = pd.read_json(data_path, orient='records')
df.drop_duplicates(subset='name', keep='first', inplace=True)
df.index = range(len(df.index))
nonTiengViet_df = df.loc[df['name'].str.contains('[^\\x00-\\x7F]+') == False]
nonTiengViet_df['noSpace'] = nonTiengViet_df.name
rm_character = ['-', '"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
nonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)
nonTiengViet_df.index = range(len(nonTiengViet_df.index))
print(formatName('10mg Dextrose in Water Parenteral Solution for ..'))
splitMedicine(nonTiengViet_df)
new_df = splitMedicine(nonTiengViet_df)
nonTiengViet_df['shortname'] = new_df['short name']
nonTiengViet_df['number'] = new_df['number']
nonTiengViet_df['weight'] = new_df['weight']
nonTiengViet_df['noSpace'] = nonTiengViet_df.shortname
rm_character = ['-', '"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
print(nonTiengViet_df.describe)
print(nonTiengViet_df.tail(5))
nonTiengViet_df.to_json('PreProcessData.json')
<|reserved_special_token_1|>
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import warnings
import re
warnings.filterwarnings("ignore")
data_path = '/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'
weights = ['mg', 'ml', '%']
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
        if any(str.isdigit(c) for c in word): # two cases: 200 and 200mg
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return (gnumber, gweight ,' '.join(gname))
def cleanName(name):
return re.sub(r'[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset ='noSpace',
keep = 'first', inplace = True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a,b,c in df_temp.values], columns=['number', 'weight', 'short name'])
return new_df
#Read data
df = pd.read_json(data_path, orient='records')
df.drop_duplicates(subset ="name",
keep = 'first', inplace = True)
df.index = range(len(df.index))
#Remove medicines whose names contain Vietnamese text
nonTiengViet_df = df.loc[df['name'].str.contains(r'[^\x00-\x7F]+') == False]
#print(nonTiengViet_df.head(10))
#Remove duplicates by stripping all whitespace from the medicine names; if both name and composition match, drop the row
nonTiengViet_df['noSpace'] = nonTiengViet_df.name
rm_character = ['-', '\"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
# sort dataframe:
nonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)
nonTiengViet_df.index = range(len(nonTiengViet_df.index))
# split the medicine name on [' ', '-']
# find words that contain a number: 200, 200mg, 0.1mg/ml, 150 ....
#
print(formatName('10mg Dextrose in Water Parenteral Solution for ..'))
splitMedicine(nonTiengViet_df)
new_df = splitMedicine(nonTiengViet_df)
nonTiengViet_df['shortname'] = new_df['short name']
nonTiengViet_df['number'] = new_df['number']
nonTiengViet_df['weight'] = new_df['weight']
nonTiengViet_df['noSpace'] = nonTiengViet_df.shortname
rm_character = ['-', '\"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
print(nonTiengViet_df.describe)
print(nonTiengViet_df.tail(5))
nonTiengViet_df.to_json(r'PreProcessData.json')
|
flexible
|
{
"blob_id": "b808daf8d1fbe3cc585db57e1049a502d3ca46f5",
"index": 857,
"step-1": "<mask token>\n\n\ndef formatName(name):\n arr = re.split(' |-', name)\n print(arr)\n gweight = ''\n gname = []\n gnumber = ''\n for word in arr:\n if any(str.isdigit(c) for c in word):\n for weight in weights:\n pos = word.find(weight)\n if pos != -1:\n gweight = weight\n gnumber = word[:pos]\n break\n else:\n gnumber = word\n elif any(word == weight for weight in weights):\n gweight = word\n elif word != '':\n gname.append(word)\n return gnumber, gweight, ' '.join(gname)\n\n\ndef cleanName(name):\n return re.sub('[^a-z0-9]', '', name.lower())\n\n\ndef rmSpecialCharacters(df):\n df['noSpace'] = df['noSpace'].apply(cleanName)\n\n\ndef rmDuplicate(df):\n df.drop_duplicates(subset='noSpace', keep='first', inplace=True)\n df.index = range(len(df.index))\n\n\ndef splitMedicine(df):\n df_temp = df['name'].apply(formatName)\n new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],\n columns=['number', 'weight', 'short name'])\n return new_df\n\n\n<mask token>\n",
"step-2": "<mask token>\nwarnings.filterwarnings('ignore')\n<mask token>\n\n\ndef formatName(name):\n arr = re.split(' |-', name)\n print(arr)\n gweight = ''\n gname = []\n gnumber = ''\n for word in arr:\n if any(str.isdigit(c) for c in word):\n for weight in weights:\n pos = word.find(weight)\n if pos != -1:\n gweight = weight\n gnumber = word[:pos]\n break\n else:\n gnumber = word\n elif any(word == weight for weight in weights):\n gweight = word\n elif word != '':\n gname.append(word)\n return gnumber, gweight, ' '.join(gname)\n\n\ndef cleanName(name):\n return re.sub('[^a-z0-9]', '', name.lower())\n\n\ndef rmSpecialCharacters(df):\n df['noSpace'] = df['noSpace'].apply(cleanName)\n\n\ndef rmDuplicate(df):\n df.drop_duplicates(subset='noSpace', keep='first', inplace=True)\n df.index = range(len(df.index))\n\n\ndef splitMedicine(df):\n df_temp = df['name'].apply(formatName)\n new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],\n columns=['number', 'weight', 'short name'])\n return new_df\n\n\n<mask token>\ndf.drop_duplicates(subset='name', keep='first', inplace=True)\n<mask token>\nrmSpecialCharacters(nonTiengViet_df)\nrmDuplicate(nonTiengViet_df)\n<mask token>\nprint(formatName('10mg Dextrose in Water Parenteral Solution for ..'))\nsplitMedicine(nonTiengViet_df)\n<mask token>\nrmSpecialCharacters(nonTiengViet_df)\nrmDuplicate(nonTiengViet_df)\nprint(nonTiengViet_df.describe)\nprint(nonTiengViet_df.tail(5))\nnonTiengViet_df.to_json('PreProcessData.json')\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\ndata_path = (\n '/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'\n )\nweights = ['mg', 'ml', '%']\n\n\ndef formatName(name):\n arr = re.split(' |-', name)\n print(arr)\n gweight = ''\n gname = []\n gnumber = ''\n for word in arr:\n if any(str.isdigit(c) for c in word):\n for weight in weights:\n pos = word.find(weight)\n if pos != -1:\n gweight = weight\n gnumber = word[:pos]\n break\n else:\n gnumber = word\n elif any(word == weight for weight in weights):\n gweight = word\n elif word != '':\n gname.append(word)\n return gnumber, gweight, ' '.join(gname)\n\n\ndef cleanName(name):\n return re.sub('[^a-z0-9]', '', name.lower())\n\n\ndef rmSpecialCharacters(df):\n df['noSpace'] = df['noSpace'].apply(cleanName)\n\n\ndef rmDuplicate(df):\n df.drop_duplicates(subset='noSpace', keep='first', inplace=True)\n df.index = range(len(df.index))\n\n\ndef splitMedicine(df):\n df_temp = df['name'].apply(formatName)\n new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],\n columns=['number', 'weight', 'short name'])\n return new_df\n\n\ndf = pd.read_json(data_path, orient='records')\ndf.drop_duplicates(subset='name', keep='first', inplace=True)\ndf.index = range(len(df.index))\nnonTiengViet_df = df.loc[df['name'].str.contains('[^\\\\x00-\\\\x7F]+') == False]\nnonTiengViet_df['noSpace'] = nonTiengViet_df.name\nrm_character = ['-', '\"', '/', ' ', ',', '.']\nrmSpecialCharacters(nonTiengViet_df)\nrmDuplicate(nonTiengViet_df)\nnonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)\nnonTiengViet_df.index = range(len(nonTiengViet_df.index))\nprint(formatName('10mg Dextrose in Water Parenteral Solution for ..'))\nsplitMedicine(nonTiengViet_df)\nnew_df = splitMedicine(nonTiengViet_df)\nnonTiengViet_df['shortname'] = new_df['short name']\nnonTiengViet_df['number'] = new_df['number']\nnonTiengViet_df['weight'] = new_df['weight']\nnonTiengViet_df['noSpace'] = nonTiengViet_df.shortname\nrm_character = ['-', '\"', '/', ' ', ',', '.']\nrmSpecialCharacters(nonTiengViet_df)\nrmDuplicate(nonTiengViet_df)\nprint(nonTiengViet_df.describe)\nprint(nonTiengViet_df.tail(5))\nnonTiengViet_df.to_json('PreProcessData.json')\n",
"step-4": "import pandas as pd\nfrom pandas.io.json import json_normalize\nimport numpy as np\nimport warnings\nimport re\nwarnings.filterwarnings('ignore')\ndata_path = (\n '/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'\n )\nweights = ['mg', 'ml', '%']\n\n\ndef formatName(name):\n arr = re.split(' |-', name)\n print(arr)\n gweight = ''\n gname = []\n gnumber = ''\n for word in arr:\n if any(str.isdigit(c) for c in word):\n for weight in weights:\n pos = word.find(weight)\n if pos != -1:\n gweight = weight\n gnumber = word[:pos]\n break\n else:\n gnumber = word\n elif any(word == weight for weight in weights):\n gweight = word\n elif word != '':\n gname.append(word)\n return gnumber, gweight, ' '.join(gname)\n\n\ndef cleanName(name):\n return re.sub('[^a-z0-9]', '', name.lower())\n\n\ndef rmSpecialCharacters(df):\n df['noSpace'] = df['noSpace'].apply(cleanName)\n\n\ndef rmDuplicate(df):\n df.drop_duplicates(subset='noSpace', keep='first', inplace=True)\n df.index = range(len(df.index))\n\n\ndef splitMedicine(df):\n df_temp = df['name'].apply(formatName)\n new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],\n columns=['number', 'weight', 'short name'])\n return new_df\n\n\ndf = pd.read_json(data_path, orient='records')\ndf.drop_duplicates(subset='name', keep='first', inplace=True)\ndf.index = range(len(df.index))\nnonTiengViet_df = df.loc[df['name'].str.contains('[^\\\\x00-\\\\x7F]+') == False]\nnonTiengViet_df['noSpace'] = nonTiengViet_df.name\nrm_character = ['-', '\"', '/', ' ', ',', '.']\nrmSpecialCharacters(nonTiengViet_df)\nrmDuplicate(nonTiengViet_df)\nnonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)\nnonTiengViet_df.index = range(len(nonTiengViet_df.index))\nprint(formatName('10mg Dextrose in Water Parenteral Solution for ..'))\nsplitMedicine(nonTiengViet_df)\nnew_df = splitMedicine(nonTiengViet_df)\nnonTiengViet_df['shortname'] = new_df['short name']\nnonTiengViet_df['number'] = new_df['number']\nnonTiengViet_df['weight'] = new_df['weight']\nnonTiengViet_df['noSpace'] = nonTiengViet_df.shortname\nrm_character = ['-', '\"', '/', ' ', ',', '.']\nrmSpecialCharacters(nonTiengViet_df)\nrmDuplicate(nonTiengViet_df)\nprint(nonTiengViet_df.describe)\nprint(nonTiengViet_df.tail(5))\nnonTiengViet_df.to_json('PreProcessData.json')\n",
"step-5": "import pandas as pd\nfrom pandas.io.json import json_normalize\nimport numpy as np\nimport warnings\nimport re\nwarnings.filterwarnings(\"ignore\")\n\ndata_path = '/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'\n\nweights = ['mg', 'ml', '%']\n\ndef formatName(name):\n arr = re.split(' |-', name)\n print(arr) \n gweight = ''\n gname = [] \n gnumber = ''\n for word in arr:\n if any(str.isdigit(c) for c in word): #2 trường hợp 200 200mg\n for weight in weights:\n pos = word.find(weight)\n if pos != -1:\n gweight = weight \n gnumber = word[:pos]\n break\n else:\n gnumber = word\n \n elif any(word == weight for weight in weights):\n gweight = word\n elif word != '':\n gname.append(word)\n\n return (gnumber, gweight ,' '.join(gname))\n\ndef cleanName(name):\n return re.sub(r'[^a-z0-9]', '', name.lower()) \n\ndef rmSpecialCharacters(df):\n df['noSpace'] = df['noSpace'].apply(cleanName)\n \ndef rmDuplicate(df):\n df.drop_duplicates(subset ='noSpace', \n keep = 'first', inplace = True)\n df.index = range(len(df.index))\n\ndef splitMedicine(df):\n df_temp = df['name'].apply(formatName) \n new_df = pd.DataFrame([[a, b, c] for a,b,c in df_temp.values], columns=['number', 'weight', 'short name'])\n return new_df \n\n#Read data\ndf = pd.read_json(data_path, orient='records')\n\ndf.drop_duplicates(subset =\"name\", \n keep = 'first', inplace = True)\ndf.index = range(len(df.index))\n\n#Xoá các thuốc có tiếng việt\nnonTiengViet_df = df.loc[df['name'].str.contains(r'[^\\x00-\\x7F]+') == False]\n#print(nonTiengViet_df.head(10))\n\n#Remove duplicate bằng cách xoá hết các khoảng trắng của tên thuốc, nếu trùng tên và thành phần thì xoá \nnonTiengViet_df['noSpace'] = nonTiengViet_df.name \nrm_character = ['-', '\\\"', '/', ' ', ',', '.']\nrmSpecialCharacters(nonTiengViet_df)\n\nrmDuplicate(nonTiengViet_df)\n\n# sort dataframe:\nnonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)\nnonTiengViet_df.index = range(len(nonTiengViet_df.index))\n# split thuốc theo [' ', '-']\n# Tìm các từ có tồn tại số 200, 200mg, 0.1mg/ml 150 ....\n# \nprint(formatName('10mg Dextrose in Water Parenteral Solution for ..'))\nsplitMedicine(nonTiengViet_df)\n\nnew_df = splitMedicine(nonTiengViet_df)\nnonTiengViet_df['shortname'] = new_df['short name']\nnonTiengViet_df['number'] = new_df['number']\nnonTiengViet_df['weight'] = new_df['weight']\n\n\nnonTiengViet_df['noSpace'] = nonTiengViet_df.shortname \nrm_character = ['-', '\\\"', '/', ' ', ',', '.']\nrmSpecialCharacters(nonTiengViet_df)\n\nrmDuplicate(nonTiengViet_df)\n\nprint(nonTiengViet_df.describe)\nprint(nonTiengViet_df.tail(5))\nnonTiengViet_df.to_json(r'PreProcessData.json')\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import xarray as xr
def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):
"""
Calculates average climatology for annual data - either Jan to Dec or accummulation period
"""
nyear = end_year - start_year + 1
ds = xr.open_dataset(fili)
year = ds['time'].dt.year
#dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')
dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)
#dsClm = dsClm.where(dsMsk == nyear)
#dsMsk.to_netcdf('era5.count.nc4')
print (dsClm)
filo = fili.replace('annual','annual.clm')
print (f'Writing climatology to {filo}')
dsClm.to_netcdf(filo)
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser( description='Calculates climatology from annual data' )
parser.add_argument('fili', type=str, help='path to annual file')
parser.add_argument('--start_year', '-sy', default=1981,
help='First year for climatology')
parser.add_argument('--end_year', '-ey', default=2015,
help='Last year for climatology')
args = parser.parse_args()
precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)
|
normal
|
{
"blob_id": "eb403fbb307332c18ffdcdf52589c714f0719960",
"index": 3052,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'Calculates climatology from annual data')\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981, help=\n 'First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015, help=\n 'Last year for climatology')\n args = parser.parse_args()\n precip_stats_to_climatology(args.fili, start_year=args.start_year,\n end_year=args.end_year)\n",
"step-4": "import xarray as xr\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'Calculates climatology from annual data')\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981, help=\n 'First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015, help=\n 'Last year for climatology')\n args = parser.parse_args()\n precip_stats_to_climatology(args.fili, start_year=args.start_year,\n end_year=args.end_year)\n",
"step-5": "import xarray as xr\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return\n\nif __name__ == \"__main__\":\n\n import argparse\n\n parser = argparse.ArgumentParser( description='Calculates climatology from annual data' )\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981,\n help='First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015,\n help='Last year for climatology')\n args = parser.parse_args()\n\n precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)\n \n\n \n \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Geocode(address, apiKey):
URL = 'https://geocode.search.hereapi.com/v1/geocode'
params = {'q': address, 'apiKey': apiKey}
import pdb
pdb.set_trace()
response = requests.get(URL, params=params).json()
item = response['items'][0]
address = item['address']
position = item['position']
result = {'address': address['label'], 'lat': position['lat'], 'lng':
position['lng']}
return result
if __name__ == '__main__':
address = 'Украина, Александрия, Соборный проспект 98'
res = Geocode(address, _apiKey)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_apiKey = 'SNgeI1tCT-oihjeZDGi6WqcM0a9QAttLhKTecPaaETQ'
def Geocode(address, apiKey):
URL = 'https://geocode.search.hereapi.com/v1/geocode'
params = {'q': address, 'apiKey': apiKey}
import pdb
pdb.set_trace()
response = requests.get(URL, params=params).json()
item = response['items'][0]
address = item['address']
position = item['position']
result = {'address': address['label'], 'lat': position['lat'], 'lng':
position['lng']}
return result
if __name__ == '__main__':
address = 'Украина, Александрия, Соборный проспект 98'
res = Geocode(address, _apiKey)
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
import json
import geojson
import re
import time
_apiKey = 'SNgeI1tCT-oihjeZDGi6WqcM0a9QAttLhKTecPaaETQ'
def Geocode(address, apiKey):
URL = 'https://geocode.search.hereapi.com/v1/geocode'
params = {'q': address, 'apiKey': apiKey}
import pdb
pdb.set_trace()
response = requests.get(URL, params=params).json()
item = response['items'][0]
address = item['address']
position = item['position']
result = {'address': address['label'], 'lat': position['lat'], 'lng':
position['lng']}
return result
if __name__ == '__main__':
address = 'Украина, Александрия, Соборный проспект 98'
res = Geocode(address, _apiKey)
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
import json
import geojson
import re
import time
_apiKey = "SNgeI1tCT-oihjeZDGi6WqcM0a9QAttLhKTecPaaETQ"
def Geocode(address, apiKey):
URL = 'https://geocode.search.hereapi.com/v1/geocode'
    # Request parameters
params = {
'q': address,
'apiKey': apiKey
}
import pdb; pdb.set_trace()
    # Parse the response as JSON
response = requests.get(URL, params=params).json()
item = response['items'][0]
address = item['address']
position = item['position']
result = {
'address': address['label'],
'lat': position['lat'],
'lng': position['lng'],
}
return result
if __name__ == "__main__":
address = "Украина, Александрия, Соборный проспект 98"
res = Geocode(address, _apiKey)
|
flexible
|
{
"blob_id": "d32496c9bce86f455b24cd9c6dc263aee1bf82af",
"index": 3552,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Geocode(address, apiKey):\n URL = 'https://geocode.search.hereapi.com/v1/geocode'\n params = {'q': address, 'apiKey': apiKey}\n import pdb\n pdb.set_trace()\n response = requests.get(URL, params=params).json()\n item = response['items'][0]\n address = item['address']\n position = item['position']\n result = {'address': address['label'], 'lat': position['lat'], 'lng':\n position['lng']}\n return result\n\n\nif __name__ == '__main__':\n address = 'Украина, Александрия, Соборный проспект 98'\n res = Geocode(address, _apiKey)\n",
"step-3": "<mask token>\n_apiKey = 'SNgeI1tCT-oihjeZDGi6WqcM0a9QAttLhKTecPaaETQ'\n\n\ndef Geocode(address, apiKey):\n URL = 'https://geocode.search.hereapi.com/v1/geocode'\n params = {'q': address, 'apiKey': apiKey}\n import pdb\n pdb.set_trace()\n response = requests.get(URL, params=params).json()\n item = response['items'][0]\n address = item['address']\n position = item['position']\n result = {'address': address['label'], 'lat': position['lat'], 'lng':\n position['lng']}\n return result\n\n\nif __name__ == '__main__':\n address = 'Украина, Александрия, Соборный проспект 98'\n res = Geocode(address, _apiKey)\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport geojson\nimport re\nimport time\n_apiKey = 'SNgeI1tCT-oihjeZDGi6WqcM0a9QAttLhKTecPaaETQ'\n\n\ndef Geocode(address, apiKey):\n URL = 'https://geocode.search.hereapi.com/v1/geocode'\n params = {'q': address, 'apiKey': apiKey}\n import pdb\n pdb.set_trace()\n response = requests.get(URL, params=params).json()\n item = response['items'][0]\n address = item['address']\n position = item['position']\n result = {'address': address['label'], 'lat': position['lat'], 'lng':\n position['lng']}\n return result\n\n\nif __name__ == '__main__':\n address = 'Украина, Александрия, Соборный проспект 98'\n res = Geocode(address, _apiKey)\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport geojson\nimport re\nimport time\n\n_apiKey = \"SNgeI1tCT-oihjeZDGi6WqcM0a9QAttLhKTecPaaETQ\"\n\ndef Geocode(address, apiKey):\n URL = 'https://geocode.search.hereapi.com/v1/geocode'\n\n # Параметры запроса\n params = {\n 'q': address,\n 'apiKey': apiKey\n }\n \n import pdb; pdb.set_trace()\n # Парсинг ответа в JSON формате\n response = requests.get(URL, params=params).json()\n item = response['items'][0]\n\n address = item['address']\n position = item['position']\n\n result = {\n 'address': address['label'],\n 'lat': position['lat'],\n 'lng': position['lng'],\n }\n \n return result\n\nif __name__ == \"__main__\":\n address = \"Украина, Александрия, Соборный проспект 98\"\n res = Geocode(address, _apiKey)",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from secrets import randbelow
print(randbelow(100))
|
normal
|
{
"blob_id": "18ae982c7fac7a31e0d257f500da0be0851388c2",
"index": 8985,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(randbelow(100))\n",
"step-3": "from secrets import randbelow\nprint(randbelow(100))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django import forms
from .models import Project
from user.models import User
from assets.models import Assets
class CreateProjectForm(forms.ModelForm):
project_name = forms.CharField(
label='项目名',
widget=forms.TextInput(
attrs={"class": "form-control"}
)
)
project_desc = forms.CharField(
label='项目说明',
required=False,
widget=forms.Textarea(
attrs={"class": "form-control", "cols": 40, "rows": 5}
)
)
auth_users = forms.ModelMultipleChoiceField(
label='授权用户',
required=False,
queryset=User.get_all(),
widget=forms.SelectMultiple(
attrs={"class": "form-control selectpicker", "data-live-search": "true", "data-size": "5",
"data-width": "100%", }
)
)
assets_set = forms.ModelMultipleChoiceField(
label="旗下资产",
required=False,
help_text="如果你从资产创建打开此页面,晴忽略该项内容",
queryset=Assets.get_all(),
widget=forms.SelectMultiple(
attrs={
"class": "selectpicker", "data-live-search": "true", "data-size": "5",
"data-width": "100%",
}
)
)
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
def clean_project_name(self):
pro_name = self.cleaned_data['project_name']
name = Project.get_by_name(pro_name)
if name:
raise forms.ValidationError("该项目已存在")
return pro_name
class UpdateProjectForm(forms.ModelForm):
project_name = forms.CharField(
label='项目名',
widget=forms.TextInput(
attrs={"class": "form-control"}
)
)
project_desc = forms.CharField(
label='项目说明',
required=False,
widget=forms.Textarea(
attrs={"class": "form-control", "cols": 40, "rows": 5}
)
)
auth_users = forms.ModelMultipleChoiceField(
label='授权用户',
required=False,
queryset=User.get_all(),
widget=forms.SelectMultiple(
attrs={"class": "form-control selectpicker", "data-live-search": "true", "data-size": "5",
"data-width": "100%", }
)
)
assets_set = forms.ModelMultipleChoiceField(
label="旗下资产",
required=False,
help_text="如果你从资产创建打开此页面,晴忽略该项内容",
queryset=Assets.get_all(),
widget=forms.SelectMultiple(
attrs={
"class": "selectpicker", "data-live-search": "true", "data-size": "5",
"data-width": "100%",
}
)
)
class Meta:
model = Project
fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
|
normal
|
{
"blob_id": "599c5c02397f283eb00f7343e65c5cb977442e38",
"index": 3848,
"step-1": "<mask token>\n\n\nclass CreateProjectForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n <mask token>\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-2": "<mask token>\n\n\nclass CreateProjectForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n\n def clean_project_name(self):\n pro_name = self.cleaned_data['project_name']\n name = Project.get_by_name(pro_name)\n if name:\n raise forms.ValidationError('该项目已存在')\n return pro_name\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-3": "<mask token>\n\n\nclass CreateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n\n def clean_project_name(self):\n pro_name = self.cleaned_data['project_name']\n name = Project.get_by_name(pro_name)\n if name:\n raise forms.ValidationError('该项目已存在')\n return pro_name\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-4": "from django import forms\nfrom .models import Project\nfrom user.models import User\nfrom assets.models import Assets\n\n\nclass CreateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n\n def clean_project_name(self):\n pro_name = self.cleaned_data['project_name']\n name = Project.get_by_name(pro_name)\n if name:\n raise forms.ValidationError('该项目已存在')\n return pro_name\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(label='项目名', widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n project_desc = forms.CharField(label='项目说明', required=False, widget=\n forms.Textarea(attrs={'class': 'form-control', 'cols': 40, 'rows': 5}))\n auth_users = forms.ModelMultipleChoiceField(label='授权用户', required=\n False, queryset=User.get_all(), widget=forms.SelectMultiple(attrs={\n 'class': 'form-control selectpicker', 'data-live-search': 'true',\n 'data-size': '5', 'data-width': '100%'}))\n assets_set = forms.ModelMultipleChoiceField(label='旗下资产', required=\n False, help_text='如果你从资产创建打开此页面,晴忽略该项内容', queryset=Assets.get_all(),\n widget=forms.SelectMultiple(attrs={'class': 'selectpicker',\n 'data-live-search': 'true', 'data-size': '5', 'data-width': '100%'}))\n\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-5": "from django import forms\nfrom .models import Project\nfrom user.models import User\nfrom assets.models import Assets\n\n\nclass CreateProjectForm(forms.ModelForm):\n project_name = forms.CharField(\n label='项目名',\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\"}\n )\n )\n project_desc = forms.CharField(\n label='项目说明',\n required=False,\n widget=forms.Textarea(\n attrs={\"class\": \"form-control\", \"cols\": 40, \"rows\": 5}\n )\n )\n auth_users = forms.ModelMultipleChoiceField(\n label='授权用户',\n required=False,\n queryset=User.get_all(),\n widget=forms.SelectMultiple(\n attrs={\"class\": \"form-control selectpicker\", \"data-live-search\": \"true\", \"data-size\": \"5\",\n \"data-width\": \"100%\", }\n )\n )\n assets_set = forms.ModelMultipleChoiceField(\n label=\"旗下资产\",\n required=False,\n help_text=\"如果你从资产创建打开此页面,晴忽略该项内容\",\n queryset=Assets.get_all(),\n widget=forms.SelectMultiple(\n attrs={\n \"class\": \"selectpicker\", \"data-live-search\": \"true\", \"data-size\": \"5\",\n \"data-width\": \"100%\",\n }\n )\n )\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n\n def clean_project_name(self):\n pro_name = self.cleaned_data['project_name']\n name = Project.get_by_name(pro_name)\n if name:\n raise forms.ValidationError(\"该项目已存在\")\n return pro_name\n\n\nclass UpdateProjectForm(forms.ModelForm):\n project_name = forms.CharField(\n label='项目名',\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\"}\n )\n )\n project_desc = forms.CharField(\n label='项目说明',\n required=False,\n widget=forms.Textarea(\n attrs={\"class\": \"form-control\", \"cols\": 40, \"rows\": 5}\n )\n )\n auth_users = forms.ModelMultipleChoiceField(\n label='授权用户',\n required=False,\n queryset=User.get_all(),\n widget=forms.SelectMultiple(\n attrs={\"class\": \"form-control selectpicker\", \"data-live-search\": \"true\", \"data-size\": \"5\",\n \"data-width\": \"100%\", }\n )\n )\n assets_set = forms.ModelMultipleChoiceField(\n label=\"旗下资产\",\n required=False,\n help_text=\"如果你从资产创建打开此页面,晴忽略该项内容\",\n queryset=Assets.get_all(),\n widget=forms.SelectMultiple(\n attrs={\n \"class\": \"selectpicker\", \"data-live-search\": \"true\", \"data-size\": \"5\",\n \"data-width\": \"100%\",\n }\n )\n )\n\n class Meta:\n model = Project\n fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('-d', type=str, help='dataset')
parser.add_argument('-o', type=str, default='dataset', help='output directory')
parser.add_argument('-f', type=str, default='mp4', help='format')
parser.add_argument('-c', type=str, default='', help='country')
parser.add_argument('-q', type=int, default=0, help='quality')
parser.add_argument('-i', action='store_true', default=False, help=
'ignore download')
<|reserved_special_token_0|>
print('[*] Video Download Finished')
<|reserved_special_token_0|>
for aid in aids:
extra = dict()
if 'ep' in aid:
epid = aid
aid = attr['aid']
fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)
)
page = int(epid[2:]) - int(attr['base'][2:]) + 1
info = GetVideoInfo(aid.strip('av'), key, 1)
else:
fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))
info = GetVideoInfo(aid.strip('av'), key)
extra['danmaku'] = request_danmaku(cid=info.cid)
if 'country' in attr:
extra['country'] = attr['country']
extra['complete'] = False
else:
capture = get_capture(fn)
print('[*] Capture : {}'.format(fn))
        extra['duration'] = get_duration(capture=capture)
extra['nframes'] = get_nframes(capture=capture)
extra['fps'] = get_fps(capture=capture)
extra['boundary'] = get_boundary(fn, capture, extra['nframes'],
'hecate')
extra['positions'] = get_positions(extra['nframes'])
extra['fpsegment'] = get_fpsegment(extra['boundary'])
extra['score'] = get_score(**extra)
extra['summary'] = get_summary(**extra)
extra['complete'] = True
for k, v in extra.items():
setattr(info, k, v)
infos[aid] = info
save_pickle(infos, '{}.info'.format(args.d))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
key = '03fc8eb101b091fb'
parser = argparse.ArgumentParser(description='Download Video From Bilibili')
parser.add_argument('-d', type=str, help='dataset')
parser.add_argument('-o', type=str, default='dataset', help='output directory')
parser.add_argument('-f', type=str, default='mp4', help='format')
parser.add_argument('-c', type=str, default='', help='country')
parser.add_argument('-q', type=int, default=0, help='quality')
parser.add_argument('-i', action='store_true', default=False, help=
'ignore download')
args = parser.parse_args()
cookie = dict()
cookie['DedeUserID'] = '347368229'
cookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'
cookie['sid'] = 'ii8ca1k2'
cookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'
aids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.
join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,
debug=True)
print('[*] Video Download Finished')
infos = dict()
for aid in aids:
extra = dict()
if 'ep' in aid:
epid = aid
aid = attr['aid']
fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)
)
page = int(epid[2:]) - int(attr['base'][2:]) + 1
info = GetVideoInfo(aid.strip('av'), key, 1)
else:
fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))
info = GetVideoInfo(aid.strip('av'), key)
extra['danmaku'] = request_danmaku(cid=info.cid)
if 'country' in attr:
extra['country'] = attr['country']
extra['complete'] = False
else:
capture = get_capture(fn)
print('[*] Capture : {}'.format(fn))
        extra['duration'] = get_duration(capture=capture)
extra['nframes'] = get_nframes(capture=capture)
extra['fps'] = get_fps(capture=capture)
extra['boundary'] = get_boundary(fn, capture, extra['nframes'],
'hecate')
extra['positions'] = get_positions(extra['nframes'])
extra['fpsegment'] = get_fpsegment(extra['boundary'])
extra['score'] = get_score(**extra)
extra['summary'] = get_summary(**extra)
extra['complete'] = True
for k, v in extra.items():
setattr(info, k, v)
infos[aid] = info
save_pickle(infos, '{}.info'.format(args.d))
<|reserved_special_token_1|>
import os
import argparse
from data.downloader import *
from data.utils import *
from data.danmaku import *
from utils import *
key = '03fc8eb101b091fb'
parser = argparse.ArgumentParser(description='Download Video From Bilibili')
parser.add_argument('-d', type=str, help='dataset')
parser.add_argument('-o', type=str, default='dataset', help='output directory')
parser.add_argument('-f', type=str, default='mp4', help='format')
parser.add_argument('-c', type=str, default='', help='country')
parser.add_argument('-q', type=int, default=0, help='quality')
parser.add_argument('-i', action='store_true', default=False, help=
'ignore download')
args = parser.parse_args()
cookie = dict()
cookie['DedeUserID'] = '347368229'
cookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'
cookie['sid'] = 'ii8ca1k2'
cookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'
aids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.
join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,
debug=True)
print('[*] Video Download Finished')
infos = dict()
for aid in aids:
extra = dict()
if 'ep' in aid:
epid = aid
aid = attr['aid']
fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)
)
page = int(epid[2:]) - int(attr['base'][2:]) + 1
info = GetVideoInfo(aid.strip('av'), key, 1)
else:
fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))
info = GetVideoInfo(aid.strip('av'), key)
extra['danmaku'] = request_danmaku(cid=info.cid)
if 'country' in attr:
extra['country'] = attr['country']
extra['complete'] = False
else:
capture = get_capture(fn)
print('[*] Capture : {}'.format(fn))
        extra['duration'] = get_duration(capture=capture)
extra['nframes'] = get_nframes(capture=capture)
extra['fps'] = get_fps(capture=capture)
extra['boundary'] = get_boundary(fn, capture, extra['nframes'],
'hecate')
extra['positions'] = get_positions(extra['nframes'])
extra['fpsegment'] = get_fpsegment(extra['boundary'])
extra['score'] = get_score(**extra)
extra['summary'] = get_summary(**extra)
extra['complete'] = True
for k, v in extra.items():
setattr(info, k, v)
infos[aid] = info
save_pickle(infos, '{}.info'.format(args.d))
|
flexible
|
{
"blob_id": "479411727de14e8032b6d01cdb844632111af688",
"index": 5275,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\n<mask token>\nprint('[*] Video Download Finished')\n<mask token>\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] = get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-3": "<mask token>\nkey = '03fc8eb101b091fb'\nparser = argparse.ArgumentParser(description='Download Video From Bilibili')\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\nargs = parser.parse_args()\ncookie = dict()\ncookie['DedeUserID'] = '347368229'\ncookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'\ncookie['sid'] = 'ii8ca1k2'\ncookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'\naids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.\n join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,\n debug=True)\nprint('[*] Video Download Finished')\ninfos = dict()\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] = get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-4": "import os\nimport argparse\nfrom data.downloader import *\nfrom data.utils import *\nfrom data.danmaku import *\nfrom utils import *\nkey = '03fc8eb101b091fb'\nparser = argparse.ArgumentParser(description='Download Video From Bilibili')\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\nargs = parser.parse_args()\ncookie = dict()\ncookie['DedeUserID'] = '347368229'\ncookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'\ncookie['sid'] = 'ii8ca1k2'\ncookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'\naids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.\n join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,\n debug=True)\nprint('[*] Video Download Finished')\ninfos = dict()\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] = get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
import numpy as np
class incStat:
    def __init__(self, Lambda, isTypeJitter=False):
self.CF1 = 0 # linear sum
self.CF2 = 0 # sum of squares
self.w = 0 # weight
self.isTypeJitter = isTypeJitter
self.Lambda = Lambda # Decay Factor
self.lastTimestamp = np.nan
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
    def insert(self, v, t=0): # v is a scalar, t is v's arrival timestamp
if self.isTypeJitter:
if not math.isnan(self.lastTimestamp):
v = t - self.lastTimestamp
else:
v = 0
self.processDecay(t)
# update with v
self.CF1 = self.CF1 + v
self.CF2 = self.CF2 + math.pow(v, 2)
self.w = self.w + 1
self.cur_mean = np.nan # force recalculation if called
self.cur_var = np.nan
self.cur_std = np.nan
def processDecay(self, timestamp):
factor=1
# check for decay
if not math.isnan(self.lastTimestamp):
timeDiff = timestamp - self.lastTimestamp
factor = math.pow(2, (-self.Lambda * timeDiff))
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
def weight(self):
return self.w
def mean(self):
if math.isnan(self.cur_mean): # calculate it only once when necessary
self.cur_mean = self.CF1 / self.w
return self.cur_mean
def var(self):
if math.isnan(self.cur_var): # calculate it only once when necessary
self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))
return self.cur_var
def std(self):
if math.isnan(self.cur_std): # calculate it only once when necessary
self.cur_std = math.sqrt(self.var())
return self.cur_std
#calculates and pulls all stats
def allstats(self):
self.cur_mean = self.CF1 / self.w
self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
return self.w, self.cur_mean, self.cur_var
def getHeaders(self):
return "weight", "mean", "variance"
#like incStat, but maintains stats between two streams
class incStat_2D(incStat):
    def __init__(self, Lambda):
self.CF1 = 0 # linear sum
self.CF2 = 0 # sum of squares
self.CF3 = None # sum of residules (A-uA)
self.w = 0 # weight
self.Lambda = Lambda # Decay Factor
self.lastTimestamp = np.nan
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = 0 # the value of the last residule
#other_incS_decay is the decay factor of the other incstat
def insert2D(self, v, t, other_incS_lastRes, other_incS_decay = 1): # also updates covariance (expensive)
self.processDecay(t)
# update with v
self.CF1 = self.CF1 + v
self.CF2 = self.CF2 + math.pow(v, 2)
self.w = self.w + 1
self.cur_mean = np.nan # force recalculation if called
self.cur_var = np.nan
self.cur_std = np.nan
self.cur_cov = np.nan
self.last_residule = v - self.mean()
self.CF3[0] = self.CF3[0] + self.last_residule * other_incS_lastRes * other_incS_decay
def processDecay(self, timestamp):
# check for decay
factor=1
if not math.isnan(self.lastTimestamp):
timeDiff = timestamp - self.lastTimestamp
factor = math.pow(2, (-self.Lambda * timeDiff))
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
            if self.CF3 is None:
self.CF3 = [0]
self.CF3[0] = self.CF3[0] * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
def radius(self, istat_ref): # the radius of two stats
return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].var(), 2))
def magnitude(self, istat_ref): # the magnitude of two stats
return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].mean(), 2))
    # covariance approximation using a hold-and-wait model
    def cov(self,istat_ref): # assumes that the timestamp in 'self.lastTimestamp' is the current time
if math.isnan(self.cur_cov):
self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
return self.cur_cov
    # Pearson correlation coefficient (using a hold-and-wait model)
    def p_cc(self, istat_ref): # assumes that the timestamp in 'self.lastTimestamp' is the current time
ss = self.std() * istat_ref[0].std()
if ss != 0:
return self.cov(istat_ref[0]) / ss
else:
return 0
# calculates and pulls all stats
def allstats2D(self, istat_ref):
self.cur_mean = self.CF1 / self.w
self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
self.cur_std = math.sqrt(self.cur_var)
if istat_ref[0].w != 0:
cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(istat_ref[0].mean(), 2))
radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(istat_ref[0].var(), 2))
ss = self.cur_std * istat_ref[0].std()
pcc = 0
if ss != 0:
pcc = cov / ss
else:
magnitude = self.cur_mean
radius = self.cur_var
cov = 0
pcc = 0
return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc
def getHeaders(self):
return "weight", "mean", "std", "magnitude", "radius", "covariance", "pcc"
# A set of 3 incremental statistics for a 1 or 2 dimensional time-series
class windowed_incStat:
# Each lambda in the tuple L parameter determines a incStat's decay window size (factor)
def __init__(self, L, isTypeJitter=False):
self.incStats = list()
self.L = sorted(L,reverse=True) #largest lambda to smallest
for l in self.L:
self.incStats.append(incStat(l,isTypeJitter))
# returns the weight, mean, and variance of each window
def getStats(self):
allstats = np.zeros(len(self.L)*3) #3 stats for each lambda
for i in range(0,len(self.incStats)):
stats = self.incStats[i].allstats()
allstats[i*3:(i*3+3)] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0,len(self.incStats)):
headers = headers + ["L"+str(self.L[i])+"_"+header for header in self.incStats[i].getHeaders()]
return headers
# updates the statistics
# val is the new observation
# timestamp is the arrival time of val.
# lite only updates incrementals needed for weight, mean, variance, magnitude and radius
def updateStats(self, val, timestamp):
for i in range(0,len(self.incStats)):
self.incStats[i].insert(val, timestamp)
# First updates, then gets the stats (weight, mean, and variance only)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
def getMaxW(self,t):
mx = 0
for stat in self.incStats:
stat.processDecay(t)
if stat.w > mx:
mx = stat.w
return mx
# A set of 3 incremental statistics for a 1 or 2 dimensional time-series
class windowed_incStat_2D:
# Each lambda parameter in L determines a incStat's decay window size (factor)
def __init__(self, L):
self.incStats = list()
self.L = sorted(L,reverse=True) #largest lambda to smallest
for l in self.L:
self.incStats.append(incStat_2D(l))
        self.other_winStat = None # a mutable reference [] to the windowed_incStat monitoring the other parallel time-series
# returns the weight, mean, variance, radius, magnitude, and covariance and pcc of each window
def getStats(self):
        allstats = np.zeros(len(self.L)*7) # 7 stats for each lambda
for i in range(0,len(self.incStats)):
stats = self.incStats[i].allstats2D([self.other_winStat[0].incStats[i]])
allstats[i*7:(i*7+7)] = stats
return allstats
def getHeaders(self):
headers = []
for i in range(0,len(self.incStats)):
headers = headers + ["L"+str(self.L[i])+"_"+header for header in self.incStats[i].getHeaders()]
return headers
# updates the statistics
# val is the new observation
# timestamp is the arrival time of val.
def updateStats(self, val, timestamp):
for i in range(0,len(self.incStats)):
decay = self.other_winStat[0].incStats[i].processDecay(timestamp)
self.incStats[i].insert2D(val, timestamp, self.other_winStat[0].incStats[i].last_residule, decay)
# First updates, then gets the stats (weight, mean, variance, magnitude, radius, and covariance)
def updateAndGetStats(self, val, timestamp):
self.updateStats(val, timestamp)
return self.getStats()
# Joins two windowed_incStat (e.g. rx and tx channels) together.
# other_winStat should be a [] mutable object
    def join_with_winStat(self, other_winStat): # protect with mutexes!
self.other_winStat = other_winStat
other_winStat[0].other_winStat = [self]
for i in range(0,len(self.incStats)):
self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]
def getMaxW(self,t):
lastIncStat = len(self.incStats)
self.incStats[lastIncStat-1].processDecay(t)
return self.incStats[lastIncStat-1].w
class incStatHT:
# incStatHT maintains a python dictionary object (Hash Table) filled with a collection of windowed_incStats.
    # The purpose of the incStatHT is to minimize the number of operations in incrementing and retrieving statistics on time-series in an online manner.
    # Note, this library is built in a manner which assumes that the individual time series are NOT sampled at the same time (i.e., fused), thus each stream should be updated individually with each corresponding value.
# The current implementation can maintain 1-dimensional or 2-dimensional time series, and monitors three windows over each time-series.
# If 1-dimensional, set key 2 to the empty string ''.
# If 2-dimensional, key1 should be the target stream
# Each lambda parameter determines a incStat's decay window size (factor): 2^(-lambda*deltaT)
def __init__(self):
self.HT = dict()
def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False): # 1D will only maintain the mean and variance
wis = self.HT.get(key)
if wis is None:
wis = [windowed_incStat(L,isTypeJitter)]
self.HT[key] = wis
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
def getHeaders_1D(self,L):
tmp_incs = windowed_incStat(L)
return tmp_incs.getHeaders()
class incStatHT_2D(incStatHT):
def updateGet_2D(self, key1, key2, val, timestamp, L): # src and dst should be strings
key = key1 + key2
        wis = self.HT.get(key) # get windowed incremental stat object
if wis is None:
wis = self.create_2D_entry(key1, key2, L)
elif hasattr(wis[0],'other_winStat') and wis[0].other_winStat == []:
self.create_1D_entry(key1,key2,L,wis)
stats = wis[0].updateAndGetStats(val, timestamp)
return stats
    def create_1D_entry(self, key1, key2, L, wis): # protect with mutexes!
# create
wis_k2_k1 = [windowed_incStat_2D(L)]
# connect net stats..
wis[0].join_with_winStat(wis_k2_k1)
# store
self.HT[key2 + key1] = wis_k2_k1
return wis_k2_k1
    def create_2D_entry(self, key1, key2, L): # protect with mutexes!
# create
wis_k1_k2 = [windowed_incStat_2D(L)]
wis_k2_k1 = [windowed_incStat_2D(L)]
# connect net stats..
wis_k1_k2[0].join_with_winStat(wis_k2_k1)
# store
self.HT[key1 + key2] = wis_k1_k2
self.HT[key2 + key1] = wis_k2_k1
return wis_k1_k2
def getHeaders_2D(self,L):
tmp_incs = windowed_incStat_2D(L)
return tmp_incs.getHeaders()
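
# ---------------------------------------------------------------------------
# Illustrative usage sketch (an editorial addition, not part of the original
# module). It only relies on the classes defined above; the keys, values and
# lambda choices below are hypothetical. It shows how the hash tables are
# driven: each observation is inserted with its timestamp, and the call
# returns the decayed statistics for every lambda window.
if __name__ == '__main__':
    lambdas = (5, 3, 1)  # decay factors, applied as 2^(-lambda * deltaT)

    # 1-dimensional stream: a single string key identifies the time-series
    ht = incStatHT()
    ht.updateGet_1D('192.168.1.5', val=60.0, timestamp=0.00, L=lambdas)
    stats_1d = ht.updateGet_1D('192.168.1.5', val=74.0, timestamp=0.05, L=lambdas)
    print(ht.getHeaders_1D(lambdas))
    print(stats_1d)  # weight, mean, variance per window

    # 2-dimensional stream (e.g. tx/rx): key1 is the target stream, key2 its peer
    ht2 = incStatHT_2D()
    stats_2d = ht2.updateGet_2D('hostA', 'hostB', val=120.0, timestamp=0.10, L=lambdas)
    print(ht2.getHeaders_2D(lambdas))
    print(stats_2d)  # weight, mean, std, magnitude, radius, covariance, pcc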
|
normal
|
{
"blob_id": "7b2ca3db44c5f71c2975bd8af701dafca3b3d081",
"index": 5492,
"step-1": "<mask token>\n\n\nclass windowed_incStat:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-2": "<mask token>\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n <mask token>\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n <mask token>\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, 
timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-3": "<mask token>\n\n\nclass incStat:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n\n def cov(self, istat_ref):\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return self.cur_cov\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n if istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(\n istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(\n istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n 
return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-4": "<mask token>\n\n\nclass incStat:\n\n def __init__(self, Lambda, isTypeJitter=False):\n self.CF1 = 0\n self.CF2 = 0\n self.w = 0\n self.isTypeJitter = isTypeJitter\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def insert(self, v, t=0):\n if self.isTypeJitter:\n if not math.isnan(self.lastTimestamp):\n v = t - self.lastTimestamp\n else:\n v = 0\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def weight(self):\n return self.w\n\n def mean(self):\n if math.isnan(self.cur_mean):\n self.cur_mean = self.CF1 / self.w\n return self.cur_mean\n\n def var(self):\n if math.isnan(self.cur_var):\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))\n return self.cur_var\n\n def std(self):\n if math.isnan(self.cur_std):\n self.cur_std = math.sqrt(self.var())\n return self.cur_std\n\n def allstats(self):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n return self.w, self.cur_mean, self.cur_var\n\n def getHeaders(self):\n return 'weight', 'mean', 'variance'\n\n\nclass incStat_2D(incStat):\n\n def __init__(self, Lambda):\n self.CF1 = 0\n self.CF2 = 0\n self.CF3 = None\n self.w = 0\n self.Lambda = Lambda\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0\n\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):\n self.processDecay(t)\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0\n ] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n factor = 1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, -self.Lambda * timeDiff)\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref):\n return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].\n var(), 2))\n\n def magnitude(self, istat_ref):\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].\n mean(), 2))\n\n def cov(self, istat_ref):\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return self.cur_cov\n\n def p_cc(self, istat_ref):\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n if istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(\n 
istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(\n istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return ('weight', 'mean', 'std', 'magnitude', 'radius',\n 'covariance', 'pcc')\n\n\nclass windowed_incStat:\n\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat(l, isTypeJitter))\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 3)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i * 3:i * 3 + 3] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self, t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n\nclass windowed_incStat_2D:\n\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L, reverse=True)\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None\n\n def getStats(self):\n allstats = np.zeros(len(self.L) * 7)\n for i in range(0, len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].\n incStats[i]])\n allstats[i * 7:i * 7 + 7] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0, len(self.incStats)):\n headers = headers + [('L' + str(self.L[i]) + '_' + header) for\n header in self.incStats[i].getHeaders()]\n return headers\n\n def updateStats(self, val, timestamp):\n for i in range(0, len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0]\n .incStats[i].last_residule, decay)\n\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def join_with_winStat(self, other_winStat):\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0, len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self, t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat - 1].processDecay(t)\n return self.incStats[lastIncStat - 1].w\n\n\nclass incStatHT:\n\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L, isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self, L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\n\nclass incStatHT_2D(incStatHT):\n\n def updateGet_2D(self, key1, key2, val, timestamp, L):\n key = key1 + key2\n wis = self.HT.get(key)\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0], 'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1, key2, L, wis)\n stats = 
wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis):\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis[0].join_with_winStat(wis_k2_k1)\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L):\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self, L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-5": "import math\nimport numpy as np\n\n\nclass incStat:\n def __init__(self, Lambda, isTypeJitter=False): # timestamp is creation time\n self.CF1 = 0 # linear sum\n self.CF2 = 0 # sum of squares\n self.w = 0 # weight\n self.isTypeJitter = isTypeJitter\n self.Lambda = Lambda # Decay Factor\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def insert(self, v, t=0): # v is a scalar, t is v's arrival the timestamp\n if self.isTypeJitter:\n if not math.isnan(self.lastTimestamp):\n v = t - self.lastTimestamp\n else:\n v = 0\n self.processDecay(t)\n\n # update with v\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan # force recalculation if called\n self.cur_var = np.nan\n self.cur_std = np.nan\n\n def processDecay(self, timestamp):\n factor=1\n # check for decay\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, (-self.Lambda * timeDiff))\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def weight(self):\n return self.w\n\n def mean(self):\n if math.isnan(self.cur_mean): # calculate it only once when necessary\n self.cur_mean = self.CF1 / self.w\n return self.cur_mean\n\n def var(self):\n if math.isnan(self.cur_var): # calculate it only once when necessary\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))\n return self.cur_var\n\n def std(self):\n if math.isnan(self.cur_std): # calculate it only once when necessary\n self.cur_std = math.sqrt(self.var())\n return self.cur_std\n\n #calculates and pulls all stats\n def allstats(self):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n return self.w, self.cur_mean, self.cur_var\n\n def getHeaders(self):\n return \"weight\", \"mean\", \"variance\"\n\n#like incStat, but maintains stats between two streams\nclass incStat_2D(incStat):\n def __init__(self, Lambda): # timestamp is creation time\n self.CF1 = 0 # linear sum\n self.CF2 = 0 # sum of squares\n self.CF3 = None # sum of residules (A-uA)\n self.w = 0 # weight\n self.Lambda = Lambda # Decay Factor\n self.lastTimestamp = np.nan\n self.cur_mean = np.nan\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = 0 # the value of the last residule\n\n #other_incS_decay is the decay factor of the other incstat\n def insert2D(self, v, t, other_incS_lastRes, other_incS_decay = 1): # also updates covariance (expensive)\n self.processDecay(t)\n\n # update with v\n self.CF1 = self.CF1 + v\n self.CF2 = self.CF2 + math.pow(v, 2)\n self.w = self.w + 1\n self.cur_mean = np.nan # force recalculation if called\n self.cur_var = np.nan\n self.cur_std = np.nan\n self.cur_cov = np.nan\n self.last_residule = v - self.mean()\n self.CF3[0] = self.CF3[0] + self.last_residule * other_incS_lastRes * other_incS_decay\n\n def processDecay(self, timestamp):\n # check for decay\n factor=1\n if not math.isnan(self.lastTimestamp):\n timeDiff = timestamp - self.lastTimestamp\n factor = math.pow(2, (-self.Lambda * timeDiff))\n self.CF1 = self.CF1 * factor\n self.CF2 = self.CF2 * factor\n if self.CF3 == None:\n self.CF3 = [0]\n self.CF3[0] = self.CF3[0] * factor\n self.w = self.w * factor\n self.lastTimestamp = timestamp\n return factor\n\n def radius(self, istat_ref): # the radius of two stats\n return math.sqrt(math.pow(self.var(), 2) + 
math.pow(istat_ref[0].var(), 2))\n\n def magnitude(self, istat_ref): # the magnitude of two stats\n return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].mean(), 2))\n\n #covaince approximation using a hold-and-wait model\n def cov(self,istat_ref): # assumes that current time is the timestamp in 'self.lastTimestamp' is the current time\n if math.isnan(self.cur_cov):\n self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n return self.cur_cov\n\n # Pearson corl. coef (using a hold-and-wait model)\n def p_cc(self, istat_ref): # assumes that current time is the timestamp in 'self.lastTimestamp' is the current time\n ss = self.std() * istat_ref[0].std()\n if ss != 0:\n return self.cov(istat_ref[0]) / ss\n else:\n return 0\n\n # calculates and pulls all stats\n def allstats2D(self, istat_ref):\n self.cur_mean = self.CF1 / self.w\n self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))\n self.cur_std = math.sqrt(self.cur_var)\n\n if istat_ref[0].w != 0:\n cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)\n magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(istat_ref[0].mean(), 2))\n radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(istat_ref[0].var(), 2))\n ss = self.cur_std * istat_ref[0].std()\n pcc = 0\n if ss != 0:\n pcc = cov / ss\n else:\n magnitude = self.cur_mean\n radius = self.cur_var\n cov = 0\n pcc = 0\n\n return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc\n\n def getHeaders(self):\n return \"weight\", \"mean\", \"std\", \"magnitude\", \"radius\", \"covariance\", \"pcc\"\n\n\n# A set of 3 incremental statistics for a 1 or 2 dimensional time-series\nclass windowed_incStat:\n # Each lambda in the tuple L parameter determines a incStat's decay window size (factor)\n def __init__(self, L, isTypeJitter=False):\n self.incStats = list()\n self.L = sorted(L,reverse=True) #largest lambda to smallest\n for l in self.L:\n self.incStats.append(incStat(l,isTypeJitter))\n\n # returns the weight, mean, and variance of each window\n def getStats(self):\n allstats = np.zeros(len(self.L)*3) #3 stats for each lambda\n for i in range(0,len(self.incStats)):\n stats = self.incStats[i].allstats()\n allstats[i*3:(i*3+3)] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0,len(self.incStats)):\n headers = headers + [\"L\"+str(self.L[i])+\"_\"+header for header in self.incStats[i].getHeaders()]\n return headers\n\n # updates the statistics\n # val is the new observation\n # timestamp is the arrival time of val.\n # lite only updates incrementals needed for weight, mean, variance, magnitude and radius\n def updateStats(self, val, timestamp):\n for i in range(0,len(self.incStats)):\n self.incStats[i].insert(val, timestamp)\n\n # First updates, then gets the stats (weight, mean, and variance only)\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n def getMaxW(self,t):\n mx = 0\n for stat in self.incStats:\n stat.processDecay(t)\n if stat.w > mx:\n mx = stat.w\n return mx\n\n# A set of 3 incremental statistics for a 1 or 2 dimensional time-series\nclass windowed_incStat_2D:\n # Each lambda parameter in L determines a incStat's decay window size (factor)\n def __init__(self, L):\n self.incStats = list()\n self.L = sorted(L,reverse=True) #largest lambda to smallest\n for l in self.L:\n self.incStats.append(incStat_2D(l))\n self.other_winStat = None # a mutable refernece [] to the windowed_incStat monitoring the other parallel time-series\n\n # returns 
the weight, mean, variance, radius, magnitude, and covariance and pcc of each window\n def getStats(self):\n allstats = np.zeros(len(self.L)*7) #6 stats for each lambda\n for i in range(0,len(self.incStats)):\n stats = self.incStats[i].allstats2D([self.other_winStat[0].incStats[i]])\n allstats[i*7:(i*7+7)] = stats\n return allstats\n\n def getHeaders(self):\n headers = []\n for i in range(0,len(self.incStats)):\n headers = headers + [\"L\"+str(self.L[i])+\"_\"+header for header in self.incStats[i].getHeaders()]\n return headers\n\n # updates the statistics\n # val is the new observation\n # timestamp is the arrival time of val.\n def updateStats(self, val, timestamp):\n for i in range(0,len(self.incStats)):\n decay = self.other_winStat[0].incStats[i].processDecay(timestamp)\n self.incStats[i].insert2D(val, timestamp, self.other_winStat[0].incStats[i].last_residule, decay)\n\n # First updates, then gets the stats (weight, mean, variance, magnitude, radius, and covariance)\n def updateAndGetStats(self, val, timestamp):\n self.updateStats(val, timestamp)\n return self.getStats()\n\n # Joins two windowed_incStat (e.g. rx and tx channels) together.\n # other_winStat should be a [] mutable object\n def join_with_winStat(self, other_winStat): # prectect with mutexes!\n self.other_winStat = other_winStat\n other_winStat[0].other_winStat = [self]\n for i in range(0,len(self.incStats)):\n self.incStats[i].CF3 = other_winStat[0].incStats[i].CF3 = [0]\n\n def getMaxW(self,t):\n lastIncStat = len(self.incStats)\n self.incStats[lastIncStat-1].processDecay(t)\n return self.incStats[lastIncStat-1].w\n\nclass incStatHT:\n # incStatHT maintains a python dictionary object (Hash Table) filled with a collection of windowed_incStats.\n # The purpose of the incStatHT is to minimize the number of operations in incrementing and retrieving statics on time-series in an online manner.\n # Note, this library is built in a manner which assumes that the individual time sereis are NOT sampled at the same time (i.e., fused), thus each stream should be updated individually with each corresponding value.\n\n # The current implementation can maintain 1-dimensional or 2-dimensional time series, and monitors three windows over each time-series.\n # If 1-dimensional, set key 2 to the empty string ''.\n # If 2-dimensional, key1 should be the target stream\n # Each lambda parameter determines a incStat's decay window size (factor): 2^(-lambda*deltaT)\n def __init__(self):\n self.HT = dict()\n\n def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False): # 1D will only maintain the mean and variance\n wis = self.HT.get(key)\n if wis is None:\n wis = [windowed_incStat(L,isTypeJitter)]\n self.HT[key] = wis\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def getHeaders_1D(self,L):\n tmp_incs = windowed_incStat(L)\n return tmp_incs.getHeaders()\n\nclass incStatHT_2D(incStatHT):\n def updateGet_2D(self, key1, key2, val, timestamp, L): # src and dst should be strings\n key = key1 + key2\n wis = self.HT.get(key) # get windowed incrimental stat object\n if wis is None:\n wis = self.create_2D_entry(key1, key2, L)\n elif hasattr(wis[0],'other_winStat') and wis[0].other_winStat == []:\n self.create_1D_entry(key1,key2,L,wis)\n stats = wis[0].updateAndGetStats(val, timestamp)\n return stats\n\n def create_1D_entry(self, key1, key2, L, wis): # prectect with mutexes!\n # create\n wis_k2_k1 = [windowed_incStat_2D(L)]\n # connect net stats..\n wis[0].join_with_winStat(wis_k2_k1)\n # store\n self.HT[key2 + key1] = 
wis_k2_k1\n return wis_k2_k1\n\n def create_2D_entry(self, key1, key2, L): # prectect with mutexes!\n # create\n wis_k1_k2 = [windowed_incStat_2D(L)]\n wis_k2_k1 = [windowed_incStat_2D(L)]\n # connect net stats..\n wis_k1_k2[0].join_with_winStat(wis_k2_k1)\n # store\n self.HT[key1 + key2] = wis_k1_k2\n self.HT[key2 + key1] = wis_k2_k1\n return wis_k1_k2\n\n def getHeaders_2D(self,L):\n tmp_incs = windowed_incStat_2D(L)\n return tmp_incs.getHeaders()\n",
"step-ids": [
18,
32,
35,
44,
46
]
}
|
[
18,
32,
35,
44,
46
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_link():
parser = optparse.OptionParser()
parser.add_option('-l', '--link', dest='url', help=
'direct link of file to download .pdf')
url, argument = parser.parse_args()
return url
def download(url):
try:
get_request = requests.get(url)
name_url = url.split('/')[-1]
print(name_url)
with open(name_url, 'wb') as file:
file.write(get_request.content)
except:
print('[-]Print Valid Link')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_link():
parser = optparse.OptionParser()
parser.add_option('-l', '--link', dest='url', help=
'direct link of file to download .pdf')
url, argument = parser.parse_args()
return url
def download(url):
try:
get_request = requests.get(url)
name_url = url.split('/')[-1]
print(name_url)
with open(name_url, 'wb') as file:
file.write(get_request.content)
except:
print('[-]Print Valid Link')
def start():
url_link = get_link()
try:
download(url_link.url)
except:
url_link = input('[+]Enter link:')
download(url_link)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_link():
parser = optparse.OptionParser()
parser.add_option('-l', '--link', dest='url', help=
'direct link of file to download .pdf')
url, argument = parser.parse_args()
return url
def download(url):
try:
get_request = requests.get(url)
name_url = url.split('/')[-1]
print(name_url)
with open(name_url, 'wb') as file:
file.write(get_request.content)
except:
print('[-]Print Valid Link')
def start():
url_link = get_link()
try:
download(url_link.url)
except:
url_link = input('[+]Enter link:')
download(url_link)
start()
<|reserved_special_token_1|>
#!/usr/bin/python2
import requests ,optparse
def get_link():
parser=optparse.OptionParser()
parser.add_option("-l","--link",dest="url",help="direct link of file to download .pdf")
(url,argument)=parser.parse_args()
return url
def download(url):
try:
get_request=requests.get(url)
name_url=url.split("/")[-1]
print(name_url)
with open(name_url,"wb") as file:
file.write(get_request.content)
except:
print("[-]Print Valid Link")
def start():
url_link=get_link()
try:
download(url_link.url)
except:
url_link=input("[+]Enter link:")
download(url_link)
start()
|
flexible
|
{
"blob_id": "22ddae977afd2a1b0a729cf0d56783eaaca3b0a0",
"index": 9813,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_link():\n parser = optparse.OptionParser()\n parser.add_option('-l', '--link', dest='url', help=\n 'direct link of file to download .pdf')\n url, argument = parser.parse_args()\n return url\n\n\ndef download(url):\n try:\n get_request = requests.get(url)\n name_url = url.split('/')[-1]\n print(name_url)\n with open(name_url, 'wb') as file:\n file.write(get_request.content)\n except:\n print('[-]Print Valid Link')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_link():\n parser = optparse.OptionParser()\n parser.add_option('-l', '--link', dest='url', help=\n 'direct link of file to download .pdf')\n url, argument = parser.parse_args()\n return url\n\n\ndef download(url):\n try:\n get_request = requests.get(url)\n name_url = url.split('/')[-1]\n print(name_url)\n with open(name_url, 'wb') as file:\n file.write(get_request.content)\n except:\n print('[-]Print Valid Link')\n\n\ndef start():\n url_link = get_link()\n try:\n download(url_link.url)\n except:\n url_link = input('[+]Enter link:')\n download(url_link)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_link():\n parser = optparse.OptionParser()\n parser.add_option('-l', '--link', dest='url', help=\n 'direct link of file to download .pdf')\n url, argument = parser.parse_args()\n return url\n\n\ndef download(url):\n try:\n get_request = requests.get(url)\n name_url = url.split('/')[-1]\n print(name_url)\n with open(name_url, 'wb') as file:\n file.write(get_request.content)\n except:\n print('[-]Print Valid Link')\n\n\ndef start():\n url_link = get_link()\n try:\n download(url_link.url)\n except:\n url_link = input('[+]Enter link:')\n download(url_link)\n\n\nstart()\n",
"step-5": "#!/usr/bin/python2\n\nimport requests ,optparse\n\n\ndef get_link():\n parser=optparse.OptionParser()\n parser.add_option(\"-l\",\"--link\",dest=\"url\",help=\"direct link of file to download .pdf\")\n (url,argument)=parser.parse_args()\n return url\n\ndef download(url):\n try:\n get_request=requests.get(url)\n name_url=url.split(\"/\")[-1]\n print(name_url)\n with open(name_url,\"wb\") as file:\n file.write(get_request.content)\n except:\n print(\"[-]Print Valid Link\")\n \n \n\n\ndef start():\n url_link=get_link()\n try:\t\n download(url_link.url)\n except:\n url_link=input(\"[+]Enter link:\")\n download(url_link)\n\nstart()\n\n\n\n",
"step-ids": [
0,
2,
3,
4,
6
]
}
|
[
0,
2,
3,
4,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level='DEBUG')
<|reserved_special_token_0|>
client.sendto(message.encode('utf-8'), (serverName, serverPort))
<|reserved_special_token_0|>
print(modifiedMessage.decode('utf-8'))
client.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'david'
<|reserved_special_token_0|>
logging.basicConfig(level='DEBUG')
serverName = '127.0.0.1'
serverPort = 12000
client = UDPClient()
message = input('Input lowercase sentence: ')
client.sendto(message.encode('utf-8'), (serverName, serverPort))
modifiedMessage, serverAddress = client.recvfrom(2048)
print(modifiedMessage.decode('utf-8'))
client.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'david'
import logging
from src.UDP import UDPClient
logging.basicConfig(level='DEBUG')
serverName = '127.0.0.1'
serverPort = 12000
client = UDPClient()
message = input('Input lowercase sentence: ')
client.sendto(message.encode('utf-8'), (serverName, serverPort))
modifiedMessage, serverAddress = client.recvfrom(2048)
print(modifiedMessage.decode('utf-8'))
client.close()
<|reserved_special_token_1|>
#!/usr/bin/python
# File: UdpClient.py
# Author: David Zemon
# Project: Project1
#
# Created with: PyCharm Community Edition
"""
@description:
"""
__author__ = 'david'
import logging
from src.UDP import UDPClient
logging.basicConfig(level="DEBUG")
serverName = '127.0.0.1'
serverPort = 12000
client = UDPClient()
message = input("Input lowercase sentence: ")
client.sendto(message.encode('utf-8'), (serverName, serverPort))
modifiedMessage, serverAddress = client.recvfrom(2048)
print(modifiedMessage.decode('utf-8'))
client.close()
|
flexible
|
{
"blob_id": "4d388c912915c3f1f9e433f1342289f0864b3a11",
"index": 409,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level='DEBUG')\n<mask token>\nclient.sendto(message.encode('utf-8'), (serverName, serverPort))\n<mask token>\nprint(modifiedMessage.decode('utf-8'))\nclient.close()\n",
"step-3": "<mask token>\n__author__ = 'david'\n<mask token>\nlogging.basicConfig(level='DEBUG')\nserverName = '127.0.0.1'\nserverPort = 12000\nclient = UDPClient()\nmessage = input('Input lowercase sentence: ')\nclient.sendto(message.encode('utf-8'), (serverName, serverPort))\nmodifiedMessage, serverAddress = client.recvfrom(2048)\nprint(modifiedMessage.decode('utf-8'))\nclient.close()\n",
"step-4": "<mask token>\n__author__ = 'david'\nimport logging\nfrom src.UDP import UDPClient\nlogging.basicConfig(level='DEBUG')\nserverName = '127.0.0.1'\nserverPort = 12000\nclient = UDPClient()\nmessage = input('Input lowercase sentence: ')\nclient.sendto(message.encode('utf-8'), (serverName, serverPort))\nmodifiedMessage, serverAddress = client.recvfrom(2048)\nprint(modifiedMessage.decode('utf-8'))\nclient.close()\n",
"step-5": "#/usr/bin/python\n# File: UdpClient.py\n# Author: David Zemon\n# Project: Project1\n#\n# Created with: PyCharm Community Edition\n\n\"\"\"\n@description:\n\"\"\"\n__author__ = 'david'\n\nimport logging\nfrom src.UDP import UDPClient\n\nlogging.basicConfig(level=\"DEBUG\")\n\nserverName = '127.0.0.1'\nserverPort = 12000\n\nclient = UDPClient()\n\nmessage = input(\"Input lowercase sentence: \")\nclient.sendto(message.encode('utf-8'), (serverName, serverPort))\n\nmodifiedMessage, serverAddress = client.recvfrom(2048)\nprint(modifiedMessage.decode('utf-8'))\nclient.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
SECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')
SQLALCHEMY_ECHO = True
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 8
CSRF_ENABLED = True
CSRF_SESSION_KEY = '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A'
UPLOAD_FOLDER = '%s/images'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
|
normal
|
{
"blob_id": "6ee71cf61ae6a79ec0cd06f1ddc7dc614a76c7b9",
"index": 6547,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n_basedir = os.path.abspath(os.path.dirname(__file__))\nDEBUG = True\nSECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')\nSQLALCHEMY_ECHO = True\nDATABASE_CONNECT_OPTIONS = {}\nTHREADS_PER_PAGE = 8\nCSRF_ENABLED = True\nCSRF_SESSION_KEY = (\n '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A')\nUPLOAD_FOLDER = '%s/images'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n",
"step-3": "import os\n_basedir = os.path.abspath(os.path.dirname(__file__))\nDEBUG = True\nSECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')\nSQLALCHEMY_ECHO = True\nDATABASE_CONNECT_OPTIONS = {}\nTHREADS_PER_PAGE = 8\nCSRF_ENABLED = True\nCSRF_SESSION_KEY = (\n '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A')\nUPLOAD_FOLDER = '%s/images'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n",
"step-4": "import os\n_basedir = os.path.abspath(os.path.dirname(__file__))\n\nDEBUG = True\n\nSECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')\nSQLALCHEMY_ECHO = True\nDATABASE_CONNECT_OPTIONS = {}\n\nTHREADS_PER_PAGE = 8\n\nCSRF_ENABLED = True\nCSRF_SESSION_KEY = '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A'\n\nUPLOAD_FOLDER = '%s/images'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
# Create your models here.
class Advertisement(models.Model):
title = models.CharField(max_length=1500, db_index=True, verbose_name='Заголовок')
description = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
update_at = models.DateTimeField(auto_now=True)
price = models.FloatField(verbose_name='цена', default=0)
views_count = models.IntegerField(verbose_name='количество просмотров', default=0)
status = models.ForeignKey('AdvertisementStatus', default=None,
null=True, on_delete=models.CASCADE,
related_name='advertisements', verbose_name='Статус')
def __str__(self):
return self.title
class Meta:
db_table = 'advertisements'
ordering = ['title']
class AdvertisementStatus(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Authors(models.Model):
name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')
email = models.EmailField()
phone = models.CharField(max_length=20, verbose_name='Телефон')
def __str__(self):
return self.name
|
normal
|
{
"blob_id": "c5bdbcc8ba38b02e5e5cf8b53362e87ba761443d",
"index": 8654,
"step-1": "<mask token>\n\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Advertisement(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'advertisements'\n ordering = ['title']\n\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Advertisement(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'advertisements'\n ordering = ['title']\n\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass Advertisement(models.Model):\n title = models.CharField(max_length=1500, db_index=True, verbose_name=\n 'Заголовок')\n description = models.TextField(blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n update_at = models.DateTimeField(auto_now=True)\n price = models.FloatField(verbose_name='цена', default=0)\n views_count = models.IntegerField(verbose_name='количество просмотров',\n default=0)\n status = models.ForeignKey('AdvertisementStatus', default=None, null=\n True, on_delete=models.CASCADE, related_name='advertisements',\n verbose_name='Статус')\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'advertisements'\n ordering = ['title']\n\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n",
"step-5": "from django.db import models\n\n# Create your models here.\n\n\nclass Advertisement(models.Model):\n title = models.CharField(max_length=1500, db_index=True, verbose_name='Заголовок')\n description = models.TextField(blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n update_at = models.DateTimeField(auto_now=True)\n price = models.FloatField(verbose_name='цена', default=0)\n views_count = models.IntegerField(verbose_name='количество просмотров', default=0)\n status = models.ForeignKey('AdvertisementStatus', default=None,\n null=True, on_delete=models.CASCADE,\n related_name='advertisements', verbose_name='Статус')\n\n def __str__(self):\n return self.title\n\n class Meta:\n db_table = 'advertisements'\n ordering = ['title']\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n\n\n",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
'''
config -- config manipulator module for share
@author: shimarin
@copyright: 2014 Walbrix Corporation. All rights reserved.
@license: proprietary
'''
import json,argparse
import oscar,groonga
def parser_setup(parser):
parser.add_argument("base_dir")
parser.add_argument("operations", nargs="*")
parser.set_defaults(func=run)
def get(base_dir, config_name = None):
with oscar.context(base_dir) as context:
with context.command("select") as command:
command.add_argument("table", "Config")
if config_name: command.add_argument("filter", "_key == \"%s\"" % command.escape(config_name))
rows = json.loads(command.execute())[0][2:]
if config_name:
return json.loads(rows[0][2]) if len(rows) > 0 else None
#else
result = {}
for row in rows:
result[row[1]] = json.loads(row[2])
return result
def put(base_dir, config_name, value):
with oscar.context(base_dir, oscar.min_free_blocks) as context:
groonga.load(context, "Config", {"_key":config_name,"value":oscar.to_json(value)})
def put_all(base_dir, configs):
with oscar.context(base_dir, oscar.min_free_blocks) as context:
groonga.load(context, "Config", map(lambda (x,y):{"_key":x,"value":oscar.to_json(y)}, configs.items()))
def show_one(base_dir, config_name):
with oscar.context(base_dir) as context:
print groonga.get(context, "Config", config_name)
def set_one(base_dir, config_name, value):
with oscar.context(base_dir, oscar.min_free_blocks) as context:
groonga.load(context, "Config", {"_key":"config_name","value":"value"})
def run(args):
if len(args.operations) == 0:
print get(args.base_dir)
elif len(args.operations) == 1:
print get(args.base_dir, args.operations[0])
elif len(args.operations) == 2:
put(args.base_dir, args.operations[0], json.loads(args.operations[1]))
else:
raise Exception("Invalid number of arguments")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser_setup(parser)
args = parser.parse_args()
args.func(args)
|
normal
|
{
"blob_id": "8b4590cf2d8c040b6ab31c63baff0d83ab818641",
"index": 5423,
"step-1": "'''\nconfig -- config manipulator module for share\n\n@author: shimarin\n\n@copyright: 2014 Walbrix Corporation. All rights reserved.\n\n@license: proprietary\n'''\n\nimport json,argparse\nimport oscar,groonga\n\ndef parser_setup(parser):\n parser.add_argument(\"base_dir\")\n parser.add_argument(\"operations\", nargs=\"*\")\n parser.set_defaults(func=run)\n\n\ndef get(base_dir, config_name = None):\n with oscar.context(base_dir) as context:\n with context.command(\"select\") as command:\n command.add_argument(\"table\", \"Config\")\n if config_name: command.add_argument(\"filter\", \"_key == \\\"%s\\\"\" % command.escape(config_name))\n rows = json.loads(command.execute())[0][2:]\n if config_name:\n return json.loads(rows[0][2]) if len(rows) > 0 else None\n #else\n result = {}\n for row in rows:\n result[row[1]] = json.loads(row[2])\n return result\n\ndef put(base_dir, config_name, value):\n with oscar.context(base_dir, oscar.min_free_blocks) as context:\n groonga.load(context, \"Config\", {\"_key\":config_name,\"value\":oscar.to_json(value)})\n\ndef put_all(base_dir, configs):\n with oscar.context(base_dir, oscar.min_free_blocks) as context:\n groonga.load(context, \"Config\", map(lambda (x,y):{\"_key\":x,\"value\":oscar.to_json(y)}, configs.items()))\n\ndef show_one(base_dir, config_name):\n with oscar.context(base_dir) as context:\n print groonga.get(context, \"Config\", config_name)\n\ndef set_one(base_dir, config_name, value):\n with oscar.context(base_dir, oscar.min_free_blocks) as context:\n groonga.load(context, \"Config\", {\"_key\":\"config_name\",\"value\":\"value\"})\n\ndef run(args):\n if len(args.operations) == 0:\n print get(args.base_dir)\n elif len(args.operations) == 1:\n print get(args.base_dir, args.operations[0])\n elif len(args.operations) == 2:\n put(args.base_dir, args.operations[0], json.loads(args.operations[1]))\n else:\n raise Exception(\"Invalid number of arguments\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser_setup(parser)\n args = parser.parse_args()\n args.func(args)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from locations.storefinders.stockinstore import StockInStoreSpider
class ScooterHutAUSpider(StockInStoreSpider):
name = "scooter_hut_au"
item_attributes = {"brand": "Scooter Hut", "brand_wikidata": "Q117747623"}
api_site_id = "10112"
api_widget_id = "119"
api_widget_type = "product"
api_origin = "https://scooterhut.com.au"
|
normal
|
{
"blob_id": "e37f4422c1063df50453f7abf72a0a9a31156d8b",
"index": 899,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = 'scooter_hut_au'\n item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}\n api_site_id = '10112'\n api_widget_id = '119'\n api_widget_type = 'product'\n api_origin = 'https://scooterhut.com.au'\n",
"step-4": "from locations.storefinders.stockinstore import StockInStoreSpider\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = 'scooter_hut_au'\n item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}\n api_site_id = '10112'\n api_widget_id = '119'\n api_widget_type = 'product'\n api_origin = 'https://scooterhut.com.au'\n",
"step-5": "from locations.storefinders.stockinstore import StockInStoreSpider\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = \"scooter_hut_au\"\n item_attributes = {\"brand\": \"Scooter Hut\", \"brand_wikidata\": \"Q117747623\"}\n api_site_id = \"10112\"\n api_widget_id = \"119\"\n api_widget_type = \"product\"\n api_origin = \"https://scooterhut.com.au\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import z5py
from shutil import copytree, copyfile
ROOT = '/g/kreshuk/pape/Work/data/mito_em/data'
SCRATCH = '/scratch/pape/mito_em/data'
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(
os.path.join(ref_path, 'attributes.json'),
os.path.join(out_path, 'attributes.json')
)
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(
os.path.join(in_path, in_key),
os.path.join(out_path, out_key)
)
# copy training, test and val data to scratch
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print("Copying", name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo))
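    # bb selects a centered crop of 2*halo voxels per axis around the middle of the volume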
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
# make_small_volume()
|
normal
|
{
"blob_id": "9d3db4ca5bf964c68e9778a3625c842e74bf9dbd",
"index": 1228,
"step-1": "<mask token>\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(\n out_path, 'attributes.json'))\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n in_key = 'setup0/timepoint0/s0'\n copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))\n\n\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print('Copying', name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(\n out_path, 'attributes.json'))\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n in_key = 'setup0/timepoint0/s0'\n copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))\n\n\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print('Copying', name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\ndef make_small_volume():\n in_path = './data/human_train.n5'\n f = z5py.File(in_path, 'r')\n ds_r = f['raw']\n ds_l = f['labels']\n halo = [32, 256, 256]\n shape = ds_r.shape\n bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)\n )\n raw = ds_r[bb]\n labels = ds_l[bb]\n out_path = './data/small.n5'\n with z5py.File(out_path, 'a') as f:\n f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.\n chunks)\n f.create_dataset('labels', data=labels, compression='gzip', chunks=\n ds_l.chunks)\n\n\nif __name__ == '__main__':\n prepare_scratch()\n",
"step-3": "<mask token>\nROOT = '/g/kreshuk/pape/Work/data/mito_em/data'\nSCRATCH = '/scratch/pape/mito_em/data'\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(\n out_path, 'attributes.json'))\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n in_key = 'setup0/timepoint0/s0'\n copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))\n\n\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print('Copying', name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\ndef make_small_volume():\n in_path = './data/human_train.n5'\n f = z5py.File(in_path, 'r')\n ds_r = f['raw']\n ds_l = f['labels']\n halo = [32, 256, 256]\n shape = ds_r.shape\n bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)\n )\n raw = ds_r[bb]\n labels = ds_l[bb]\n out_path = './data/small.n5'\n with z5py.File(out_path, 'a') as f:\n f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.\n chunks)\n f.create_dataset('labels', data=labels, compression='gzip', chunks=\n ds_l.chunks)\n\n\nif __name__ == '__main__':\n prepare_scratch()\n",
"step-4": "import os\nimport z5py\nfrom shutil import copytree, copyfile\nROOT = '/g/kreshuk/pape/Work/data/mito_em/data'\nSCRATCH = '/scratch/pape/mito_em/data'\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(\n out_path, 'attributes.json'))\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n in_key = 'setup0/timepoint0/s0'\n copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))\n\n\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print('Copying', name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images',\n 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\ndef make_small_volume():\n in_path = './data/human_train.n5'\n f = z5py.File(in_path, 'r')\n ds_r = f['raw']\n ds_l = f['labels']\n halo = [32, 256, 256]\n shape = ds_r.shape\n bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)\n )\n raw = ds_r[bb]\n labels = ds_l[bb]\n out_path = './data/small.n5'\n with z5py.File(out_path, 'a') as f:\n f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.\n chunks)\n f.create_dataset('labels', data=labels, compression='gzip', chunks=\n ds_l.chunks)\n\n\nif __name__ == '__main__':\n prepare_scratch()\n",
"step-5": "import os\nimport z5py\nfrom shutil import copytree, copyfile\n\nROOT = '/g/kreshuk/pape/Work/data/mito_em/data'\nSCRATCH = '/scratch/pape/mito_em/data'\n\n\ndef create_file(out_path, ref_path):\n os.makedirs(out_path, exist_ok=True)\n copyfile(\n os.path.join(ref_path, 'attributes.json'),\n os.path.join(out_path, 'attributes.json')\n )\n\n\ndef copy_to_scratch(in_path, out_path, out_key):\n if out_key in z5py.File(out_path, 'r'):\n return\n\n in_key = 'setup0/timepoint0/s0'\n copytree(\n os.path.join(in_path, in_key),\n os.path.join(out_path, out_key)\n )\n\n\n# copy training, test and val data to scratch\ndef prepare_scratch():\n os.makedirs(SCRATCH, exist_ok=True)\n\n for name in ('rat', 'human'):\n for split in ('train', 'val', 'test'):\n print(\"Copying\", name, split)\n out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')\n\n raw_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-raw.n5')\n create_file(out_path, raw_path)\n copy_to_scratch(raw_path, out_path, 'raw')\n\n label_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-mitos.n5')\n if os.path.exists(label_path):\n copy_to_scratch(label_path, out_path, 'labels')\n\n\ndef make_small_volume():\n in_path = './data/human_train.n5'\n f = z5py.File(in_path, 'r')\n ds_r = f['raw']\n ds_l = f['labels']\n\n halo = [32, 256, 256]\n shape = ds_r.shape\n bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo))\n\n raw = ds_r[bb]\n labels = ds_l[bb]\n\n out_path = './data/small.n5'\n with z5py.File(out_path, 'a') as f:\n f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.chunks)\n f.create_dataset('labels', data=labels, compression='gzip', chunks=ds_l.chunks)\n\n\nif __name__ == '__main__':\n prepare_scratch()\n # make_small_volume()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
# Generated by Django 3.0.5 on 2020-04-25 15:35
from django.db import migrations, models
import lots.models
class Migration(migrations.Migration):
dependencies = [
('lots', '0012_auto_20200425_1720'),
]
operations = [
migrations.AlterField(
model_name='lots',
name='photo',
field=models.ImageField(default='images/default.png', upload_to=lots.models.path_and_rename),
),
]
|
normal
|
{
"blob_id": "b36f3ffed888edaa7716f712f1549dc205799caf",
"index": 6338,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lots', '0012_auto_20200425_1720')]\n operations = [migrations.AlterField(model_name='lots', name='photo',\n field=models.ImageField(default='images/default.png', upload_to=\n lots.models.path_and_rename))]\n",
"step-4": "from django.db import migrations, models\nimport lots.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lots', '0012_auto_20200425_1720')]\n operations = [migrations.AlterField(model_name='lots', name='photo',\n field=models.ImageField(default='images/default.png', upload_to=\n lots.models.path_and_rename))]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-04-25 15:35\n\nfrom django.db import migrations, models\nimport lots.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lots', '0012_auto_20200425_1720'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='lots',\n name='photo',\n field=models.ImageField(default='images/default.png', upload_to=lots.models.path_and_rename),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from ortools.sat.python import cp_model
import os
import math
import csv
import sys
def ortoolsSolverReduceVar(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)
for i in range(1, num + 1)]
play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)
for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i)
for i in range(1, num + 1)]
total_fun = sum([fun[i] * play[i] for i in range(num)])
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
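    # compare[i] reifies "token[i] - play[i] + refill > cap": when true, refilling after
    # round i would overflow the cap, so the next round's token count is clamped to cap below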
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <=
cap).OnlyEnforceIf(compare[i].Not())
model.Add(play[i] >= 1)
model.Add(play[i] <= token[i])
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] +
refill).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverRange(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(1, cap, 't%i' % i)
for i in range(1, num + 1)]
play = [model.NewIntVar(1, cap, 'q%i' % i)
for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i)
for i in range(1, num + 1)]
total_fun = model.NewIntVar(-100, 1000, 'total_fun')
model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <=
cap).OnlyEnforceIf(compare[i].Not())
model.Add(play[i] >= 1)
model.Add(play[i] <= token[i])
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] +
refill).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverNeg(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)
for i in range(1, num + 1)]
play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)
for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i)
for i in range(1, num + 1)]
neg = [model.NewBoolVar('n%i' % i)
for i in range(1, num + 1)]
total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')
model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
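    # neg[i] reifies "fun[i] < 0": games with negative fun are forced to play exactly 1,
    # while non-negative games may play anywhere between 1 and the available tokens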
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <=
cap).OnlyEnforceIf(compare[i].Not())
model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
model.Add(play[i] <= token[i])
model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] +
refill).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverComb(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(1, cap, 't%i' % i)
for i in range(1, num + 1)]
play = [model.NewIntVar(1, cap, 'q%i' % i)
for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i)
for i in range(1, num + 1)]
neg = [model.NewBoolVar('n%i' % i)
for i in range(1, num + 1)]
total_fun = sum([fun[i] * play[i] for i in range(num)])
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <=
cap).OnlyEnforceIf(compare[i].Not())
model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
model.Add(play[i] <= token[i])
model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] +
refill).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
if __name__ == '__main__':
file = sys.argv[1]
f = open(file)
for i in range(5):
exec(f.readline())
f.close()
[sat, token, play, total_fun, time] = ortoolsSolverComb(
num, cap, refill, fun, goal)
print('Status:', sat)
if sat == 'OPTIMAL':
print('Maximum total fun:', total_fun)
|
normal
|
{
"blob_id": "da98835e48a759cbe7bd29ddba1fac20c006827d",
"index": 4996,
"step-1": "<mask token>\n\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\n<mask token>\n\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ortoolsSolverReduceVar(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverNeg(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 
0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef ortoolsSolverReduceVar(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverNeg(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 
0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\nif __name__ == '__main__':\n file = sys.argv[1]\n f = open(file)\n for i in range(5):\n exec(f.readline())\n f.close()\n [sat, token, play, total_fun, time] = ortoolsSolverComb(num, cap,\n refill, fun, goal)\n print('Status:', sat)\n if sat == 'OPTIMAL':\n print('Maximum total fun:', total_fun)\n",
"step-4": "from ortools.sat.python import cp_model\nimport os\nimport math\nimport csv\nimport sys\n\n\ndef ortoolsSolverReduceVar(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverNeg(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n 
model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\nif __name__ == '__main__':\n file = sys.argv[1]\n f = open(file)\n for i in range(5):\n exec(f.readline())\n f.close()\n [sat, token, play, total_fun, time] = ortoolsSolverComb(num, cap,\n refill, fun, goal)\n print('Status:', sat)\n if sat == 'OPTIMAL':\n print('Maximum total fun:', total_fun)\n",
"step-5": "from ortools.sat.python import cp_model\nimport os\nimport math\nimport csv\nimport sys\n\ndef ortoolsSolverReduceVar(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)\n for i in range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)\n for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i)\n for i in range(1, num + 1)]\n total_fun = sum([fun[i] * play[i] for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <=\n cap).OnlyEnforceIf(compare[i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] +\n refill).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i)\n for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i)\n for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i)\n for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <=\n cap).OnlyEnforceIf(compare[i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] +\n refill).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverNeg(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)\n for i in range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)\n for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i)\n for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i)\n for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')\n model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <=\n cap).OnlyEnforceIf(compare[i].Not())\n model.Add(fun[i] 
< 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] +\n refill).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i)\n for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i)\n for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i)\n for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i)\n for i in range(1, num + 1)]\n total_fun = sum([fun[i] * play[i] for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <=\n cap).OnlyEnforceIf(compare[i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] +\n refill).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\nif __name__ == '__main__':\n file = sys.argv[1]\n f = open(file)\n for i in range(5):\n exec(f.readline())\n f.close()\n [sat, token, play, total_fun, time] = ortoolsSolverComb(\n num, cap, refill, fun, goal)\n print('Status:', sat)\n if sat == 'OPTIMAL':\n print('Maximum total fun:', total_fun)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# -*- coding: utf-8
# @paidatocandeira
# Accesses the file of the CADASTRO NACIONAL DE EMPRESAS INIDÔNEAS E SUSPENSAS (CEIS - national registry of ineligible and suspended companies) published on the Portal da Transparência
#
import pandas as pd
# Part 2 - can be run in Jupyter to inspect the results
# Reads the file available for download directly (http://www.portaltransparencia.gov.br/downloads/snapshot.asp?c=CEIS#get)
ceis_arquivo = pd.read_csv("20180225_CEIS.csv", sep=';', encoding='latin_1', converters={'CPF ou CNPJ do Sancionado': lambda x: str(x)})
# It is an ordinary CSV file, so it can also be opened in other programs
ceis_arquivo.reset_index()
# Example query - sanctions issued in SP
ceis_arquivo.info() # lists the names of all columns
ceis_sp = ceis_arquivo[(ceis_arquivo['UF Órgão Sancionador'] == 'SP')]
ceis_sp.to_csv('ceis_sp.csv') # Saves only the SP subset as CSV
|
normal
|
{
"blob_id": "d2325b07d11e64df0b26d0de9992a6f496e92a30",
"index": 2879,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nceis_arquivo.reset_index()\nceis_arquivo.info()\n<mask token>\nceis_sp.to_csv('ceis_sp.csv')\n",
"step-3": "<mask token>\nceis_arquivo = pd.read_csv('20180225_CEIS.csv', sep=';', encoding='latin_1',\n converters={'CPF ou CNPJ do Sancionado': lambda x: str(x)})\nceis_arquivo.reset_index()\nceis_arquivo.info()\nceis_sp = ceis_arquivo[ceis_arquivo['UF Órgão Sancionador'] == 'SP']\nceis_sp.to_csv('ceis_sp.csv')\n",
"step-4": "import pandas as pd\nceis_arquivo = pd.read_csv('20180225_CEIS.csv', sep=';', encoding='latin_1',\n converters={'CPF ou CNPJ do Sancionado': lambda x: str(x)})\nceis_arquivo.reset_index()\nceis_arquivo.info()\nceis_sp = ceis_arquivo[ceis_arquivo['UF Órgão Sancionador'] == 'SP']\nceis_sp.to_csv('ceis_sp.csv')\n",
"step-5": "# -*- coding: utf-8\n# @paidatocandeira\n# Acessa arquivo do CADASTRO NACIONAL DE EMPRESAS INIDÔNEAS E SUSPENSAS (CEIS) que está no portal da Transparência\n#\n\nimport pandas as pd\n\n# Parte 2 - pode rodar no Jupyter para ver resultados\n# Método lendo direto o arquivo disponível para download (http://www.portaltransparencia.gov.br/downloads/snapshot.asp?c=CEIS#get)\nceis_arquivo = pd.read_csv(\"20180225_CEIS.csv\",sep=';',encoding = 'latin_1', converters={'CPF ou CNPJ do Sancionado': lambda x: str(x)})\n# É um arquivo CSV comum, pode abrir em outros programas também\nceis_arquivo.reset_index()\n# Exemplo de busca - de SP\nceis_arquivo.info() # mostra nomes de todas colunas\nceis_sp = ceis_arquivo[(ceis_arquivo['UF Órgão Sancionador'] == 'SP')]\nceis_sp.to_csv('ceis_sp.csv') # Salva como CSV só o grupo de SP\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
def superStack(operations):
s = Stack()
for o in operations:
op = o.split(' ')
if op[0] == 'push':
s.push(int(op[1]))
print(s.peek())
elif op[0] == 'pop':
s.pop()
print(s.peek())
elif op[0] == 'inc':
s.inc(int(op[1]), int(op[2]))
print(s.peek())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
def superStack(operations):
s = Stack()
for o in operations:
op = o.split(' ')
if op[0] == 'push':
s.push(int(op[1]))
print(s.peek())
elif op[0] == 'pop':
s.pop()
print(s.peek())
elif op[0] == 'inc':
s.inc(int(op[1]), int(op[2]))
print(s.peek())
if __name__ == '__main__':
operations_cnt = 0
operations_cnt = int(input())
operations_i = 0
operations = []
while operations_i < operations_cnt:
try:
operations_item = str(input())
except:
operations_item = None
operations.append(operations_item)
operations_i += 1
res = superStack(operations)
<|reserved_special_token_1|>
#!/bin/python3
# Implement a stack with push, pop, inc(e, k) operations
# inc (e,k) - Add k to each of bottom e elements
import sys
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
def superStack(operations):
s = Stack()
for o in operations:
op = o.split(' ')
if op[0] == 'push':
s.push(int(op[1]))
print(s.peek())
elif op[0] == 'pop':
s.pop()
print(s.peek())
elif op[0] == 'inc':
s.inc(int(op[1]), int(op[2]))
print(s.peek())
if __name__ == "__main__":
operations_cnt = 0
operations_cnt = int(input())
operations_i = 0
operations = []
while operations_i < operations_cnt:
try:
operations_item = str(input())
except:
operations_item = None
operations.append(operations_item)
operations_i += 1
res = superStack(operations);
|
flexible
|
{
"blob_id": "5ed439a2a7cfb9c941c40ea0c5eba2851a0f2855",
"index": 24,
"step-1": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n\n\nif __name__ == '__main__':\n operations_cnt = 0\n operations_cnt = int(input())\n operations_i = 0\n operations = []\n while operations_i < operations_cnt:\n try:\n operations_item = str(input())\n except:\n operations_item = None\n operations.append(operations_item)\n operations_i += 1\n res = superStack(operations)\n",
"step-5": "#!/bin/python3\n\n# Implement a stack with push, pop, inc(e, k) operations\n# inc (e,k) - Add k to each of bottom e elements\nimport sys\n\nclass Stack(object):\n def __init__(self):\n self.arr = []\n\n def push(self, val):\n self.arr.append(val)\n\n def pop(self):\n if len(self.arr):\n return self.arr.pop()\n\n def inc(self, e, k):\n count = min(len(self.arr), e)\n for i in range(count):\n self.arr[i] += k\n\n def peek(self):\n if len(self.arr):\n return self.arr[-1]\n else:\n return 'EMPTY'\n\ndef superStack(operations):\n s = Stack()\n for o in operations:\n op = o.split(' ')\n if op[0] == 'push':\n s.push(int(op[1]))\n print(s.peek())\n elif op[0] == 'pop':\n s.pop()\n print(s.peek())\n elif op[0] == 'inc':\n s.inc(int(op[1]), int(op[2]))\n print(s.peek())\n \n\nif __name__ == \"__main__\":\n operations_cnt = 0\n operations_cnt = int(input())\n operations_i = 0\n operations = []\n while operations_i < operations_cnt:\n try:\n operations_item = str(input())\n except:\n operations_item = None\n operations.append(operations_item)\n operations_i += 1\n\n\n res = superStack(operations);\n \n\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
#!/usr/bin/env python
# encoding: utf-8
from tree import *
def findKthNode(root, k):
    # Returns the k-th node of the tree in pre-order (root, left, right).
    # Assumes node.number holds the size of the subtree rooted at that node.
    if not root:
        return None
    if root.number < k or k <= 0:
        return None
    if k == 1:
        return root
    # Pre-order positions 2 .. left.number + 1 fall inside the left subtree.
    if root.left and root.left.number >= k - 1:
        return findKthNode(root.left, k - 1)
    else:
        # Skip the root and the entire left subtree, then search the right subtree.
        res = 1 if not root.left else root.left.number + 1
        return findKthNode(root.right, k - res)
root = testTree
node = findKthNode(root, 3)
if node:
print(node.n)
|
normal
|
{
"blob_id": "b9675bc65e06624c7f039188379b76da8e58fb19",
"index": 1623,
"step-1": "<mask token>\n\n\ndef findKthNode(root, k):\n if not root:\n return None\n if root.number < k or k <= 0:\n return None\n if k == 1:\n return root\n if root.left and root.left.number >= k - 1:\n return findKthNode(root.left, k - 1)\n else:\n res = 1 if not root.left else root.left.number + 1\n return findKthNode(root.right, k - res)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef findKthNode(root, k):\n if not root:\n return None\n if root.number < k or k <= 0:\n return None\n if k == 1:\n return root\n if root.left and root.left.number >= k - 1:\n return findKthNode(root.left, k - 1)\n else:\n res = 1 if not root.left else root.left.number + 1\n return findKthNode(root.right, k - res)\n\n\n<mask token>\nif node:\n print(node.n)\n",
"step-3": "<mask token>\n\n\ndef findKthNode(root, k):\n if not root:\n return None\n if root.number < k or k <= 0:\n return None\n if k == 1:\n return root\n if root.left and root.left.number >= k - 1:\n return findKthNode(root.left, k - 1)\n else:\n res = 1 if not root.left else root.left.number + 1\n return findKthNode(root.right, k - res)\n\n\nroot = testTree\nnode = findKthNode(root, 3)\nif node:\n print(node.n)\n",
"step-4": "from tree import *\n\n\ndef findKthNode(root, k):\n if not root:\n return None\n if root.number < k or k <= 0:\n return None\n if k == 1:\n return root\n if root.left and root.left.number >= k - 1:\n return findKthNode(root.left, k - 1)\n else:\n res = 1 if not root.left else root.left.number + 1\n return findKthNode(root.right, k - res)\n\n\nroot = testTree\nnode = findKthNode(root, 3)\nif node:\n print(node.n)\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom tree import *\n\ndef findKthNode(root, k):\n if not root:\n return None\n if root.number < k or k <= 0:\n return None\n if k == 1:\n return root\n if root.left and root.left.number >= k-1:\n return findKthNode(root.left, k - 1)\n else:\n res = 1 if not root.left else root.left.number + 1\n return findKthNode(root.right, k -res)\n\n\nroot = testTree\n\nnode = findKthNode(root, 3)\nif node:\n print(node.n)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class PR:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def init(self, data):
self.model = self.model.to(self.device)
collate_fn = _Collate()
self.eval_loader = torch.utils.data.DataLoader(data, Config.
eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,
num_workers=Config.loader_num_workers, collate_fn=collate_fn.
collate)
def count_e1_e2_by_relation(self, data):
rel_map = ddict(int)
for r in data.keys():
rel_map[r] = len(data[r])
count_pairs_by_relation = rel_map.items()
count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda
x: -x[1])
return count_pairs_by_relation
def convert_idx_to_1d(self, tuples_r, n=None):
if n is None:
n = self.model.num_entities
pos_1d = []
row_idx, column_idx = tuples_r
for i in range(len(row_idx)):
pos_1d.append(row_idx[i] * n + column_idx[i])
return pos_1d
def evaluate(self, epoch, logger):
idx_train = ddict(list)
for e1, r, e2 in self.train_data:
idx_train[r].append((e1, e2))
if self.eval_test_data:
idx_valid = ddict(list)
for e1, r, e2 in self.valid_data:
idx_valid[r].append((e1, e2))
idx_test = ddict(list)
for e1, r, e2 in self.test_data:
idx_test[r].append((e1, e2))
tuples_by_relation = self.count_e1_e2_by_relation(idx_test)
relations = np.array([x[0] for x in tuples_by_relation])
if self.most_frequent_rels > 0:
print('Evaluating on {} most frequent relations...'.format(self
.most_frequent_rels))
relations = relations[:self.most_frequent_rels]
prepare_test = ddict(list)
for e1, r, e2 in self.test_data:
prepare_test[r].append([e1, r, e2])
prepare_test_sorted = ddict(list)
for r in relations:
prepare_test_sorted[r].append(prepare_test[r])
eval_data_prepared = [triple_list for r, triple_list in
prepare_test_sorted.items()]
ranks_by_r = ddict(list)
num_true_triples = ddict(list)
self.init(eval_data_prepared)
for i, batch in enumerate(self.eval_loader):
batch = batch.to(self.device)
r = None
if len(batch.shape) >= 2:
r_tensor = batch[0][1]
r = batch[0][1].item()
else:
r_tensor = batch[1]
r = batch[1].item()
print('Evaluating: {} Progress: {}%'.format(r, round(i / len(
self.eval_loader) * 100, 2)))
scores = ddict(list)
score_matrix = self.model.score_matrix_r(r_tensor)
scores[r].append(score_matrix)
tuples_r_test = np.array(prepare_test_sorted[r][0])
tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]
tuples_r_train = np.array(idx_train[r])
tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]
score_matrix[tuples_r_train] = -math.inf
if self.eval_test_data:
tuples_r_valid = np.array(idx_valid[r])
if len(tuples_r_valid) > 0:
tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[
:, 1]]
score_matrix[tuples_r_valid] = -math.inf
test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)
num_true_triples[r] = len(test_tuples_r_1d)
test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([
test_tuples_r_1d]))
topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)
ranks = topk.cpu().data.numpy()
if len(ranks.shape) > 0:
ranks = np.sort(ranks)
print(ranks)
ranks_by_r[r].append(ranks)
print('-----------------------')
avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)
print('TOTAL MAP: {} '.format(avg_map))
print('TOTAL HITS: {}'.format(avg_hits))
if logger is not None:
avg_map = round(avg_map, 4)
avg_hits = round(avg_hits, 4)
logger.log_result(avg_map, avg_hits, epoch, 'a')
logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)
return avg_map, avg_hits
<|reserved_special_token_0|>
def metrics(self, ranks_by_relation, num_true_triples):
total_precision = 0
normalization = 0
total_hits = 0
for r, ranks in ranks_by_relation.items():
total_hits += len(ranks[0])
normalization += min(num_true_triples[r], self.topk)
for idx, rank in enumerate(ranks[0]):
total_precision += (idx + 1) / rank
avg_map = total_precision / normalization * 100
avg_hits = total_hits / normalization * 100
return avg_map, avg_hits
@staticmethod
def fromConfig(model, dataset):
evaluator = PR()
if dataset is None:
evaluator.dataset = dataset.load()
else:
evaluator.dataset = dataset
evaluator.device = torch.device(Config.eval_device)
torch.set_num_threads(Config.num_threads)
evaluator.model = model
coder = Coder()
data_dir = Config.data_dir
dataset = Config.dataset
train_triples = read_triplets(data_dir + Config.dataset + '/' +
Config.raw_split_files['train'], None)
train_triples = coder.construct_encoder(train_triples)
test_triples = read_triplets(data_dir + dataset + '/' + Config.
raw_split_files['test'], coder)
test_triples = coder.construct_encoder(test_triples)
valid_triples = read_triplets(data_dir + dataset + '/' + Config.
raw_split_files['valid'], coder)
valid_triples = coder.construct_encoder(valid_triples)
evaluator.train_data = train_triples
evaluator.eval_test_data = Config.eval_test_data
if Config.eval_test_data:
evaluator.test_data = test_triples
evaluator.valid_data = valid_triples
else:
evaluator.test_data = valid_triples
evaluator.most_frequent_rels = Config.most_frequent_rels
evaluator.topk = Config.topk
return evaluator
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PR:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def init(self, data):
self.model = self.model.to(self.device)
collate_fn = _Collate()
self.eval_loader = torch.utils.data.DataLoader(data, Config.
eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,
num_workers=Config.loader_num_workers, collate_fn=collate_fn.
collate)
def count_e1_e2_by_relation(self, data):
rel_map = ddict(int)
for r in data.keys():
rel_map[r] = len(data[r])
count_pairs_by_relation = rel_map.items()
count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda
x: -x[1])
return count_pairs_by_relation
def convert_idx_to_1d(self, tuples_r, n=None):
if n is None:
n = self.model.num_entities
pos_1d = []
row_idx, column_idx = tuples_r
for i in range(len(row_idx)):
pos_1d.append(row_idx[i] * n + column_idx[i])
return pos_1d
def evaluate(self, epoch, logger):
idx_train = ddict(list)
for e1, r, e2 in self.train_data:
idx_train[r].append((e1, e2))
if self.eval_test_data:
idx_valid = ddict(list)
for e1, r, e2 in self.valid_data:
idx_valid[r].append((e1, e2))
idx_test = ddict(list)
for e1, r, e2 in self.test_data:
idx_test[r].append((e1, e2))
tuples_by_relation = self.count_e1_e2_by_relation(idx_test)
relations = np.array([x[0] for x in tuples_by_relation])
if self.most_frequent_rels > 0:
print('Evaluating on {} most frequent relations...'.format(self
.most_frequent_rels))
relations = relations[:self.most_frequent_rels]
prepare_test = ddict(list)
for e1, r, e2 in self.test_data:
prepare_test[r].append([e1, r, e2])
prepare_test_sorted = ddict(list)
for r in relations:
prepare_test_sorted[r].append(prepare_test[r])
eval_data_prepared = [triple_list for r, triple_list in
prepare_test_sorted.items()]
ranks_by_r = ddict(list)
num_true_triples = ddict(list)
self.init(eval_data_prepared)
for i, batch in enumerate(self.eval_loader):
batch = batch.to(self.device)
r = None
if len(batch.shape) >= 2:
r_tensor = batch[0][1]
r = batch[0][1].item()
else:
r_tensor = batch[1]
r = batch[1].item()
print('Evaluating: {} Progress: {}%'.format(r, round(i / len(
self.eval_loader) * 100, 2)))
scores = ddict(list)
score_matrix = self.model.score_matrix_r(r_tensor)
scores[r].append(score_matrix)
tuples_r_test = np.array(prepare_test_sorted[r][0])
tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]
tuples_r_train = np.array(idx_train[r])
tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]
score_matrix[tuples_r_train] = -math.inf
if self.eval_test_data:
tuples_r_valid = np.array(idx_valid[r])
if len(tuples_r_valid) > 0:
tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[
:, 1]]
score_matrix[tuples_r_valid] = -math.inf
test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)
num_true_triples[r] = len(test_tuples_r_1d)
test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([
test_tuples_r_1d]))
topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)
ranks = topk.cpu().data.numpy()
if len(ranks.shape) > 0:
ranks = np.sort(ranks)
print(ranks)
ranks_by_r[r].append(ranks)
print('-----------------------')
avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)
print('TOTAL MAP: {} '.format(avg_map))
print('TOTAL HITS: {}'.format(avg_hits))
if logger is not None:
avg_map = round(avg_map, 4)
avg_hits = round(avg_hits, 4)
logger.log_result(avg_map, avg_hits, epoch, 'a')
logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)
return avg_map, avg_hits
def compute_topk(self, score_matrix, tuples_r_1d):
score_matrix = score_matrix.reshape((1, -1)).flatten()
if len(score_matrix) > self.topk + 1:
sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,
self.topk, largest=True, sorted=True)
other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)
tuples_r_1d = tuples_r_1d.to(self.device)
if len(tuples_r_1d.size()) > 0:
check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,
other) for t in tuples_r_1d if len(torch.nonzero(
sorted_k_indexs == t)) > 0]
else:
check = [torch.where(sorted_k_indexs == tuples_r_1d,
sorted_k_indexs, other)]
ranks = [(torch.nonzero(t) + 1) for t in check]
if len(ranks) == 1:
ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]
else:
ranks = torch.LongTensor(ranks).to(self.device)
return ranks
def metrics(self, ranks_by_relation, num_true_triples):
total_precision = 0
normalization = 0
total_hits = 0
for r, ranks in ranks_by_relation.items():
total_hits += len(ranks[0])
normalization += min(num_true_triples[r], self.topk)
for idx, rank in enumerate(ranks[0]):
total_precision += (idx + 1) / rank
avg_map = total_precision / normalization * 100
avg_hits = total_hits / normalization * 100
return avg_map, avg_hits
@staticmethod
def fromConfig(model, dataset):
evaluator = PR()
if dataset is None:
evaluator.dataset = dataset.load()
else:
evaluator.dataset = dataset
evaluator.device = torch.device(Config.eval_device)
torch.set_num_threads(Config.num_threads)
evaluator.model = model
coder = Coder()
data_dir = Config.data_dir
dataset = Config.dataset
train_triples = read_triplets(data_dir + Config.dataset + '/' +
Config.raw_split_files['train'], None)
train_triples = coder.construct_encoder(train_triples)
test_triples = read_triplets(data_dir + dataset + '/' + Config.
raw_split_files['test'], coder)
test_triples = coder.construct_encoder(test_triples)
valid_triples = read_triplets(data_dir + dataset + '/' + Config.
raw_split_files['valid'], coder)
valid_triples = coder.construct_encoder(valid_triples)
evaluator.train_data = train_triples
evaluator.eval_test_data = Config.eval_test_data
if Config.eval_test_data:
evaluator.test_data = test_triples
evaluator.valid_data = valid_triples
else:
evaluator.test_data = valid_triples
evaluator.most_frequent_rels = Config.most_frequent_rels
evaluator.topk = Config.topk
return evaluator
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PR:
dataset = None
eval_data = None
model = None
device = None
most_frequent_rels = None
test_data = None
train_data = None
valid_data = None
eval_test_data = None
topk = None
def init(self, data):
self.model = self.model.to(self.device)
collate_fn = _Collate()
self.eval_loader = torch.utils.data.DataLoader(data, Config.
eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,
num_workers=Config.loader_num_workers, collate_fn=collate_fn.
collate)
def count_e1_e2_by_relation(self, data):
rel_map = ddict(int)
for r in data.keys():
rel_map[r] = len(data[r])
count_pairs_by_relation = rel_map.items()
count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda
x: -x[1])
return count_pairs_by_relation
def convert_idx_to_1d(self, tuples_r, n=None):
if n is None:
n = self.model.num_entities
pos_1d = []
row_idx, column_idx = tuples_r
for i in range(len(row_idx)):
pos_1d.append(row_idx[i] * n + column_idx[i])
return pos_1d
def evaluate(self, epoch, logger):
idx_train = ddict(list)
for e1, r, e2 in self.train_data:
idx_train[r].append((e1, e2))
if self.eval_test_data:
idx_valid = ddict(list)
for e1, r, e2 in self.valid_data:
idx_valid[r].append((e1, e2))
idx_test = ddict(list)
for e1, r, e2 in self.test_data:
idx_test[r].append((e1, e2))
tuples_by_relation = self.count_e1_e2_by_relation(idx_test)
relations = np.array([x[0] for x in tuples_by_relation])
if self.most_frequent_rels > 0:
print('Evaluating on {} most frequent relations...'.format(self
.most_frequent_rels))
relations = relations[:self.most_frequent_rels]
prepare_test = ddict(list)
for e1, r, e2 in self.test_data:
prepare_test[r].append([e1, r, e2])
prepare_test_sorted = ddict(list)
for r in relations:
prepare_test_sorted[r].append(prepare_test[r])
eval_data_prepared = [triple_list for r, triple_list in
prepare_test_sorted.items()]
ranks_by_r = ddict(list)
num_true_triples = ddict(list)
self.init(eval_data_prepared)
for i, batch in enumerate(self.eval_loader):
batch = batch.to(self.device)
r = None
if len(batch.shape) >= 2:
r_tensor = batch[0][1]
r = batch[0][1].item()
else:
r_tensor = batch[1]
r = batch[1].item()
print('Evaluating: {} Progress: {}%'.format(r, round(i / len(
self.eval_loader) * 100, 2)))
scores = ddict(list)
score_matrix = self.model.score_matrix_r(r_tensor)
scores[r].append(score_matrix)
tuples_r_test = np.array(prepare_test_sorted[r][0])
tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]
tuples_r_train = np.array(idx_train[r])
tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]
score_matrix[tuples_r_train] = -math.inf
if self.eval_test_data:
tuples_r_valid = np.array(idx_valid[r])
if len(tuples_r_valid) > 0:
tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[
:, 1]]
score_matrix[tuples_r_valid] = -math.inf
test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)
num_true_triples[r] = len(test_tuples_r_1d)
test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([
test_tuples_r_1d]))
topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)
ranks = topk.cpu().data.numpy()
if len(ranks.shape) > 0:
ranks = np.sort(ranks)
print(ranks)
ranks_by_r[r].append(ranks)
print('-----------------------')
avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)
print('TOTAL MAP: {} '.format(avg_map))
print('TOTAL HITS: {}'.format(avg_hits))
if logger is not None:
avg_map = round(avg_map, 4)
avg_hits = round(avg_hits, 4)
logger.log_result(avg_map, avg_hits, epoch, 'a')
logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)
return avg_map, avg_hits
def compute_topk(self, score_matrix, tuples_r_1d):
score_matrix = score_matrix.reshape((1, -1)).flatten()
if len(score_matrix) > self.topk + 1:
sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,
self.topk, largest=True, sorted=True)
other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)
tuples_r_1d = tuples_r_1d.to(self.device)
if len(tuples_r_1d.size()) > 0:
check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,
other) for t in tuples_r_1d if len(torch.nonzero(
sorted_k_indexs == t)) > 0]
else:
check = [torch.where(sorted_k_indexs == tuples_r_1d,
sorted_k_indexs, other)]
ranks = [(torch.nonzero(t) + 1) for t in check]
if len(ranks) == 1:
ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]
else:
ranks = torch.LongTensor(ranks).to(self.device)
return ranks
def metrics(self, ranks_by_relation, num_true_triples):
total_precision = 0
normalization = 0
total_hits = 0
for r, ranks in ranks_by_relation.items():
total_hits += len(ranks[0])
normalization += min(num_true_triples[r], self.topk)
for idx, rank in enumerate(ranks[0]):
total_precision += (idx + 1) / rank
avg_map = total_precision / normalization * 100
avg_hits = total_hits / normalization * 100
return avg_map, avg_hits
@staticmethod
def fromConfig(model, dataset):
evaluator = PR()
if dataset is None:
evaluator.dataset = dataset.load()
else:
evaluator.dataset = dataset
evaluator.device = torch.device(Config.eval_device)
torch.set_num_threads(Config.num_threads)
evaluator.model = model
coder = Coder()
data_dir = Config.data_dir
dataset = Config.dataset
train_triples = read_triplets(data_dir + Config.dataset + '/' +
Config.raw_split_files['train'], None)
train_triples = coder.construct_encoder(train_triples)
test_triples = read_triplets(data_dir + dataset + '/' + Config.
raw_split_files['test'], coder)
test_triples = coder.construct_encoder(test_triples)
valid_triples = read_triplets(data_dir + dataset + '/' + Config.
raw_split_files['valid'], coder)
valid_triples = coder.construct_encoder(valid_triples)
evaluator.train_data = train_triples
evaluator.eval_test_data = Config.eval_test_data
if Config.eval_test_data:
evaluator.test_data = test_triples
evaluator.valid_data = valid_triples
else:
evaluator.test_data = valid_triples
evaluator.most_frequent_rels = Config.most_frequent_rels
evaluator.topk = Config.topk
return evaluator
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _Collate:
<|reserved_special_token_0|>
def collate(self, batch):
return torch.squeeze(torch.from_numpy(np.array(batch)))
class PR:
dataset = None
eval_data = None
model = None
device = None
most_frequent_rels = None
test_data = None
train_data = None
valid_data = None
eval_test_data = None
topk = None
def init(self, data):
self.model = self.model.to(self.device)
collate_fn = _Collate()
self.eval_loader = torch.utils.data.DataLoader(data, Config.
eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,
num_workers=Config.loader_num_workers, collate_fn=collate_fn.
collate)
def count_e1_e2_by_relation(self, data):
rel_map = ddict(int)
for r in data.keys():
rel_map[r] = len(data[r])
count_pairs_by_relation = rel_map.items()
count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda
x: -x[1])
return count_pairs_by_relation
def convert_idx_to_1d(self, tuples_r, n=None):
if n is None:
n = self.model.num_entities
pos_1d = []
row_idx, column_idx = tuples_r
for i in range(len(row_idx)):
pos_1d.append(row_idx[i] * n + column_idx[i])
return pos_1d
def evaluate(self, epoch, logger):
idx_train = ddict(list)
for e1, r, e2 in self.train_data:
idx_train[r].append((e1, e2))
if self.eval_test_data:
idx_valid = ddict(list)
for e1, r, e2 in self.valid_data:
idx_valid[r].append((e1, e2))
idx_test = ddict(list)
for e1, r, e2 in self.test_data:
idx_test[r].append((e1, e2))
tuples_by_relation = self.count_e1_e2_by_relation(idx_test)
relations = np.array([x[0] for x in tuples_by_relation])
if self.most_frequent_rels > 0:
print('Evaluating on {} most frequent relations...'.format(self
.most_frequent_rels))
relations = relations[:self.most_frequent_rels]
prepare_test = ddict(list)
for e1, r, e2 in self.test_data:
prepare_test[r].append([e1, r, e2])
prepare_test_sorted = ddict(list)
for r in relations:
prepare_test_sorted[r].append(prepare_test[r])
eval_data_prepared = [triple_list for r, triple_list in
prepare_test_sorted.items()]
ranks_by_r = ddict(list)
num_true_triples = ddict(list)
self.init(eval_data_prepared)
for i, batch in enumerate(self.eval_loader):
batch = batch.to(self.device)
r = None
if len(batch.shape) >= 2:
r_tensor = batch[0][1]
r = batch[0][1].item()
else:
r_tensor = batch[1]
r = batch[1].item()
print('Evaluating: {} Progress: {}%'.format(r, round(i / len(
self.eval_loader) * 100, 2)))
scores = ddict(list)
score_matrix = self.model.score_matrix_r(r_tensor)
scores[r].append(score_matrix)
tuples_r_test = np.array(prepare_test_sorted[r][0])
tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]
tuples_r_train = np.array(idx_train[r])
tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]
score_matrix[tuples_r_train] = -math.inf
if self.eval_test_data:
tuples_r_valid = np.array(idx_valid[r])
if len(tuples_r_valid) > 0:
tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[
:, 1]]
score_matrix[tuples_r_valid] = -math.inf
test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)
num_true_triples[r] = len(test_tuples_r_1d)
test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([
test_tuples_r_1d]))
topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)
ranks = topk.cpu().data.numpy()
if len(ranks.shape) > 0:
ranks = np.sort(ranks)
print(ranks)
ranks_by_r[r].append(ranks)
print('-----------------------')
avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)
print('TOTAL MAP: {} '.format(avg_map))
print('TOTAL HITS: {}'.format(avg_hits))
if logger is not None:
avg_map = round(avg_map, 4)
avg_hits = round(avg_hits, 4)
logger.log_result(avg_map, avg_hits, epoch, 'a')
logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)
return avg_map, avg_hits
def compute_topk(self, score_matrix, tuples_r_1d):
score_matrix = score_matrix.reshape((1, -1)).flatten()
if len(score_matrix) > self.topk + 1:
sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,
self.topk, largest=True, sorted=True)
other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)
tuples_r_1d = tuples_r_1d.to(self.device)
if len(tuples_r_1d.size()) > 0:
check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,
other) for t in tuples_r_1d if len(torch.nonzero(
sorted_k_indexs == t)) > 0]
else:
check = [torch.where(sorted_k_indexs == tuples_r_1d,
sorted_k_indexs, other)]
ranks = [(torch.nonzero(t) + 1) for t in check]
if len(ranks) == 1:
ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]
else:
ranks = torch.LongTensor(ranks).to(self.device)
return ranks
def metrics(self, ranks_by_relation, num_true_triples):
total_precision = 0
normalization = 0
total_hits = 0
for r, ranks in ranks_by_relation.items():
total_hits += len(ranks[0])
normalization += min(num_true_triples[r], self.topk)
for idx, rank in enumerate(ranks[0]):
total_precision += (idx + 1) / rank
avg_map = total_precision / normalization * 100
avg_hits = total_hits / normalization * 100
return avg_map, avg_hits
@staticmethod
def fromConfig(model, dataset):
evaluator = PR()
if dataset is None:
evaluator.dataset = dataset.load()
else:
evaluator.dataset = dataset
evaluator.device = torch.device(Config.eval_device)
torch.set_num_threads(Config.num_threads)
evaluator.model = model
coder = Coder()
data_dir = Config.data_dir
dataset = Config.dataset
train_triples = read_triplets(data_dir + Config.dataset + '/' +
Config.raw_split_files['train'], None)
train_triples = coder.construct_encoder(train_triples)
test_triples = read_triplets(data_dir + dataset + '/' + Config.
raw_split_files['test'], coder)
test_triples = coder.construct_encoder(test_triples)
valid_triples = read_triplets(data_dir + dataset + '/' + Config.
raw_split_files['valid'], coder)
valid_triples = coder.construct_encoder(valid_triples)
evaluator.train_data = train_triples
evaluator.eval_test_data = Config.eval_test_data
if Config.eval_test_data:
evaluator.test_data = test_triples
evaluator.valid_data = valid_triples
else:
evaluator.test_data = valid_triples
evaluator.most_frequent_rels = Config.most_frequent_rels
evaluator.topk = Config.topk
return evaluator
<|reserved_special_token_1|>
import torch.utils.data
import torch
import math
from util.helpers import *
from collections import defaultdict as ddict
class _Collate:
def __init__(self, ):
pass
def collate(self, batch):
return torch.squeeze(torch.from_numpy(np.array(batch)))
class PR:
dataset = None
eval_data = None
model = None
device = None
most_frequent_rels = None
test_data = None
train_data = None
valid_data = None
eval_test_data = None
topk = None
def init(self, data):
self.model = self.model.to(self.device)
collate_fn = _Collate()
self.eval_loader = torch.utils.data.DataLoader(
data,
Config.eval_batch_size, shuffle=False,
pin_memory=Config.pin_memory, num_workers=Config.loader_num_workers,
collate_fn=collate_fn.collate)
def count_e1_e2_by_relation(self, data):
rel_map = ddict(int)
for r in data.keys():
rel_map[r] = len(data[r])
count_pairs_by_relation = rel_map.items()
count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda x: -x[1])
return count_pairs_by_relation
# computes the position of a tuple for the flattened 1d score matrix
def convert_idx_to_1d(self, tuples_r, n=None):
if n is None:
n = self.model.num_entities
pos_1d = []
row_idx, column_idx = tuples_r
for i in range(len(row_idx)):
pos_1d.append(row_idx[i] * n + column_idx[i])
return pos_1d
def evaluate(self, epoch, logger):
#prepare data
idx_train = ddict(list)
for e1, r, e2 in self.train_data:
idx_train[r].append((e1, e2))
if self.eval_test_data:
idx_valid = ddict(list)
for e1, r, e2 in self.valid_data:
idx_valid[r].append((e1, e2))
idx_test = ddict(list)
for e1, r, e2 in self.test_data:
idx_test[r].append((e1, e2))
tuples_by_relation = self.count_e1_e2_by_relation(idx_test)
relations = np.array([x[0] for x in tuples_by_relation])
#tuples_count = np.array([x[1] for x in tuples_by_relation])
# speedup grid search
if self.most_frequent_rels > 0:
print("Evaluating on {} most frequent relations...".format(self.most_frequent_rels))
relations = relations[:self.most_frequent_rels]
prepare_test = ddict(list)
for e1, r, e2 in self.test_data:
prepare_test[r].append([e1, r, e2])
# sorted data
prepare_test_sorted = ddict(list)
for r in relations:
prepare_test_sorted[r].append(prepare_test[r])
eval_data_prepared = [triple_list for r, triple_list in prepare_test_sorted.items()]
ranks_by_r = ddict(list)
num_true_triples = ddict(list)
self.init(eval_data_prepared)
for i, batch in enumerate(self.eval_loader):
batch = batch.to(self.device)
r = None
if len(batch.shape) >= 2:
r_tensor = batch[0][1]
r = batch[0][1].item()
else:
# only one test triple for a given relation
r_tensor = batch[1]
r = batch[1].item()
print("Evaluating: {} Progress: {}%".format(r, round(i/len(self.eval_loader) * 100, 2)))
scores = ddict(list)
score_matrix = self.model.score_matrix_r(r_tensor)
scores[r].append(score_matrix)
# ----- FILTERING -----
# all e1, e2 for a given relation in test, validation data
tuples_r_test = np.array(prepare_test_sorted[r][0])
tuples_r_test = [tuples_r_test[:,0], tuples_r_test[:,2]]
tuples_r_train = np.array(idx_train[r])
tuples_r_train = [tuples_r_train[:,0], tuples_r_train[:,1]]
score_matrix[tuples_r_train] = -math.inf # Filter training set out
# Filter validation set out
if self.eval_test_data:
tuples_r_valid = np.array(idx_valid[r])
if (len(tuples_r_valid) > 0):
tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[:, 1]]
score_matrix[tuples_r_valid] = -math.inf
# ---- /FILTERING -----
test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)
num_true_triples[r] = len(test_tuples_r_1d)
test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([test_tuples_r_1d]))
topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)
ranks = topk.cpu().data.numpy()
if len(ranks.shape) > 0:
ranks = np.sort(ranks)
print(ranks)
ranks_by_r[r].append(ranks)
print("-----------------------")
avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)
print("TOTAL MAP: {} ".format(avg_map))
print("TOTAL HITS: {}".format(avg_hits))
# save results
if logger is not None:
avg_map = round(avg_map, 4)
avg_hits = round(avg_hits, 4)
logger.log_result(avg_map, avg_hits, epoch, "a")
logger.compare_best(avg_map, avg_hits, epoch, "_best", self.model)
return avg_map, avg_hits
def compute_topk(self, score_matrix, tuples_r_1d):
score_matrix = score_matrix.reshape((1, -1)).flatten()
if len(score_matrix) > self.topk+1:
sorted_k_values, sorted_k_indexs = torch.topk(score_matrix, self.topk, largest=True, sorted=True)
other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)
tuples_r_1d = tuples_r_1d.to(self.device)
if len(tuples_r_1d.size()) > 0:
check = [torch.where(sorted_k_indexs == t, sorted_k_indexs, other) for t in tuples_r_1d if len(torch.nonzero(sorted_k_indexs == t)) > 0]
else:
check = [torch.where(sorted_k_indexs == tuples_r_1d, sorted_k_indexs, other)]
ranks = [torch.nonzero(t)+1 for t in check]
if len(ranks) == 1: # one or zero elements in ranks
ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]
else:
ranks = torch.LongTensor(ranks).to(self.device)
return ranks
def metrics(self, ranks_by_relation, num_true_triples):
total_precision = 0
normalization = 0
total_hits = 0
for r, ranks in ranks_by_relation.items():
total_hits += len(ranks[0])
normalization += min(num_true_triples[r], self.topk)
for idx, rank in enumerate(ranks[0]):
total_precision += (idx + 1) / rank
avg_map = (total_precision / normalization) * 100
avg_hits = (total_hits / normalization) * 100
return avg_map, avg_hits
@staticmethod
def fromConfig(model, dataset):
evaluator = PR()
if dataset is None:
evaluator.dataset = dataset.load()
else:
evaluator.dataset = dataset
evaluator.device = torch.device(Config.eval_device)
torch.set_num_threads(Config.num_threads)
evaluator.model = model
coder = Coder()
data_dir = Config.data_dir
dataset = Config.dataset
train_triples = read_triplets(data_dir + Config.dataset + "/" + Config.raw_split_files['train'], None)
train_triples = coder.construct_encoder(train_triples)
test_triples = read_triplets(data_dir + dataset + "/" + Config.raw_split_files['test'], coder)
test_triples = coder.construct_encoder(test_triples)
valid_triples = read_triplets(data_dir + dataset + "/" + Config.raw_split_files['valid'], coder)
valid_triples = coder.construct_encoder(valid_triples)
evaluator.train_data = train_triples
evaluator.eval_test_data = Config.eval_test_data
if Config.eval_test_data: # use test set for evaluation, training and validation split for filtering
evaluator.test_data = test_triples
evaluator.valid_data = valid_triples
else: # use validation set for evaluation and training set for filtering
evaluator.test_data = valid_triples
evaluator.most_frequent_rels = Config.most_frequent_rels
evaluator.topk = Config.topk
return evaluator
|
flexible
|
{
"blob_id": "606a6e7ecc58ecbb11aa53602599e671514bc537",
"index": 3890,
"step-1": "<mask token>\n\n\nclass PR:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 
4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n <mask token>\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n",
"step-2": "<mask token>\n\n\nclass PR:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 
4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n",
"step-3": "<mask token>\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n test_data = None\n train_data = None\n valid_data = None\n eval_test_data = None\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger 
is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n",
"step-4": "<mask token>\n\n\nclass _Collate:\n <mask token>\n\n def collate(self, batch):\n return torch.squeeze(torch.from_numpy(np.array(batch)))\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n test_data = None\n train_data = None\n valid_data = None\n eval_test_data = None\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = 
self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n",
"step-5": "import torch.utils.data\nimport torch\nimport math\nfrom util.helpers import *\nfrom collections import defaultdict as ddict\n\nclass _Collate:\n def __init__(self, ):\n pass\n\n def collate(self, batch):\n return torch.squeeze(torch.from_numpy(np.array(batch)))\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n\n test_data = None\n train_data = None\n valid_data = None\n eval_test_data = None\n\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(\n data,\n Config.eval_batch_size, shuffle=False,\n pin_memory=Config.pin_memory, num_workers=Config.loader_num_workers,\n collate_fn=collate_fn.collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda x: -x[1])\n return count_pairs_by_relation\n\n # computes the position of a tuple for the flattened 1d score matrix\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n\n def evaluate(self, epoch, logger):\n #prepare data\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n\n relations = np.array([x[0] for x in tuples_by_relation])\n #tuples_count = np.array([x[1] for x in tuples_by_relation])\n\n # speedup grid search\n if self.most_frequent_rels > 0:\n print(\"Evaluating on {} most frequent relations...\".format(self.most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n\n # sorted data\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n\n eval_data_prepared = [triple_list for r, triple_list in prepare_test_sorted.items()]\n\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n\n\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n\n batch = batch.to(self.device)\n r = None\n\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n\n else:\n # only one test triple for a given relation\n r_tensor = batch[1]\n r = batch[1].item()\n print(\"Evaluating: {} Progress: {}%\".format(r, round(i/len(self.eval_loader) * 100, 2)))\n scores = ddict(list)\n\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n\n # ----- FILTERING -----\n # all e1, e2 for a given relation in test, validation data\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:,0], tuples_r_test[:,2]]\n\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:,0], tuples_r_train[:,1]]\n\n score_matrix[tuples_r_train] = -math.inf # Filter training set out\n\n # Filter validation set out\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if (len(tuples_r_valid) > 0):\n tuples_r_valid = 
[tuples_r_valid[:, 0], tuples_r_valid[:, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n\n # ---- /FILTERING -----\n\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n\n print(\"-----------------------\")\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n\n print(\"TOTAL MAP: {} \".format(avg_map))\n print(\"TOTAL HITS: {}\".format(avg_hits))\n\n # save results\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, \"a\")\n logger.compare_best(avg_map, avg_hits, epoch, \"_best\", self.model)\n\n return avg_map, avg_hits\n\n\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n\n if len(score_matrix) > self.topk+1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix, self.topk, largest=True, sorted=True)\n\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n\n tuples_r_1d = tuples_r_1d.to(self.device)\n\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs, other) for t in tuples_r_1d if len(torch.nonzero(sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d, sorted_k_indexs, other)]\n\n ranks = [torch.nonzero(t)+1 for t in check]\n if len(ranks) == 1: # one or zero elements in ranks\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n\n return ranks\n\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n\n avg_map = (total_precision / normalization) * 100\n avg_hits = (total_hits / normalization) * 100\n return avg_map, avg_hits\n\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n\n evaluator.device = torch.device(Config.eval_device)\n\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + \"/\" + Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n\n test_triples = read_triplets(data_dir + dataset + \"/\" + Config.raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n\n valid_triples = read_triplets(data_dir + dataset + \"/\" + Config.raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n\n\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n\n if Config.eval_test_data: # use test set for evaluation, training and validation split for filtering\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else: # use validation set for evaluation and training set for filtering\n evaluator.test_data = valid_triples\n\n 
evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n\n return evaluator\n\n\n\n\n \n",
"step-ids": [
7,
8,
9,
11,
14
]
}
|
[
7,
8,
9,
11,
14
] |
<|reserved_special_token_0|>
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
model = User
serializer_class = UserSerializer
def get_permissions(self):
return (AllowAny() if self.request.method == 'POST' else
permissions.IsStaffOrTargetUser(),)
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
class AuthView(APIView):
authentication_classes = authenticators.QuietBasicAuthentication,
def post(self, request, *args, **kwargs):
login(request, request.user)
        return Response(UserSerializer(request.user).data)
def delete(self, request, *args, **kwargs):
logout(request)
return Response()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TareasViewSet(viewsets.ModelViewSet):
queryset = Tareas.objects.all()
serializer_class = TareasSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
model = User
serializer_class = UserSerializer
def get_permissions(self):
return (AllowAny() if self.request.method == 'POST' else
permissions.IsStaffOrTargetUser(),)
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
class AuthView(APIView):
authentication_classes = authenticators.QuietBasicAuthentication,
def post(self, request, *args, **kwargs):
login(request, request.user)
        return Response(UserSerializer(request.user).data)
def delete(self, request, *args, **kwargs):
logout(request)
return Response()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EmpleadoViewSet(viewsets.ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PuestoViewSet(viewsets.ModelViewSet):
queryset = Puesto.objects.all()
serializer_class = PuestoSerializer
permission_classes = permissions.IsOwner,
class TareasViewSet(viewsets.ModelViewSet):
queryset = Tareas.objects.all()
serializer_class = TareasSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
model = User
serializer_class = UserSerializer
def get_permissions(self):
return (AllowAny() if self.request.method == 'POST' else
permissions.IsStaffOrTargetUser(),)
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
class AuthView(APIView):
authentication_classes = authenticators.QuietBasicAuthentication,
def post(self, request, *args, **kwargs):
login(request, request.user)
        return Response(UserSerializer(request.user).data)
def delete(self, request, *args, **kwargs):
logout(request)
return Response()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EmpleadoViewSet(viewsets.ModelViewSet):
model = Empleado
serializer_class = EmpleadoSerializer
permission_classes = permissions.IsOwner,
def pre_save(self, obj):
if isinstance(self.request.user, User):
obj.user = self.request.user
class PuestoViewSet(viewsets.ModelViewSet):
queryset = Puesto.objects.all()
serializer_class = PuestoSerializer
permission_classes = permissions.IsOwner,
class TareasViewSet(viewsets.ModelViewSet):
queryset = Tareas.objects.all()
serializer_class = TareasSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
model = User
serializer_class = UserSerializer
def get_permissions(self):
return (AllowAny() if self.request.method == 'POST' else
permissions.IsStaffOrTargetUser(),)
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
class AuthView(APIView):
authentication_classes = authenticators.QuietBasicAuthentication,
def post(self, request, *args, **kwargs):
login(request, request.user)
        return Response(UserSerializer(request.user).data)
def delete(self, request, *args, **kwargs):
logout(request)
return Response()
<|reserved_special_token_1|>
from ..models import Empleado, Puesto, Tareas
from django.contrib.auth import login, logout
from django.contrib.auth.models import User, Group
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import EmpleadoSerializer, PuestoSerializer, TareasSerializer, UserSerializer, GroupSerializer
from rest_framework import viewsets
from . import permissions, authenticators
class EmpleadoViewSet(viewsets.ModelViewSet):
#queryset = Empleado.objects.all()
model = Empleado
serializer_class = EmpleadoSerializer
permission_classes = (permissions.IsOwner,)
def pre_save(self, obj):
#add user to object if user is logged in
if isinstance(self.request.user, User):
obj.user = self.request.user
class PuestoViewSet(viewsets.ModelViewSet):
queryset = Puesto.objects.all()
#model = Puesto
serializer_class = PuestoSerializer
permission_classes = (permissions.IsOwner,)
class TareasViewSet(viewsets.ModelViewSet):
queryset = Tareas.objects.all()
serializer_class = TareasSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
model = User
serializer_class = UserSerializer
def get_permissions(self):
#Allow non-authenticated user to create
return (AllowAny() if self.request.method == 'POST'
else permissions.IsStaffOrTargetUser()),
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
class AuthView(APIView):
authentication_classes = (authenticators.QuietBasicAuthentication,)
def post(self, request, *args, **kwargs):
login(request, request.user)
		return Response(UserSerializer(request.user).data)
def delete(self, request, *args, **kwargs):
logout(request)
return Response()
|
flexible
|
{
"blob_id": "cce85d8a34fd20c699b7a87d402b34231b0d5dbb",
"index": 3186,
"step-1": "<mask token>\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n model = User\n serializer_class = UserSerializer\n\n def get_permissions(self):\n return (AllowAny() if self.request.method == 'POST' else\n permissions.IsStaffOrTargetUser(),)\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass AuthView(APIView):\n authentication_classes = authenticators.QuietBasicAuthentication,\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(serializers.UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response()\n",
"step-2": "<mask token>\n\n\nclass TareasViewSet(viewsets.ModelViewSet):\n queryset = Tareas.objects.all()\n serializer_class = TareasSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n model = User\n serializer_class = UserSerializer\n\n def get_permissions(self):\n return (AllowAny() if self.request.method == 'POST' else\n permissions.IsStaffOrTargetUser(),)\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass AuthView(APIView):\n authentication_classes = authenticators.QuietBasicAuthentication,\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(serializers.UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response()\n",
"step-3": "<mask token>\n\n\nclass EmpleadoViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PuestoViewSet(viewsets.ModelViewSet):\n queryset = Puesto.objects.all()\n serializer_class = PuestoSerializer\n permission_classes = permissions.IsOwner,\n\n\nclass TareasViewSet(viewsets.ModelViewSet):\n queryset = Tareas.objects.all()\n serializer_class = TareasSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n model = User\n serializer_class = UserSerializer\n\n def get_permissions(self):\n return (AllowAny() if self.request.method == 'POST' else\n permissions.IsStaffOrTargetUser(),)\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass AuthView(APIView):\n authentication_classes = authenticators.QuietBasicAuthentication,\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(serializers.UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response()\n",
"step-4": "<mask token>\n\n\nclass EmpleadoViewSet(viewsets.ModelViewSet):\n model = Empleado\n serializer_class = EmpleadoSerializer\n permission_classes = permissions.IsOwner,\n\n def pre_save(self, obj):\n if isinstance(self.request.user, User):\n obj.user = self.request.user\n\n\nclass PuestoViewSet(viewsets.ModelViewSet):\n queryset = Puesto.objects.all()\n serializer_class = PuestoSerializer\n permission_classes = permissions.IsOwner,\n\n\nclass TareasViewSet(viewsets.ModelViewSet):\n queryset = Tareas.objects.all()\n serializer_class = TareasSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n model = User\n serializer_class = UserSerializer\n\n def get_permissions(self):\n return (AllowAny() if self.request.method == 'POST' else\n permissions.IsStaffOrTargetUser(),)\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass AuthView(APIView):\n authentication_classes = authenticators.QuietBasicAuthentication,\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(serializers.UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response()\n",
"step-5": "from ..models import Empleado, Puesto, Tareas\r\nfrom django.contrib.auth import login, logout\r\nfrom django.contrib.auth.models import User, Group\r\nfrom rest_framework.permissions import AllowAny\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.views import APIView\r\nfrom .serializers import EmpleadoSerializer, PuestoSerializer, TareasSerializer, UserSerializer, GroupSerializer\r\nfrom rest_framework import viewsets\r\nfrom . import permissions, authenticators\r\n\r\nclass EmpleadoViewSet(viewsets.ModelViewSet):\r\n\t#queryset = Empleado.objects.all()\r\n\tmodel = Empleado\r\n\tserializer_class = EmpleadoSerializer\r\n\tpermission_classes = (permissions.IsOwner,)\r\n\r\n\tdef pre_save(self, obj):\r\n\t\t#add user to object if user is logged in\r\n\t\tif isinstance(self.request.user, User):\r\n\t\t\tobj.user = self.request.user\r\n\r\nclass PuestoViewSet(viewsets.ModelViewSet):\r\n\tqueryset = Puesto.objects.all()\r\n\t#model = Puesto\r\n\tserializer_class = PuestoSerializer\r\n\tpermission_classes = (permissions.IsOwner,)\r\n\r\nclass TareasViewSet(viewsets.ModelViewSet):\r\n\tqueryset = Tareas.objects.all()\r\n\tserializer_class = TareasSerializer\r\n\r\nclass UserViewSet(viewsets.ModelViewSet):\r\n\tqueryset = User.objects.all()\r\n\tmodel = User\r\n\tserializer_class = UserSerializer\r\n\r\n\tdef get_permissions(self):\r\n\t\t#Allow non-authenticated user to create\r\n\t\treturn (AllowAny() if self.request.method == 'POST'\r\n\t\t\telse permissions.IsStaffOrTargetUser()),\r\n\r\n\r\nclass GroupViewSet(viewsets.ModelViewSet):\r\n\tqueryset = Group.objects.all()\r\n\tserializer_class = GroupSerializer\r\n\r\nclass AuthView(APIView):\r\n\tauthentication_classes = (authenticators.QuietBasicAuthentication,)\r\n\r\n\tdef post(self, request, *args, **kwargs):\r\n\t\tlogin(request, request.user)\r\n\t\treturn Response(serializers.UserSerializer(request.user).data)\r\n\r\n\tdef delete(self, request, *args, **kwargs):\r\n\t\tlogout(request)\r\n\t\treturn Response()\r\n",
"step-ids": [
9,
11,
14,
16,
18
]
}
|
[
9,
11,
14,
16,
18
] |
from openvino.inference_engine import IENetwork, IECore
import numpy as np
import time
from datetime import datetime
import sys
import os
import cv2
class MotionDetect:
    # Class for motion detection
def __init__(self):
self.static_back = None
def detect_motion(self, frame, reset=False):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if self.static_back is None or reset:
self.static_back = gray
return False
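        # difference between the current frame and the stored background; threshold and dilate to extract motion contours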
diff_frame = cv2.absdiff(self.static_back, gray)
thresh_frame = cv2.threshold(diff_frame, 50, 255, cv2.THRESH_BINARY)[1]
thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
cnts, _ = cv2.findContours(thresh_frame.copy(),
cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if cnts:
return True
else:
return False
def reset_background(self):
self.static_back = None
class InferenceModel:
    # Class for creating an 'ExecInferModel' object
def __init__(self, device='MYRIAD'):
self.ie = IECore()
self.device = device
def create_exec_infer_model(self, model_dir, output_dir, num_requests=2):
        # Build the paths to the model files
model_xml = os.path.join(
model_dir, 'frozen_inference_graph.xml')
model_bin = os.path.join(
model_dir, 'frozen_inference_graph.bin')
exported_model = os.path.join(model_dir, 'exported_model')
        # Load the labels from 'classes.txt'
labels = [line.strip() for line in open(
os.path.join(model_dir, 'classes.txt')).readlines()]
assert os.path.isfile(model_bin)
assert os.path.isfile(model_xml)
        # Create the model from the IR files
net = IENetwork(model=model_xml, weights=model_bin)
        # Load the model's input/output shapes from 'net'
img_info_input_blob = None
feed_dict = {}
for blob_name in net.inputs:
if len(net.inputs[blob_name].shape) == 4:
input_blob = blob_name
elif len(net.inputs[blob_name].shape) == 2:
img_info_input_blob = blob_name
else:
raise RuntimeError("Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported"
.format(len(net.inputs[blob_name].shape), blob_name))
assert len(
net.outputs) == 1, "Demo supports only single output topologies"
out_blob = next(iter(net.outputs))
        # Import the compiled model (if available)
if os.path.isfile(exported_model):
print('found model to import')
try:
exec_net = self.ie.import_network(
model_file=exported_model, device_name=self.device, num_requests=num_requests)
except:
return False
else:
            # otherwise create and export it
print('creating exec model')
try:
exec_net = self.ie.load_network(
network=net, num_requests=num_requests, device_name=self.device)
exec_net.export(exported_model)
except:
return False
nchw = net.inputs[input_blob].shape
del net
if img_info_input_blob:
feed_dict[img_info_input_blob] = [nchw[2], nchw[3], 1]
        # create and return an ExecInferModel object, which is used to run the inference
return ExecInferModel(exec_net, input_blob, out_blob, feed_dict, nchw, labels, output_dir)
class ExecInferModel:
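    # Wraps the loaded executable network and runs asynchronous inference on buffered frames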
def __init__(self, exec_net, input_blob, out_blob, feed_dict, nchw, labels, output_dir):
self.exec_net = exec_net
self.labels = labels
self.input_blob = input_blob
self.out_blob = out_blob
self.feed_dict = feed_dict
self.n, self.c, self.h, self.w = nchw
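        # frames currently assigned to each inference request and the best detection kept per class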
self.current_frames = {}
self.detected_objects = {}
self.output_dir = output_dir
def infer_frames(self, buffer, threshhold=0.6, view_result=True, n_save=20, save_all=False):
        # status variables
n_infered, n_detected, n_saved = 0, 0, 0
        # iterate over all inference requests
for inf_img_ind, infer_request in enumerate(self.exec_net.requests):
res, frame = None, None
            # query the inference status for the current request
status = infer_request.wait(0)
            # 0: result available, -11: not started yet
if status != 0 and status != -11:
continue
            # fetch the result for the current request
if inf_img_ind in self.current_frames:
res = infer_request.outputs[self.out_blob]
frame = self.current_frames[inf_img_ind]
n_infered += 1
            # start a new inference request
if len(buffer):
self.current_frames[inf_img_ind] = buffer.pop()
in_frame = cv2.resize(
self.current_frames[inf_img_ind], (self.w, self.h))
in_frame = in_frame.transpose((2, 0, 1))
in_frame = in_frame.reshape(
(self.n, self.c, self.h, self.w))
self.feed_dict[self.input_blob] = in_frame
infer_request.async_infer(self.feed_dict)
            # process the result
if res is None or frame is None:
continue
height, width = frame.shape[:2]
            # iterate over the inference results for one frame
for obj in res[0][0]:
                # check the confidence threshold
if obj[2] < threshhold:
continue
n_detected += 1
                # load the bounding box coordinates from the result
xmin = int(obj[3] * width)
ymin = int(obj[4] * height)
xmax = int(obj[5] * width)
ymax = int(obj[6] * height)
                # ID of the detected class
class_id = int(obj[1])
                # draw the bounding box into the image
cv2.rectangle(frame, (xmin, ymin),
(xmax, ymax), color=(0, 255, 255), thickness=2)
cv2.putText(frame, self.labels[class_id - 1] + ' ' + str(round(obj[2] * 100, 1)) + '%', (xmin, ymin - 7),
cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 1)
                # create the detected_objects dict with key: class_id, value: [N, ROI, probability]
if not class_id in self.detected_objects:
self.detected_objects[class_id] = [
0, frame, obj[2]]
else:
self.detected_objects[class_id][0] += 1
                    # if the probability is higher than the stored one, replace it
if self.detected_objects[class_id][2] < obj[2]:
self.detected_objects[class_id][1] = frame
self.detected_objects[class_id][2] = obj[2]
                    # save after 'n_save' detections
if self.detected_objects[class_id][0] > n_save:
n_saved += 1
self._save(class_id)
del self.detected_objects[class_id]
if view_result:
cv2.imshow('infer result', frame)
cv2.waitKey(1)
        # save all remaining entries from 'detected_objects' locally
if save_all:
print('saving all')
for class_id in self.detected_objects.keys():
self._save(class_id)
n_saved += 1
self.detected_objects = {}
return n_infered, n_detected, n_saved
    # Function for saving the images
def _save(self, class_id):
class_name = self.labels[class_id - 1]
print('saving ', class_name)
time_stamp = datetime.now().strftime("%d-%b-%Y_%H-%M-%S")
file_name = time_stamp + '_' + class_name + '.jpg'
image_array = self.detected_objects[class_id][1]
        # save the image locally
cv2.imwrite(os.path.join(
self.output_dir, file_name), image_array)
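# Minimal usage sketch (commented out; the camera index, model directory and output
# directory below are placeholders, not part of the original module):
#
# model = InferenceModel(device='MYRIAD')
# exec_model = model.create_exec_infer_model('model_dir', 'output_dir', num_requests=2)
# motion = MotionDetect()
# if exec_model:
#     cap = cv2.VideoCapture(0)
#     buffer = []
#     while True:
#         ret, frame = cap.read()
#         if not ret:
#             break
#         if motion.detect_motion(frame):
#             buffer.append(frame)
#         exec_model.infer_frames(buffer, threshhold=0.6, view_result=True)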
|
normal
|
{
"blob_id": "fbd7868a37a2270e5dc86843adff50a94436404d",
"index": 5899,
"step-1": "<mask token>\n\n\nclass MotionDetect:\n\n def __init__(self):\n self.static_back = None\n <mask token>\n <mask token>\n\n\nclass InferenceModel:\n\n def __init__(self, device='MYRIAD'):\n self.ie = IECore()\n self.device = device\n\n def create_exec_infer_model(self, model_dir, output_dir, num_requests=2):\n model_xml = os.path.join(model_dir, 'frozen_inference_graph.xml')\n model_bin = os.path.join(model_dir, 'frozen_inference_graph.bin')\n exported_model = os.path.join(model_dir, 'exported_model')\n labels = [line.strip() for line in open(os.path.join(model_dir,\n 'classes.txt')).readlines()]\n assert os.path.isfile(model_bin)\n assert os.path.isfile(model_xml)\n net = IENetwork(model=model_xml, weights=model_bin)\n img_info_input_blob = None\n feed_dict = {}\n for blob_name in net.inputs:\n if len(net.inputs[blob_name].shape) == 4:\n input_blob = blob_name\n elif len(net.inputs[blob_name].shape) == 2:\n img_info_input_blob = blob_name\n else:\n raise RuntimeError(\n \"Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported\"\n .format(len(net.inputs[blob_name].shape), blob_name))\n assert len(net.outputs\n ) == 1, 'Demo supports only single output topologies'\n out_blob = next(iter(net.outputs))\n if os.path.isfile(exported_model):\n print('found model to import')\n try:\n exec_net = self.ie.import_network(model_file=exported_model,\n device_name=self.device, num_requests=num_requests)\n except:\n return False\n else:\n print('creating exec model')\n try:\n exec_net = self.ie.load_network(network=net, num_requests=\n num_requests, device_name=self.device)\n exec_net.export(exported_model)\n except:\n return False\n nchw = net.inputs[input_blob].shape\n del net\n if img_info_input_blob:\n feed_dict[img_info_input_blob] = [nchw[2], nchw[3], 1]\n return ExecInferModel(exec_net, input_blob, out_blob, feed_dict,\n nchw, labels, output_dir)\n\n\nclass ExecInferModel:\n\n def __init__(self, exec_net, input_blob, out_blob, feed_dict, nchw,\n labels, output_dir):\n self.exec_net = exec_net\n self.labels = labels\n self.input_blob = input_blob\n self.out_blob = out_blob\n self.feed_dict = feed_dict\n self.n, self.c, self.h, self.w = nchw\n self.current_frames = {}\n self.detected_objects = {}\n self.output_dir = output_dir\n\n def infer_frames(self, buffer, threshhold=0.6, view_result=True, n_save\n =20, save_all=False):\n n_infered, n_detected, n_saved = 0, 0, 0\n for inf_img_ind, infer_request in enumerate(self.exec_net.requests):\n res, frame = None, None\n status = infer_request.wait(0)\n if status != 0 and status != -11:\n continue\n if inf_img_ind in self.current_frames:\n res = infer_request.outputs[self.out_blob]\n frame = self.current_frames[inf_img_ind]\n n_infered += 1\n if len(buffer):\n self.current_frames[inf_img_ind] = buffer.pop()\n in_frame = cv2.resize(self.current_frames[inf_img_ind], (\n self.w, self.h))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))\n self.feed_dict[self.input_blob] = in_frame\n infer_request.async_infer(self.feed_dict)\n if res is None or frame is None:\n continue\n height, width = frame.shape[:2]\n for obj in res[0][0]:\n if obj[2] < threshhold:\n continue\n n_detected += 1\n xmin = int(obj[3] * width)\n ymin = int(obj[4] * height)\n xmax = int(obj[5] * width)\n ymax = int(obj[6] * height)\n class_id = int(obj[1])\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, \n 255, 255), thickness=2)\n cv2.putText(frame, self.labels[class_id - 1] + ' ' + str(\n 
round(obj[2] * 100, 1)) + '%', (xmin, ymin - 7), cv2.\n FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 1)\n if not class_id in self.detected_objects:\n self.detected_objects[class_id] = [0, frame, obj[2]]\n else:\n self.detected_objects[class_id][0] += 1\n if self.detected_objects[class_id][2] < obj[2]:\n self.detected_objects[class_id][1] = frame\n self.detected_objects[class_id][2] = obj[2]\n if self.detected_objects[class_id][0] > n_save:\n n_saved += 1\n self._save(class_id)\n del self.detected_objects[class_id]\n if view_result:\n cv2.imshow('infer result', frame)\n cv2.waitKey(1)\n if save_all:\n print('saving all')\n for class_id in self.detected_objects.keys():\n self._save(class_id)\n n_saved += 1\n self.detected_objects = {}\n return n_infered, n_detected, n_saved\n\n def _save(self, class_id):\n class_name = self.labels[class_id - 1]\n print('saving ', class_name)\n time_stamp = datetime.now().strftime('%d-%b-%Y_%H-%M-%S')\n file_name = time_stamp + '_' + class_name + '.jpg'\n image_array = self.detected_objects[class_id][1]\n cv2.imwrite(os.path.join(self.output_dir, file_name), image_array)\n",
"step-2": "<mask token>\n\n\nclass MotionDetect:\n\n def __init__(self):\n self.static_back = None\n\n def detect_motion(self, frame, reset=False):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n if self.static_back is None or reset:\n self.static_back = gray\n return False\n diff_frame = cv2.absdiff(self.static_back, gray)\n thresh_frame = cv2.threshold(diff_frame, 50, 255, cv2.THRESH_BINARY)[1]\n thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)\n cnts, _ = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n if cnts:\n return True\n else:\n return False\n <mask token>\n\n\nclass InferenceModel:\n\n def __init__(self, device='MYRIAD'):\n self.ie = IECore()\n self.device = device\n\n def create_exec_infer_model(self, model_dir, output_dir, num_requests=2):\n model_xml = os.path.join(model_dir, 'frozen_inference_graph.xml')\n model_bin = os.path.join(model_dir, 'frozen_inference_graph.bin')\n exported_model = os.path.join(model_dir, 'exported_model')\n labels = [line.strip() for line in open(os.path.join(model_dir,\n 'classes.txt')).readlines()]\n assert os.path.isfile(model_bin)\n assert os.path.isfile(model_xml)\n net = IENetwork(model=model_xml, weights=model_bin)\n img_info_input_blob = None\n feed_dict = {}\n for blob_name in net.inputs:\n if len(net.inputs[blob_name].shape) == 4:\n input_blob = blob_name\n elif len(net.inputs[blob_name].shape) == 2:\n img_info_input_blob = blob_name\n else:\n raise RuntimeError(\n \"Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported\"\n .format(len(net.inputs[blob_name].shape), blob_name))\n assert len(net.outputs\n ) == 1, 'Demo supports only single output topologies'\n out_blob = next(iter(net.outputs))\n if os.path.isfile(exported_model):\n print('found model to import')\n try:\n exec_net = self.ie.import_network(model_file=exported_model,\n device_name=self.device, num_requests=num_requests)\n except:\n return False\n else:\n print('creating exec model')\n try:\n exec_net = self.ie.load_network(network=net, num_requests=\n num_requests, device_name=self.device)\n exec_net.export(exported_model)\n except:\n return False\n nchw = net.inputs[input_blob].shape\n del net\n if img_info_input_blob:\n feed_dict[img_info_input_blob] = [nchw[2], nchw[3], 1]\n return ExecInferModel(exec_net, input_blob, out_blob, feed_dict,\n nchw, labels, output_dir)\n\n\nclass ExecInferModel:\n\n def __init__(self, exec_net, input_blob, out_blob, feed_dict, nchw,\n labels, output_dir):\n self.exec_net = exec_net\n self.labels = labels\n self.input_blob = input_blob\n self.out_blob = out_blob\n self.feed_dict = feed_dict\n self.n, self.c, self.h, self.w = nchw\n self.current_frames = {}\n self.detected_objects = {}\n self.output_dir = output_dir\n\n def infer_frames(self, buffer, threshhold=0.6, view_result=True, n_save\n =20, save_all=False):\n n_infered, n_detected, n_saved = 0, 0, 0\n for inf_img_ind, infer_request in enumerate(self.exec_net.requests):\n res, frame = None, None\n status = infer_request.wait(0)\n if status != 0 and status != -11:\n continue\n if inf_img_ind in self.current_frames:\n res = infer_request.outputs[self.out_blob]\n frame = self.current_frames[inf_img_ind]\n n_infered += 1\n if len(buffer):\n self.current_frames[inf_img_ind] = buffer.pop()\n in_frame = cv2.resize(self.current_frames[inf_img_ind], (\n self.w, self.h))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))\n 
self.feed_dict[self.input_blob] = in_frame\n infer_request.async_infer(self.feed_dict)\n if res is None or frame is None:\n continue\n height, width = frame.shape[:2]\n for obj in res[0][0]:\n if obj[2] < threshhold:\n continue\n n_detected += 1\n xmin = int(obj[3] * width)\n ymin = int(obj[4] * height)\n xmax = int(obj[5] * width)\n ymax = int(obj[6] * height)\n class_id = int(obj[1])\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, \n 255, 255), thickness=2)\n cv2.putText(frame, self.labels[class_id - 1] + ' ' + str(\n round(obj[2] * 100, 1)) + '%', (xmin, ymin - 7), cv2.\n FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 1)\n if not class_id in self.detected_objects:\n self.detected_objects[class_id] = [0, frame, obj[2]]\n else:\n self.detected_objects[class_id][0] += 1\n if self.detected_objects[class_id][2] < obj[2]:\n self.detected_objects[class_id][1] = frame\n self.detected_objects[class_id][2] = obj[2]\n if self.detected_objects[class_id][0] > n_save:\n n_saved += 1\n self._save(class_id)\n del self.detected_objects[class_id]\n if view_result:\n cv2.imshow('infer result', frame)\n cv2.waitKey(1)\n if save_all:\n print('saving all')\n for class_id in self.detected_objects.keys():\n self._save(class_id)\n n_saved += 1\n self.detected_objects = {}\n return n_infered, n_detected, n_saved\n\n def _save(self, class_id):\n class_name = self.labels[class_id - 1]\n print('saving ', class_name)\n time_stamp = datetime.now().strftime('%d-%b-%Y_%H-%M-%S')\n file_name = time_stamp + '_' + class_name + '.jpg'\n image_array = self.detected_objects[class_id][1]\n cv2.imwrite(os.path.join(self.output_dir, file_name), image_array)\n",
"step-3": "<mask token>\n\n\nclass MotionDetect:\n\n def __init__(self):\n self.static_back = None\n\n def detect_motion(self, frame, reset=False):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n if self.static_back is None or reset:\n self.static_back = gray\n return False\n diff_frame = cv2.absdiff(self.static_back, gray)\n thresh_frame = cv2.threshold(diff_frame, 50, 255, cv2.THRESH_BINARY)[1]\n thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)\n cnts, _ = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n if cnts:\n return True\n else:\n return False\n\n def reset_background(self):\n self.static_back = None\n\n\nclass InferenceModel:\n\n def __init__(self, device='MYRIAD'):\n self.ie = IECore()\n self.device = device\n\n def create_exec_infer_model(self, model_dir, output_dir, num_requests=2):\n model_xml = os.path.join(model_dir, 'frozen_inference_graph.xml')\n model_bin = os.path.join(model_dir, 'frozen_inference_graph.bin')\n exported_model = os.path.join(model_dir, 'exported_model')\n labels = [line.strip() for line in open(os.path.join(model_dir,\n 'classes.txt')).readlines()]\n assert os.path.isfile(model_bin)\n assert os.path.isfile(model_xml)\n net = IENetwork(model=model_xml, weights=model_bin)\n img_info_input_blob = None\n feed_dict = {}\n for blob_name in net.inputs:\n if len(net.inputs[blob_name].shape) == 4:\n input_blob = blob_name\n elif len(net.inputs[blob_name].shape) == 2:\n img_info_input_blob = blob_name\n else:\n raise RuntimeError(\n \"Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported\"\n .format(len(net.inputs[blob_name].shape), blob_name))\n assert len(net.outputs\n ) == 1, 'Demo supports only single output topologies'\n out_blob = next(iter(net.outputs))\n if os.path.isfile(exported_model):\n print('found model to import')\n try:\n exec_net = self.ie.import_network(model_file=exported_model,\n device_name=self.device, num_requests=num_requests)\n except:\n return False\n else:\n print('creating exec model')\n try:\n exec_net = self.ie.load_network(network=net, num_requests=\n num_requests, device_name=self.device)\n exec_net.export(exported_model)\n except:\n return False\n nchw = net.inputs[input_blob].shape\n del net\n if img_info_input_blob:\n feed_dict[img_info_input_blob] = [nchw[2], nchw[3], 1]\n return ExecInferModel(exec_net, input_blob, out_blob, feed_dict,\n nchw, labels, output_dir)\n\n\nclass ExecInferModel:\n\n def __init__(self, exec_net, input_blob, out_blob, feed_dict, nchw,\n labels, output_dir):\n self.exec_net = exec_net\n self.labels = labels\n self.input_blob = input_blob\n self.out_blob = out_blob\n self.feed_dict = feed_dict\n self.n, self.c, self.h, self.w = nchw\n self.current_frames = {}\n self.detected_objects = {}\n self.output_dir = output_dir\n\n def infer_frames(self, buffer, threshhold=0.6, view_result=True, n_save\n =20, save_all=False):\n n_infered, n_detected, n_saved = 0, 0, 0\n for inf_img_ind, infer_request in enumerate(self.exec_net.requests):\n res, frame = None, None\n status = infer_request.wait(0)\n if status != 0 and status != -11:\n continue\n if inf_img_ind in self.current_frames:\n res = infer_request.outputs[self.out_blob]\n frame = self.current_frames[inf_img_ind]\n n_infered += 1\n if len(buffer):\n self.current_frames[inf_img_ind] = buffer.pop()\n in_frame = cv2.resize(self.current_frames[inf_img_ind], (\n self.w, self.h))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = 
in_frame.reshape((self.n, self.c, self.h, self.w))\n self.feed_dict[self.input_blob] = in_frame\n infer_request.async_infer(self.feed_dict)\n if res is None or frame is None:\n continue\n height, width = frame.shape[:2]\n for obj in res[0][0]:\n if obj[2] < threshhold:\n continue\n n_detected += 1\n xmin = int(obj[3] * width)\n ymin = int(obj[4] * height)\n xmax = int(obj[5] * width)\n ymax = int(obj[6] * height)\n class_id = int(obj[1])\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, \n 255, 255), thickness=2)\n cv2.putText(frame, self.labels[class_id - 1] + ' ' + str(\n round(obj[2] * 100, 1)) + '%', (xmin, ymin - 7), cv2.\n FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 1)\n if not class_id in self.detected_objects:\n self.detected_objects[class_id] = [0, frame, obj[2]]\n else:\n self.detected_objects[class_id][0] += 1\n if self.detected_objects[class_id][2] < obj[2]:\n self.detected_objects[class_id][1] = frame\n self.detected_objects[class_id][2] = obj[2]\n if self.detected_objects[class_id][0] > n_save:\n n_saved += 1\n self._save(class_id)\n del self.detected_objects[class_id]\n if view_result:\n cv2.imshow('infer result', frame)\n cv2.waitKey(1)\n if save_all:\n print('saving all')\n for class_id in self.detected_objects.keys():\n self._save(class_id)\n n_saved += 1\n self.detected_objects = {}\n return n_infered, n_detected, n_saved\n\n def _save(self, class_id):\n class_name = self.labels[class_id - 1]\n print('saving ', class_name)\n time_stamp = datetime.now().strftime('%d-%b-%Y_%H-%M-%S')\n file_name = time_stamp + '_' + class_name + '.jpg'\n image_array = self.detected_objects[class_id][1]\n cv2.imwrite(os.path.join(self.output_dir, file_name), image_array)\n",
"step-4": "from openvino.inference_engine import IENetwork, IECore\nimport numpy as np\nimport time\nfrom datetime import datetime\nimport sys\nimport os\nimport cv2\n\n\nclass MotionDetect:\n\n def __init__(self):\n self.static_back = None\n\n def detect_motion(self, frame, reset=False):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n if self.static_back is None or reset:\n self.static_back = gray\n return False\n diff_frame = cv2.absdiff(self.static_back, gray)\n thresh_frame = cv2.threshold(diff_frame, 50, 255, cv2.THRESH_BINARY)[1]\n thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)\n cnts, _ = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n if cnts:\n return True\n else:\n return False\n\n def reset_background(self):\n self.static_back = None\n\n\nclass InferenceModel:\n\n def __init__(self, device='MYRIAD'):\n self.ie = IECore()\n self.device = device\n\n def create_exec_infer_model(self, model_dir, output_dir, num_requests=2):\n model_xml = os.path.join(model_dir, 'frozen_inference_graph.xml')\n model_bin = os.path.join(model_dir, 'frozen_inference_graph.bin')\n exported_model = os.path.join(model_dir, 'exported_model')\n labels = [line.strip() for line in open(os.path.join(model_dir,\n 'classes.txt')).readlines()]\n assert os.path.isfile(model_bin)\n assert os.path.isfile(model_xml)\n net = IENetwork(model=model_xml, weights=model_bin)\n img_info_input_blob = None\n feed_dict = {}\n for blob_name in net.inputs:\n if len(net.inputs[blob_name].shape) == 4:\n input_blob = blob_name\n elif len(net.inputs[blob_name].shape) == 2:\n img_info_input_blob = blob_name\n else:\n raise RuntimeError(\n \"Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported\"\n .format(len(net.inputs[blob_name].shape), blob_name))\n assert len(net.outputs\n ) == 1, 'Demo supports only single output topologies'\n out_blob = next(iter(net.outputs))\n if os.path.isfile(exported_model):\n print('found model to import')\n try:\n exec_net = self.ie.import_network(model_file=exported_model,\n device_name=self.device, num_requests=num_requests)\n except:\n return False\n else:\n print('creating exec model')\n try:\n exec_net = self.ie.load_network(network=net, num_requests=\n num_requests, device_name=self.device)\n exec_net.export(exported_model)\n except:\n return False\n nchw = net.inputs[input_blob].shape\n del net\n if img_info_input_blob:\n feed_dict[img_info_input_blob] = [nchw[2], nchw[3], 1]\n return ExecInferModel(exec_net, input_blob, out_blob, feed_dict,\n nchw, labels, output_dir)\n\n\nclass ExecInferModel:\n\n def __init__(self, exec_net, input_blob, out_blob, feed_dict, nchw,\n labels, output_dir):\n self.exec_net = exec_net\n self.labels = labels\n self.input_blob = input_blob\n self.out_blob = out_blob\n self.feed_dict = feed_dict\n self.n, self.c, self.h, self.w = nchw\n self.current_frames = {}\n self.detected_objects = {}\n self.output_dir = output_dir\n\n def infer_frames(self, buffer, threshhold=0.6, view_result=True, n_save\n =20, save_all=False):\n n_infered, n_detected, n_saved = 0, 0, 0\n for inf_img_ind, infer_request in enumerate(self.exec_net.requests):\n res, frame = None, None\n status = infer_request.wait(0)\n if status != 0 and status != -11:\n continue\n if inf_img_ind in self.current_frames:\n res = infer_request.outputs[self.out_blob]\n frame = self.current_frames[inf_img_ind]\n n_infered += 1\n if len(buffer):\n self.current_frames[inf_img_ind] = buffer.pop()\n 
in_frame = cv2.resize(self.current_frames[inf_img_ind], (\n self.w, self.h))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape((self.n, self.c, self.h, self.w))\n self.feed_dict[self.input_blob] = in_frame\n infer_request.async_infer(self.feed_dict)\n if res is None or frame is None:\n continue\n height, width = frame.shape[:2]\n for obj in res[0][0]:\n if obj[2] < threshhold:\n continue\n n_detected += 1\n xmin = int(obj[3] * width)\n ymin = int(obj[4] * height)\n xmax = int(obj[5] * width)\n ymax = int(obj[6] * height)\n class_id = int(obj[1])\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, \n 255, 255), thickness=2)\n cv2.putText(frame, self.labels[class_id - 1] + ' ' + str(\n round(obj[2] * 100, 1)) + '%', (xmin, ymin - 7), cv2.\n FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 1)\n if not class_id in self.detected_objects:\n self.detected_objects[class_id] = [0, frame, obj[2]]\n else:\n self.detected_objects[class_id][0] += 1\n if self.detected_objects[class_id][2] < obj[2]:\n self.detected_objects[class_id][1] = frame\n self.detected_objects[class_id][2] = obj[2]\n if self.detected_objects[class_id][0] > n_save:\n n_saved += 1\n self._save(class_id)\n del self.detected_objects[class_id]\n if view_result:\n cv2.imshow('infer result', frame)\n cv2.waitKey(1)\n if save_all:\n print('saving all')\n for class_id in self.detected_objects.keys():\n self._save(class_id)\n n_saved += 1\n self.detected_objects = {}\n return n_infered, n_detected, n_saved\n\n def _save(self, class_id):\n class_name = self.labels[class_id - 1]\n print('saving ', class_name)\n time_stamp = datetime.now().strftime('%d-%b-%Y_%H-%M-%S')\n file_name = time_stamp + '_' + class_name + '.jpg'\n image_array = self.detected_objects[class_id][1]\n cv2.imwrite(os.path.join(self.output_dir, file_name), image_array)\n",
"step-5": "from openvino.inference_engine import IENetwork, IECore\nimport numpy as np\nimport time\nfrom datetime import datetime\nimport sys\nimport os\nimport cv2\n\n\nclass MotionDetect:\n # Klasse zur Erkennung von Bewegung\n def __init__(self):\n self.static_back = None\n\n def detect_motion(self, frame, reset=False):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n if self.static_back is None or reset:\n self.static_back = gray\n return False\n diff_frame = cv2.absdiff(self.static_back, gray)\n thresh_frame = cv2.threshold(diff_frame, 50, 255, cv2.THRESH_BINARY)[1]\n thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)\n cnts, _ = cv2.findContours(thresh_frame.copy(),\n cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if cnts:\n return True\n else:\n return False\n\n def reset_background(self):\n self.static_back = None\n\n\nclass InferenceModel:\n # Klasse zur Erstellung eines 'ExecInferModel' Objekts\n def __init__(self, device='MYRIAD'):\n self.ie = IECore()\n self.device = device\n\n def create_exec_infer_model(self, model_dir, output_dir, num_requests=2):\n\n # Anlegen der Pfade zu den Modell Dateien\n model_xml = os.path.join(\n model_dir, 'frozen_inference_graph.xml')\n model_bin = os.path.join(\n model_dir, 'frozen_inference_graph.bin')\n exported_model = os.path.join(model_dir, 'exported_model')\n\n # Laden der Labels aus 'classes.txt'\n labels = [line.strip() for line in open(\n os.path.join(model_dir, 'classes.txt')).readlines()]\n\n assert os.path.isfile(model_bin)\n assert os.path.isfile(model_xml)\n\n # Erstellung des Modells aus IR Dateien\n net = IENetwork(model=model_xml, weights=model_bin)\n\n # In-Output Shapes des Modells aus 'net' laden\n img_info_input_blob = None\n feed_dict = {}\n for blob_name in net.inputs:\n if len(net.inputs[blob_name].shape) == 4:\n input_blob = blob_name\n elif len(net.inputs[blob_name].shape) == 2:\n img_info_input_blob = blob_name\n else:\n raise RuntimeError(\"Unsupported {}D input layer '{}'. 
Only 2D and 4D input layers are supported\"\n .format(len(net.inputs[blob_name].shape), blob_name))\n\n assert len(\n net.outputs) == 1, \"Demo supports only single output topologies\"\n out_blob = next(iter(net.outputs))\n\n # Modell importieren (Falls vorhanden)\n if os.path.isfile(exported_model):\n print('found model to import')\n try:\n exec_net = self.ie.import_network(\n model_file=exported_model, device_name=self.device, num_requests=num_requests)\n except:\n return False\n else:\n # sonst erstellen und exoportieren\n print('creating exec model')\n try:\n exec_net = self.ie.load_network(\n network=net, num_requests=num_requests, device_name=self.device)\n exec_net.export(exported_model)\n\n except:\n return False\n nchw = net.inputs[input_blob].shape\n\n del net\n if img_info_input_blob:\n feed_dict[img_info_input_blob] = [nchw[2], nchw[3], 1]\n\n # ersellen und zurückgeben eines ExecInferModel Objekts, mit welchem die Inferenz ausgeführt wird\n return ExecInferModel(exec_net, input_blob, out_blob, feed_dict, nchw, labels, output_dir)\n\n\nclass ExecInferModel:\n def __init__(self, exec_net, input_blob, out_blob, feed_dict, nchw, labels, output_dir):\n self.exec_net = exec_net\n self.labels = labels\n self.input_blob = input_blob\n self.out_blob = out_blob\n self.feed_dict = feed_dict\n self.n, self.c, self.h, self.w = nchw\n self.current_frames = {}\n self.detected_objects = {}\n self.output_dir = output_dir\n\n def infer_frames(self, buffer, threshhold=0.6, view_result=True, n_save=20, save_all=False):\n\n # Status Variablen\n n_infered, n_detected, n_saved = 0, 0, 0\n\n # alle Inferenz Requests durchiterieren\n for inf_img_ind, infer_request in enumerate(self.exec_net.requests):\n\n res, frame = None, None\n\n # Status der Inferenz für aktuellen Request abfragen\n status = infer_request.wait(0)\n\n # 0: ergebnis da, -11: noch nicht gestartet\n if status != 0 and status != -11:\n continue\n\n # Ergebnis für aktuellen Request holen\n if inf_img_ind in self.current_frames:\n res = infer_request.outputs[self.out_blob]\n frame = self.current_frames[inf_img_ind]\n n_infered += 1\n\n # neuen Inferent Request starten\n if len(buffer):\n self.current_frames[inf_img_ind] = buffer.pop()\n in_frame = cv2.resize(\n self.current_frames[inf_img_ind], (self.w, self.h))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape(\n (self.n, self.c, self.h, self.w))\n self.feed_dict[self.input_blob] = in_frame\n infer_request.async_infer(self.feed_dict)\n\n # Ergebnis verarbeiten\n if res is None or frame is None:\n continue\n\n height, width = frame.shape[:2]\n # inferenz ergebnisse für ein frame durchiterieren\n for obj in res[0][0]:\n\n # Threshold prüfen\n if obj[2] < threshhold:\n continue\n\n n_detected += 1\n\n # Boundig Box koordinalte aus Erg laden\n xmin = int(obj[3] * width)\n ymin = int(obj[4] * height)\n xmax = int(obj[5] * width)\n ymax = int(obj[6] * height)\n\n # ID der erkannten Klasse\n class_id = int(obj[1])\n\n # Bounding Box in das Bild zeichnen\n cv2.rectangle(frame, (xmin, ymin),\n (xmax, ymax), color=(0, 255, 255), thickness=2)\n\n cv2.putText(frame, self.labels[class_id - 1] + ' ' + str(round(obj[2] * 100, 1)) + '%', (xmin, ymin - 7),\n cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 1)\n\n # detected_objects dict anlegen mit key:class_id, value:[N, Roi, proba]\n if not class_id in self.detected_objects:\n self.detected_objects[class_id] = [\n 0, frame, obj[2]]\n else:\n self.detected_objects[class_id][0] += 1\n # wenn wahrscheinlichkeit höher als bei 
gespeicherten, ersetzen\n if self.detected_objects[class_id][2] < obj[2]:\n self.detected_objects[class_id][1] = frame\n self.detected_objects[class_id][2] = obj[2]\n\n # nach 'n_save' abspeicher\n if self.detected_objects[class_id][0] > n_save:\n n_saved += 1\n self._save(class_id)\n del self.detected_objects[class_id]\n if view_result:\n cv2.imshow('infer result', frame)\n cv2.waitKey(1)\n\n # alle aus 'detected_objects' lokal speichern\n if save_all:\n print('saving all')\n for class_id in self.detected_objects.keys():\n self._save(class_id)\n n_saved += 1\n self.detected_objects = {}\n return n_infered, n_detected, n_saved\n\n # Funkiont zum speichern der Bilder\n def _save(self, class_id):\n class_name = self.labels[class_id - 1]\n print('saving ', class_name)\n time_stamp = datetime.now().strftime(\"%d-%b-%Y_%H-%M-%S\")\n file_name = time_stamp + '_' + class_name + '.jpg'\n image_array = self.detected_objects[class_id][1]\n # save image local\n cv2.imwrite(os.path.join(\n self.output_dir, file_name), image_array)\n",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
import os
import flask_sqlalchemy as sqlalchemy
from flask import Flask, jsonify, request,render_template,redirect,url_for,json,flash
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_cors import CORS
import datetime
from flask_bootstrap import Bootstrap
from flask_login import LoginManager,current_user, login_user,logout_user, login_required
from flask_login import UserMixin
from hashlib import md5
from database.models import *
#from sqlalchemy_imageattach.entity import Image, image_attachment
app = Flask(__name__,static_url_path='/static')
app.debug = True
CORS(app)
login_manager = LoginManager()
login_manager.init_app(app)
#UPLOAD_FOLDER = '../static/templates'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sqlalchemy-demo.db'
app.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
bootstrap = Bootstrap(app)
app.config.update(DEBUG=True)
db = sqlalchemy.SQLAlchemy(app)
base_url = '/api/'
@login_manager.user_loader
def load_user(id):
user = Student.query.get(int(id))
if user is not None:
return user
else:
return Instructor.query.get(int(id))
@app.route(base_url, methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('studenthome'))
form = LoginForm()
if form.validate_on_submit():
user = Student.query.filter_by(email=form.email.data).first()
# Login Student
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('apply'))
user = Instructor.query.filter_by(email=form.email.data).first()
# Login Instructor
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('post'))
# Login failed
flash('Invalid username or password')
return redirect(url_for('login'))
return render_template('mainpage.html', title='Sign In', form=form)
# Route to student Profile
@app.route(base_url + 'studentProfile', methods=['GET'])
def studenthome():
return render_template('student_Profile.html')
# Route to Instructor Profile
@app.route(base_url + 'instructorProfile', methods=['GET'])
def instructorhome():
return render_template('Instructor_Profile.html')
# Route to create a student account and main page
@app.route(base_url + 'Register', methods=['POST','GET'])
def createAccount():
if request.method == 'POST':
# Student option is checked
if request.form['options'] == 'STUDENT':
new_user = Student(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
# Make sure id is unique
while Instructor.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('studenthome'))
# Instructor option is checked
elif request.form['options'] == 'INSTRUCTOR':
new_user = Instructor(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
# Make sure id is unique
while Student.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('instructorhome'))
return redirect(url_for('login'))
#return render_template('studenPortal.html', Jobs = Jobs.query.all())
# Route to create a instructor account
@app.route(base_url + 'instructors', methods=['POST'])
def createInstructor():
instructor = Instructor(**request.json)
db.session.add(instructor)
db.session.commit()
db.session.refresh(instructor)
return jsonify({"status": 1, "instructor": instructor_to_obj(instructor)}), 200
# Route to post a job for Instructors
@app.route(base_url + 'post', methods=['POST','GET'])
#@login_required
def post():
if request.method == 'POST':
new_job = Jobs(request.form['position'],request.form['Semester'],request.form['pay'],request.form['gpa_required'])
db.session.add(new_job)
db.session.commit()
db.session.refresh(new_job)
#,applicates = Job_Application.query.all()
return render_template('instructorPortal.html',applicates = Job_Application.query.all())
# Route to Display jobs for students
@app.route(base_url + 'apply', methods=['POST','GET'])
@login_required
def apply():
if request.method == 'POST':
#temp_student = Student(first_name=current_user.first_name,last_name=current_user.last_name,email=current_user.email,password=current_user.password)
#db.session.add(temp_student)
#db.session.commit()
new_app = Job_Application(grade_recieved=request.form['Grade'],Avalialability=request.form['Avalialability'],bio=request.form['bio'],gpa_overall=request.form['gpa_overall'],job_status=request.form['job_status'],owner=current_user)
new_app.job_status = "Submited"
#new_app = Job_Application(owner=temp_student)
db.session.add(new_app)
db.session.commit()
db.session.refresh(new_app)
flash("Job Application successfully Submited")
return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id))
# Route to edit info in a student account
# Edit ONLY major, gpa and grad_date
@app.route(base_url + 'students_edit', methods=['GET', 'POST'])
@login_required
def editStudent():
if request.method == 'POST':
current_user.gpa = request.form['editGpa']
current_user.major = request.form['editMajor']
db.session.add(current_user)
db.session.commit()
db.session.refresh(current_user)
return render_template('student_Profile.html',current_user=current_user)
return render_template('student_Profile.html',current_user=current_user)
# Route to edit info in an Instructor account
# Edit ONLY email, office, and phone
@app.route(base_url + 'instructors_edit', methods=['GET', 'POST'])
@login_required
def editInstructor():
if request.method == 'POST':
current_user.email = request.form['editEmail']
current_user.phone = request.form['editPhone']
current_user.office = request.form['editOffice']
db.session.add(current_user)
db.session.commit()
db.session.refresh(current_user)
return render_template('Instructor_Profile.html',current_user=current_user)
return render_template('Instructor_Profile.html',current_user=current_user)
# Route to update Student Application
@app.route(base_url + 'updateApplication', methods=['POST'])
@login_required
def update_application(applicate):
if request.method == 'POST':
student = Student.query.filter_by(id =applicate.owner_id)
student.Job_Application.job_status = "Rejected"
db,session.add(student)
db.session.commit()
db.session.refresh(student)
return render_template('instructorPortal.html',applicates = Job_Application.query.all())
# Route to Delete student Application
@app.route(base_url + 'cancel_Application', methods=['DELETE'])
@login_required
def cancel_application():
if request.method == 'DELETE':
job_position = request.form['job_name']
job_pos = current_user.jobs.filter_by(position=job_position)
db.session.delete(job_pos)
db.session.commit()
db.session.refresh()
return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id),applied=Jobs.query.filter_by())
# Route to Login out User
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
def main():
db.create_all() # creates the tables you've provided
app.run(debug=True) # runs the Flask application
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "5dc17db0aca109720d1ba62d65b86d9b81714063",
"index": 6622,
"step-1": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n\n<mask token>\n\n\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n\n@app.route(base_url + 'Register', methods=['POST', 'GET'])\ndef createAccount():\n if request.method == 'POST':\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Instructor.query.filter_by(id=new_user.id).first(\n ) is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('studenthome'))\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n return redirect(url_for('login'))\n\n\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}\n ), 200\n\n\n@app.route(base_url + 'post', methods=['POST', 'GET'])\ndef post():\n if request.method == 'POST':\n new_job = Jobs(request.form['position'], request.form['Semester'],\n request.form['pay'], request.form['gpa_required'])\n db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'apply', methods=['POST', 'GET'])\n@login_required\ndef apply():\n if request.method == 'POST':\n new_app = Job_Application(grade_recieved=request.form['Grade'],\n Avalialability=request.form['Avalialability'], bio=request.form\n ['bio'], gpa_overall=request.form['gpa_overall'], job_status=\n request.form['job_status'], owner=current_user)\n new_app.job_status = 'Submited'\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash('Job Application successfully Submited')\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id))\n\n\n<mask token>\n\n\n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n if request.method == 'POST':\n student = Student.query.filter_by(id=applicate.owner_id)\n student.Job_Application.job_status = 'Rejected'\n db, session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n<mask token>\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all()\n app.run(debug=True)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n\n<mask token>\n\n\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n\n@app.route(base_url + 'Register', methods=['POST', 'GET'])\ndef createAccount():\n if request.method == 'POST':\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Instructor.query.filter_by(id=new_user.id).first(\n ) is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('studenthome'))\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n return redirect(url_for('login'))\n\n\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}\n ), 200\n\n\n@app.route(base_url + 'post', methods=['POST', 'GET'])\ndef post():\n if request.method == 'POST':\n new_job = Jobs(request.form['position'], request.form['Semester'],\n request.form['pay'], request.form['gpa_required'])\n db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'apply', methods=['POST', 'GET'])\n@login_required\ndef apply():\n if request.method == 'POST':\n new_app = Job_Application(grade_recieved=request.form['Grade'],\n Avalialability=request.form['Avalialability'], bio=request.form\n ['bio'], gpa_overall=request.form['gpa_overall'], job_status=\n request.form['job_status'], owner=current_user)\n new_app.job_status = 'Submited'\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash('Job Application successfully Submited')\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id))\n\n\n<mask token>\n\n\n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n if request.method == 'POST':\n student = Student.query.filter_by(id=applicate.owner_id)\n student.Job_Application.job_status = 'Rejected'\n db, session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'cancel_Application', methods=['DELETE'])\n@login_required\ndef cancel_application():\n if request.method == 'DELETE':\n job_position = request.form['job_name']\n job_pos = current_user.jobs.filter_by(position=job_position)\n db.session.delete(job_pos)\n 
db.session.commit()\n db.session.refresh()\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id),\n applied=Jobs.query.filter_by())\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all()\n app.run(debug=True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n\n@app.route(base_url, methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('studenthome'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Student.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('apply'))\n user = Instructor.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('post'))\n flash('Invalid username or password')\n return redirect(url_for('login'))\n return render_template('mainpage.html', title='Sign In', form=form)\n\n\n@app.route(base_url + 'studentProfile', methods=['GET'])\ndef studenthome():\n return render_template('student_Profile.html')\n\n\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n\n@app.route(base_url + 'Register', methods=['POST', 'GET'])\ndef createAccount():\n if request.method == 'POST':\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Instructor.query.filter_by(id=new_user.id).first(\n ) is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('studenthome'))\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n return redirect(url_for('login'))\n\n\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}\n ), 200\n\n\n@app.route(base_url + 'post', methods=['POST', 'GET'])\ndef post():\n if request.method == 'POST':\n new_job = Jobs(request.form['position'], request.form['Semester'],\n request.form['pay'], request.form['gpa_required'])\n db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'apply', methods=['POST', 'GET'])\n@login_required\ndef apply():\n if request.method == 'POST':\n new_app = Job_Application(grade_recieved=request.form['Grade'],\n Avalialability=request.form['Avalialability'], bio=request.form\n ['bio'], gpa_overall=request.form['gpa_overall'], job_status=\n request.form['job_status'], owner=current_user)\n new_app.job_status = 'Submited'\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash('Job Application 
successfully Submited')\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id))\n\n\n@app.route(base_url + 'students_edit', methods=['GET', 'POST'])\n@login_required\ndef editStudent():\n if request.method == 'POST':\n current_user.gpa = request.form['editGpa']\n current_user.major = request.form['editMajor']\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n return render_template('student_Profile.html', current_user=\n current_user)\n return render_template('student_Profile.html', current_user=current_user)\n\n\n<mask token>\n\n\n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n if request.method == 'POST':\n student = Student.query.filter_by(id=applicate.owner_id)\n student.Job_Application.job_status = 'Rejected'\n db, session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'cancel_Application', methods=['DELETE'])\n@login_required\ndef cancel_application():\n if request.method == 'DELETE':\n job_position = request.form['job_name']\n job_pos = current_user.jobs.filter_by(position=job_position)\n db.session.delete(job_pos)\n db.session.commit()\n db.session.refresh()\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id),\n applied=Jobs.query.filter_by())\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all()\n app.run(debug=True)\n\n\n<mask token>\n",
"step-4": "import os\nimport flask_sqlalchemy as sqlalchemy\nfrom flask import Flask, jsonify, request, render_template, redirect, url_for, json, flash\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\nfrom flask_cors import CORS\nimport datetime\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import LoginManager, current_user, login_user, logout_user, login_required\nfrom flask_login import UserMixin\nfrom hashlib import md5\nfrom database.models import *\napp = Flask(__name__, static_url_path='/static')\napp.debug = True\nCORS(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sqlalchemy-demo.db'\napp.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nbootstrap = Bootstrap(app)\napp.config.update(DEBUG=True)\ndb = sqlalchemy.SQLAlchemy(app)\nbase_url = '/api/'\n\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n\n@app.route(base_url, methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('studenthome'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Student.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('apply'))\n user = Instructor.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('post'))\n flash('Invalid username or password')\n return redirect(url_for('login'))\n return render_template('mainpage.html', title='Sign In', form=form)\n\n\n@app.route(base_url + 'studentProfile', methods=['GET'])\ndef studenthome():\n return render_template('student_Profile.html')\n\n\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n\n@app.route(base_url + 'Register', methods=['POST', 'GET'])\ndef createAccount():\n if request.method == 'POST':\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Instructor.query.filter_by(id=new_user.id).first(\n ) is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('studenthome'))\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n return redirect(url_for('login'))\n\n\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return 
jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}\n ), 200\n\n\n@app.route(base_url + 'post', methods=['POST', 'GET'])\ndef post():\n if request.method == 'POST':\n new_job = Jobs(request.form['position'], request.form['Semester'],\n request.form['pay'], request.form['gpa_required'])\n db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'apply', methods=['POST', 'GET'])\n@login_required\ndef apply():\n if request.method == 'POST':\n new_app = Job_Application(grade_recieved=request.form['Grade'],\n Avalialability=request.form['Avalialability'], bio=request.form\n ['bio'], gpa_overall=request.form['gpa_overall'], job_status=\n request.form['job_status'], owner=current_user)\n new_app.job_status = 'Submited'\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash('Job Application successfully Submited')\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id))\n\n\n@app.route(base_url + 'students_edit', methods=['GET', 'POST'])\n@login_required\ndef editStudent():\n if request.method == 'POST':\n current_user.gpa = request.form['editGpa']\n current_user.major = request.form['editMajor']\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n return render_template('student_Profile.html', current_user=\n current_user)\n return render_template('student_Profile.html', current_user=current_user)\n\n\n@app.route(base_url + 'instructors_edit', methods=['GET', 'POST'])\n@login_required\ndef editInstructor():\n if request.method == 'POST':\n current_user.email = request.form['editEmail']\n current_user.phone = request.form['editPhone']\n current_user.office = request.form['editOffice']\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n return render_template('Instructor_Profile.html', current_user=\n current_user)\n return render_template('Instructor_Profile.html', current_user=current_user\n )\n\n\n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n if request.method == 'POST':\n student = Student.query.filter_by(id=applicate.owner_id)\n student.Job_Application.job_status = 'Rejected'\n db, session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'cancel_Application', methods=['DELETE'])\n@login_required\ndef cancel_application():\n if request.method == 'DELETE':\n job_position = request.form['job_name']\n job_pos = current_user.jobs.filter_by(position=job_position)\n db.session.delete(job_pos)\n db.session.commit()\n db.session.refresh()\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id),\n applied=Jobs.query.filter_by())\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all()\n app.run(debug=True)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os\n\nimport flask_sqlalchemy as sqlalchemy\nfrom flask import Flask, jsonify, request,render_template,redirect,url_for,json,flash\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\nfrom flask_cors import CORS\nimport datetime\nfrom flask_bootstrap import Bootstrap\n\nfrom flask_login import LoginManager,current_user, login_user,logout_user, login_required\nfrom flask_login import UserMixin\n\nfrom hashlib import md5\n\n\nfrom database.models import *\n\n\n\n#from sqlalchemy_imageattach.entity import Image, image_attachment\n\n\napp = Flask(__name__,static_url_path='/static')\napp.debug = True\nCORS(app)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n#UPLOAD_FOLDER = '../static/templates'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sqlalchemy-demo.db'\napp.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nbootstrap = Bootstrap(app)\napp.config.update(DEBUG=True)\ndb = sqlalchemy.SQLAlchemy(app)\n\n\n\nbase_url = '/api/'\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n \n \n \n@app.route(base_url, methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('studenthome'))\n\n form = LoginForm()\n if form.validate_on_submit():\n\n user = Student.query.filter_by(email=form.email.data).first()\n # Login Student\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('apply'))\n\n user = Instructor.query.filter_by(email=form.email.data).first()\n # Login Instructor\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('post'))\n\n # Login failed\n flash('Invalid username or password')\n return redirect(url_for('login'))\n \n return render_template('mainpage.html', title='Sign In', form=form)\n\n\n\n# Route to student Profile\n@app.route(base_url + 'studentProfile', methods=['GET'])\ndef studenthome():\n return render_template('student_Profile.html')\n\n\n# Route to Instructor Profile\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n# Route to create a student account and main page\n@app.route(base_url + 'Register', methods=['POST','GET'])\ndef createAccount():\n\n if request.method == 'POST':\n # Student option is checked\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n\n # Make sure id is unique\n while Instructor.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n\n login_user(new_user)\n return redirect(url_for('studenthome'))\n\n # Instructor option is checked\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n\n # Make sure id is unique\n 
while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n\n return redirect(url_for('login'))\n #return render_template('studenPortal.html', Jobs = Jobs.query.all())\n\n\n\n# Route to create a instructor account\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n \n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({\"status\": 1, \"instructor\": instructor_to_obj(instructor)}), 200\n\n# Route to post a job for Instructors\n@app.route(base_url + 'post', methods=['POST','GET'])\n#@login_required\ndef post():\n\n if request.method == 'POST':\n new_job = Jobs(request.form['position'],request.form['Semester'],request.form['pay'],request.form['gpa_required'])\n db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n #,applicates = Job_Application.query.all()\n return render_template('instructorPortal.html',applicates = Job_Application.query.all())\n\n# Route to Display jobs for students\n@app.route(base_url + 'apply', methods=['POST','GET'])\n@login_required\ndef apply():\n\n \n if request.method == 'POST':\n #temp_student = Student(first_name=current_user.first_name,last_name=current_user.last_name,email=current_user.email,password=current_user.password)\n #db.session.add(temp_student)\n #db.session.commit()\n new_app = Job_Application(grade_recieved=request.form['Grade'],Avalialability=request.form['Avalialability'],bio=request.form['bio'],gpa_overall=request.form['gpa_overall'],job_status=request.form['job_status'],owner=current_user)\n new_app.job_status = \"Submited\" \n \n #new_app = Job_Application(owner=temp_student)\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash(\"Job Application successfully Submited\")\n\n \n\n\n return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id))\n\n\n# Route to edit info in a student account\n# Edit ONLY major, gpa and grad_date\n@app.route(base_url + 'students_edit', methods=['GET', 'POST'])\n@login_required\ndef editStudent():\n\n if request.method == 'POST':\n\n current_user.gpa = request.form['editGpa']\n current_user.major = request.form['editMajor']\n\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n\n return render_template('student_Profile.html',current_user=current_user)\n\n return render_template('student_Profile.html',current_user=current_user)\n\n \n\n\n# Route to edit info in an Instructor account\n# Edit ONLY email, office, and phone\n@app.route(base_url + 'instructors_edit', methods=['GET', 'POST'])\n@login_required\ndef editInstructor():\n\n if request.method == 'POST':\n\n current_user.email = request.form['editEmail']\n current_user.phone = request.form['editPhone']\n current_user.office = request.form['editOffice']\n\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n\n return render_template('Instructor_Profile.html',current_user=current_user)\n\n return render_template('Instructor_Profile.html',current_user=current_user)\n \n# Route to update Student Application \n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n \n if request.method == 'POST':\n student = 
Student.query.filter_by(id =applicate.owner_id)\n student.Job_Application.job_status = \"Rejected\"\n db,session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html',applicates = Job_Application.query.all())\n\n\n\n\n# Route to Delete student Application\n@app.route(base_url + 'cancel_Application', methods=['DELETE'])\n@login_required\ndef cancel_application():\n\n if request.method == 'DELETE':\n job_position = request.form['job_name']\n\n job_pos = current_user.jobs.filter_by(position=job_position)\n db.session.delete(job_pos)\n db.session.commit()\n db.session.refresh()\n return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id),applied=Jobs.query.filter_by())\n\n\n# Route to Login out User\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all() # creates the tables you've provided\n app.run(debug=True) # runs the Flask application \n \nif __name__ == '__main__':\n main()\n",
"step-ids": [
9,
10,
13,
17,
18
]
}
|
[
9,
10,
13,
17,
18
] |
<|reserved_special_token_0|>
def fileWrite(content):
""" write result to result.txt """
file = open('./result.txt', 'w')
file.write(content)
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_data(query):
""" fetch data from database """
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query)
data = c.fetchall()
db.close()
return data
def fileWrite(content):
""" write result to result.txt """
file = open('./result.txt', 'w')
file.write(content)
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DBNAME = 'news'
query1 = """
select title, count(*) as numOfViews from articles,log
where concat('/article/', articles.slug) = log.path
group by title order by numOfViews desc limit 3;
"""
query2 = """
select authors.name, count(*) as numOfViews
from articles, authors, log
where articles.author = authors.id
and concat('/article/', articles.slug) = log.path
group by authors.name order by numOfViews desc ;
"""
query3 = """
select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)
as error from (select date_trunc('day', time) as badDay,
count(*) as total,
sum(case when status!='200 OK' then 1 else 0 end) as err
from log
group by badDay) as innerQuery
where round((100.0*innerQuery.err/innerQuery.total),3) >1;
"""
result = ''
def get_data(query):
""" fetch data from database """
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query)
data = c.fetchall()
db.close()
return data
def fileWrite(content):
""" write result to result.txt """
file = open('./result.txt', 'w')
file.write(content)
file.close()
def appendToResult(content, isError=False):
""" formating db result to readable text """
global result
if isError:
for c in content:
result += c[0].strftime('%Y-%m-%d') + ' - ' + str(c[1]) + '% error'
else:
for c in content:
result += c[0] + ' - ' + str(c[1]) + ' views \n'
fileWrite(result)
if __name__ == '__main__':
result += '\n1. What are the most popular three articles of all time?\n\n'
appendToResult(get_data(query1))
result += ' \n2. Who are the most popular article authors of all time?\n\n'
appendToResult(get_data(query2))
result += (
'\n3. On which days did more than\n 1% of requests lead to errors?\n\n'
)
appendToResult(get_data(query3), True)
print(result)
fileWrite(result)
<|reserved_special_token_1|>
import psycopg2
DBNAME = 'news'
query1 = """
select title, count(*) as numOfViews from articles,log
where concat('/article/', articles.slug) = log.path
group by title order by numOfViews desc limit 3;
"""
query2 = """
select authors.name, count(*) as numOfViews
from articles, authors, log
where articles.author = authors.id
and concat('/article/', articles.slug) = log.path
group by authors.name order by numOfViews desc ;
"""
query3 = """
select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)
as error from (select date_trunc('day', time) as badDay,
count(*) as total,
sum(case when status!='200 OK' then 1 else 0 end) as err
from log
group by badDay) as innerQuery
where round((100.0*innerQuery.err/innerQuery.total),3) >1;
"""
result = ''
def get_data(query):
""" fetch data from database """
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query)
data = c.fetchall()
db.close()
return data
def fileWrite(content):
""" write result to result.txt """
file = open('./result.txt', 'w')
file.write(content)
file.close()
def appendToResult(content, isError=False):
""" formating db result to readable text """
global result
if isError:
for c in content:
result += c[0].strftime('%Y-%m-%d') + ' - ' + str(c[1]) + '% error'
else:
for c in content:
result += c[0] + ' - ' + str(c[1]) + ' views \n'
fileWrite(result)
if __name__ == '__main__':
result += '\n1. What are the most popular three articles of all time?\n\n'
appendToResult(get_data(query1))
result += ' \n2. Who are the most popular article authors of all time?\n\n'
appendToResult(get_data(query2))
result += (
'\n3. On which days did more than\n 1% of requests lead to errors?\n\n'
)
appendToResult(get_data(query3), True)
print(result)
fileWrite(result)
<|reserved_special_token_1|>
#!/usr/bin/env python
import psycopg2
DBNAME = "news"
query1 = """
select title, count(*) as numOfViews from articles,log
where concat('/article/', articles.slug) = log.path
group by title order by numOfViews desc limit 3;
"""
query2 = """
select authors.name, count(*) as numOfViews
from articles, authors, log
where articles.author = authors.id
and concat('/article/', articles.slug) = log.path
group by authors.name order by numOfViews desc ;
"""
query3 = """
select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)
as error from (select date_trunc('day', time) as badDay,
count(*) as total,
sum(case when status!='200 OK' then 1 else 0 end) as err
from log
group by badDay) as innerQuery
where round((100.0*innerQuery.err/innerQuery.total),3) >1;
"""
result = ''
def get_data(query):
""" fetch data from database """
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query)
data = c.fetchall()
db.close()
return data
def fileWrite(content):
""" write result to result.txt """
file = open('./result.txt', 'w')
file.write(content)
file.close()
def appendToResult(content, isError=False):
""" formating db result to readable text """
global result
if(isError):
for c in content:
result += c[0].strftime("%Y-%m-%d") + ' - ' + str(c[1]) + '% error'
else:
for c in content:
result += c[0] + ' - ' + str(c[1]) + ' views \n'
fileWrite(result)
if __name__ == '__main__':
result += '\n1. What are the most popular three articles of all time?\n\n'
appendToResult(get_data(query1))
result += ' \n2. Who are the most popular article authors of all time?\n\n'
appendToResult(get_data(query2))
result += '''\n3. On which days did more than
1% of requests lead to errors?\n\n'''
appendToResult(get_data(query3), True)
print(result)
fileWrite(result)
|
flexible
|
{
"blob_id": "612a3d168a09fc26530b95d258cbb4de6728419d",
"index": 3721,
"step-1": "<mask token>\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data(query):\n \"\"\" fetch data from database \"\"\"\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\nDBNAME = 'news'\nquery1 = \"\"\"\nselect title, count(*) as numOfViews from articles,log\nwhere concat('/article/', articles.slug) = log.path\ngroup by title order by numOfViews desc limit 3;\n\"\"\"\nquery2 = \"\"\"\nselect authors.name, count(*) as numOfViews\nfrom articles, authors, log\nwhere articles.author = authors.id\nand concat('/article/', articles.slug) = log.path\ngroup by authors.name order by numOfViews desc ;\n\"\"\"\nquery3 = \"\"\"\n select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)\n as error from (select date_trunc('day', time) as badDay,\n count(*) as total,\n sum(case when status!='200 OK' then 1 else 0 end) as err\n from log\n group by badDay) as innerQuery\n where round((100.0*innerQuery.err/innerQuery.total),3) >1;\n \"\"\"\nresult = ''\n\n\ndef get_data(query):\n \"\"\" fetch data from database \"\"\"\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\ndef appendToResult(content, isError=False):\n \"\"\" formating db result to readable text \"\"\"\n global result\n if isError:\n for c in content:\n result += c[0].strftime('%Y-%m-%d') + ' - ' + str(c[1]) + '% error'\n else:\n for c in content:\n result += c[0] + ' - ' + str(c[1]) + ' views \\n'\n fileWrite(result)\n\n\nif __name__ == '__main__':\n result += '\\n1. What are the most popular three articles of all time?\\n\\n'\n appendToResult(get_data(query1))\n result += ' \\n2. Who are the most popular article authors of all time?\\n\\n'\n appendToResult(get_data(query2))\n result += (\n '\\n3. On which days did more than\\n 1% of requests lead to errors?\\n\\n'\n )\n appendToResult(get_data(query3), True)\n print(result)\n fileWrite(result)\n",
"step-4": "import psycopg2\nDBNAME = 'news'\nquery1 = \"\"\"\nselect title, count(*) as numOfViews from articles,log\nwhere concat('/article/', articles.slug) = log.path\ngroup by title order by numOfViews desc limit 3;\n\"\"\"\nquery2 = \"\"\"\nselect authors.name, count(*) as numOfViews\nfrom articles, authors, log\nwhere articles.author = authors.id\nand concat('/article/', articles.slug) = log.path\ngroup by authors.name order by numOfViews desc ;\n\"\"\"\nquery3 = \"\"\"\n select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)\n as error from (select date_trunc('day', time) as badDay,\n count(*) as total,\n sum(case when status!='200 OK' then 1 else 0 end) as err\n from log\n group by badDay) as innerQuery\n where round((100.0*innerQuery.err/innerQuery.total),3) >1;\n \"\"\"\nresult = ''\n\n\ndef get_data(query):\n \"\"\" fetch data from database \"\"\"\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\ndef appendToResult(content, isError=False):\n \"\"\" formating db result to readable text \"\"\"\n global result\n if isError:\n for c in content:\n result += c[0].strftime('%Y-%m-%d') + ' - ' + str(c[1]) + '% error'\n else:\n for c in content:\n result += c[0] + ' - ' + str(c[1]) + ' views \\n'\n fileWrite(result)\n\n\nif __name__ == '__main__':\n result += '\\n1. What are the most popular three articles of all time?\\n\\n'\n appendToResult(get_data(query1))\n result += ' \\n2. Who are the most popular article authors of all time?\\n\\n'\n appendToResult(get_data(query2))\n result += (\n '\\n3. On which days did more than\\n 1% of requests lead to errors?\\n\\n'\n )\n appendToResult(get_data(query3), True)\n print(result)\n fileWrite(result)\n",
"step-5": "#!/usr/bin/env python\n\nimport psycopg2\n\nDBNAME = \"news\"\n\nquery1 = \"\"\"\nselect title, count(*) as numOfViews from articles,log\nwhere concat('/article/', articles.slug) = log.path\ngroup by title order by numOfViews desc limit 3;\n\"\"\"\nquery2 = \"\"\"\nselect authors.name, count(*) as numOfViews\nfrom articles, authors, log\nwhere articles.author = authors.id\nand concat('/article/', articles.slug) = log.path\ngroup by authors.name order by numOfViews desc ;\n\"\"\"\nquery3 = \"\"\"\n select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)\n as error from (select date_trunc('day', time) as badDay,\n count(*) as total,\n sum(case when status!='200 OK' then 1 else 0 end) as err\n from log\n group by badDay) as innerQuery\n where round((100.0*innerQuery.err/innerQuery.total),3) >1;\n \"\"\"\nresult = ''\n\n\ndef get_data(query):\n \"\"\" fetch data from database \"\"\"\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\ndef appendToResult(content, isError=False):\n \"\"\" formating db result to readable text \"\"\"\n global result\n if(isError):\n for c in content:\n result += c[0].strftime(\"%Y-%m-%d\") + ' - ' + str(c[1]) + '% error'\n else:\n for c in content:\n result += c[0] + ' - ' + str(c[1]) + ' views \\n'\n fileWrite(result)\n\n\nif __name__ == '__main__':\n result += '\\n1. What are the most popular three articles of all time?\\n\\n'\n appendToResult(get_data(query1))\n result += ' \\n2. Who are the most popular article authors of all time?\\n\\n'\n appendToResult(get_data(query2))\n result += '''\\n3. On which days did more than\n 1% of requests lead to errors?\\n\\n'''\n appendToResult(get_data(query3), True)\n print(result)\n fileWrite(result)\n",
"step-ids": [
1,
2,
5,
6,
7
]
}
|
[
1,
2,
5,
6,
7
] |
# coding: utf-8
from korean.morphophonemics.phonology import Syllable
from notes.old_morphology import Noun, Verb
class Case (object):
pass
class Nominative (Case):
def apply(self, noun):
if noun.has_tail():
noun.syllables.append(Syllable(u'이'))
else:
noun.syllables.append(Syllable(u'가'))
class Accusative (Case):
def apply(self, noun):
if noun.has_tail():
noun.syllables.append(Syllable(u'을'))
else:
noun.syllables.append(Syllable(u'를'))
class Locative (Case):
def apply(self, noun):
noun.syllables.append(Syllable(u'에'))
noun.syllables.append(Syllable(u'서'))
class Sentence (object):
def __init__(self, subject=None, object=None, location=None, verb=None):
self.subject = subject
self.object = object
self.location = location
self.verb = verb
self.assign_cases()
self.analyze()
def assign_cases(self):
try:
self.subject.set_case(Nominative())
except AttributeError:
pass
try:
self.object.set_case(Accusative())
except AttributeError:
pass
try:
self.location.set_case(Locative())
except AttributeError:
pass
def analyze(self):
pass
def __str__(self):
sentence = ' '.join([
str(part) for part in [
self.subject,
self.location,
self.object,
self.verb,
] if part
])
return sentence
def main():
dave = Noun(u'데이브')
emma = Noun(u'연정')
elly = Noun(u'엘리')
house = Noun(u'집')
treat = Noun(u'트리트')
lunch = Noun(u'점심')
eat = Verb(u'머거요')
s1 = Sentence(subject=emma, verb=eat)
s2 = Sentence(subject=dave, object=lunch, verb=eat)
s3 = Sentence(subject=elly, object=treat, location=house, verb=eat)
print s1
print s2
print s3
if __name__ == '__main__': main()
|
normal
|
{
"blob_id": "1077efaa4379ff0e114a0b8d4d3b7156758e070f",
"index": 9861,
"step-1": "# coding: utf-8\n\nfrom korean.morphophonemics.phonology import Syllable\nfrom notes.old_morphology import Noun, Verb\n\n\nclass Case (object):\n pass \n\nclass Nominative (Case):\n def apply(self, noun):\n if noun.has_tail():\n noun.syllables.append(Syllable(u'이'))\n else:\n noun.syllables.append(Syllable(u'가'))\n\nclass Accusative (Case):\n def apply(self, noun):\n if noun.has_tail():\n noun.syllables.append(Syllable(u'을'))\n else:\n noun.syllables.append(Syllable(u'를'))\n\nclass Locative (Case):\n def apply(self, noun):\n noun.syllables.append(Syllable(u'에'))\n noun.syllables.append(Syllable(u'서'))\n \nclass Sentence (object):\n def __init__(self, subject=None, object=None, location=None, verb=None):\n self.subject = subject\n self.object = object\n self.location = location\n self.verb = verb\n \n self.assign_cases()\n self.analyze()\n\n def assign_cases(self):\n try:\n self.subject.set_case(Nominative()) \n except AttributeError:\n pass\n try:\n self.object.set_case(Accusative()) \n except AttributeError:\n pass\n try:\n self.location.set_case(Locative()) \n except AttributeError:\n pass\n \n def analyze(self):\n pass \n \n def __str__(self):\n sentence = ' '.join([\n str(part) for part in [\n self.subject,\n self.location,\n self.object,\n self.verb,\n ] if part\n ])\n return sentence\n\ndef main():\n dave = Noun(u'데이브')\n emma = Noun(u'연정')\n elly = Noun(u'엘리')\n house = Noun(u'집')\n treat = Noun(u'트리트')\n lunch = Noun(u'점심')\n eat = Verb(u'머거요')\n s1 = Sentence(subject=emma, verb=eat)\n s2 = Sentence(subject=dave, object=lunch, verb=eat)\n s3 = Sentence(subject=elly, object=treat, location=house, verb=eat)\n print s1\n print s2\n print s3\n \nif __name__ == '__main__': main()\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |