| code (string, length 13–6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, length 1–5) |
|---|---|---|---|
import os
import log
import core
import time
__description__ = 'OS X Auditor'
__author__ = 'Atarimaster & @Jipe_'
__version__ = '0.5.0'
ROOT_PATH = '/'
Euid = str(os.geteuid())
Egid = str(os.getegid())
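# The helpers below assemble the audit report header: description, audited path, system version, and timezone.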
def generate_header():
    header = {}

    # Description (Audited By)
    description = "Report generated by " + __description__ + " v" + __version__ + " on " + time.strftime('%x %X %Z') + " running as " + Euid + "/" + Egid
    header['description'] = description

    # Audited Path (str.decode() marks this as Python 2 code; it would raise on a Python 3 str)
    audit_path = "Audited system path: " + ROOT_PATH.decode("utf-8")
    header['audit_path'] = audit_path

    # System Version
    AuditedSystemVersion = GetAuditedSystemVersion()
    sysv = "Version of the audited system: " + AuditedSystemVersion
    header['system_version'] = sysv

    # Current Timezone
    Timezone = GetAuditedSystemTimezone()
    tz = "Current timezone of the audited system: " + Timezone
    header['timezone'] = tz

    return header
def GetAuditedSystemVersion():
    global OSX_VERSION

    SysVersion = "Unknown system version"
    SystemVersionPlist = False

    SystemVersionPlist = core.UniversalReadPlist("/System/Library/CoreServices/SystemVersion.plist")

    if SystemVersionPlist:
        if "ProductName" in SystemVersionPlist: SysVersion = SystemVersionPlist["ProductName"]
        if "ProductVersion" in SystemVersionPlist: SysVersion += " " + SystemVersionPlist["ProductVersion"]
        if "ProductBuildVersion" in SystemVersionPlist: SysVersion += " build " + SystemVersionPlist["ProductBuildVersion"]

        OSX_VERSION = {
            "ProductBuildVersion": SystemVersionPlist["ProductBuildVersion"],
            "ProductVersion": SystemVersionPlist["ProductVersion"],
            "MajorVersion": int(SystemVersionPlist["ProductVersion"].split('.')[0]),
            "MinorVersion": int(SystemVersionPlist["ProductVersion"].split('.')[1]),
            "PatchVersion": int(SystemVersionPlist["ProductVersion"].split('.')[2])
        }
    else:
        log.PrintAndLog(u"Cannot determine the system version", "ERROR")

    return SysVersion
def GetAuditedSystemTimezone():
    """ Return the current system timezone """
    Timezone = False
    try:
        # /etc/localtime is a symlink into the zoneinfo database, e.g. .../America/Chicago
        Timezone = os.path.realpath(os.path.join(ROOT_PATH, "etc/localtime"))
        Timezone = Timezone.split("/")
    except Exception as e:
        # bug fix: PrintAndLog lives in the log module; the original bare name raised a NameError
        log.PrintAndLog(u"Cannot read the timezone" + str(e.args).decode("utf-8"), "ERROR")
    return Timezone[-2] + "/" + Timezone[-1]
|
normal
|
{
"blob_id": "547d67bce7eb05e55e02c73a22342ca572e89f39",
"index": 9959,
"step-1": "<mask token>\n\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n SysVersion = 'Unknown system version'\n SystemVersionPlist = False\n SystemVersionPlist = core.UniversalReadPlist(\n '/System/Library/CoreServices/SystemVersion.plist')\n if SystemVersionPlist:\n if 'ProductName' in SystemVersionPlist:\n SysVersion = SystemVersionPlist['ProductName']\n if 'ProductVersion' in SystemVersionPlist:\n SysVersion += ' ' + SystemVersionPlist['ProductVersion']\n if 'ProductBuildVersion' in SystemVersionPlist:\n SysVersion += ' build ' + SystemVersionPlist['ProductBuildVersion']\n OSX_VERSION = {'ProductBuildVersion': SystemVersionPlist[\n 'ProductBuildVersion'], 'ProductVersion': SystemVersionPlist[\n 'ProductVersion'], 'MajorVersion': int(SystemVersionPlist[\n 'ProductVersion'].split('.')[0]), 'MinorVersion': int(\n SystemVersionPlist['ProductVersion'].split('.')[1]),\n 'PatchVersion': int(SystemVersionPlist['ProductVersion'].split(\n '.')[2])}\n else:\n log.PrintAndLog(u'Cannot determine the system version', 'ERROR')\n return SysVersion\n\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, 'etc/localtime'))\n Timezone = Timezone.split('/')\n except Exception as e:\n PrintAndLog(u'Cannot read the timezone' + str(e.args).decode(\n 'utf-8'), 'ERROR')\n return Timezone[-2] + '/' + Timezone[-1]\n",
"step-2": "<mask token>\n\n\ndef generate_header():\n header = {}\n description = ('Report generated by ' + __description__ + ' v' +\n __version__ + ' on ' + time.strftime('%x %X %Z') + ' running as ' +\n Euid + '/' + Egid)\n header['description'] = description\n audit_path = 'Audited system path: ' + ROOT_PATH.decode('utf-8')\n header['audit_path'] = audit_path\n AuditedSystemVersion = GetAuditedSystemVersion()\n sysv = 'Version of the audited system: ' + AuditedSystemVersion\n header['system_version'] = sysv\n Timezone = GetAuditedSystemTimezone()\n tz = 'Current timezone of the audited system: ' + Timezone\n header['timezone'] = tz\n return header\n\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n SysVersion = 'Unknown system version'\n SystemVersionPlist = False\n SystemVersionPlist = core.UniversalReadPlist(\n '/System/Library/CoreServices/SystemVersion.plist')\n if SystemVersionPlist:\n if 'ProductName' in SystemVersionPlist:\n SysVersion = SystemVersionPlist['ProductName']\n if 'ProductVersion' in SystemVersionPlist:\n SysVersion += ' ' + SystemVersionPlist['ProductVersion']\n if 'ProductBuildVersion' in SystemVersionPlist:\n SysVersion += ' build ' + SystemVersionPlist['ProductBuildVersion']\n OSX_VERSION = {'ProductBuildVersion': SystemVersionPlist[\n 'ProductBuildVersion'], 'ProductVersion': SystemVersionPlist[\n 'ProductVersion'], 'MajorVersion': int(SystemVersionPlist[\n 'ProductVersion'].split('.')[0]), 'MinorVersion': int(\n SystemVersionPlist['ProductVersion'].split('.')[1]),\n 'PatchVersion': int(SystemVersionPlist['ProductVersion'].split(\n '.')[2])}\n else:\n log.PrintAndLog(u'Cannot determine the system version', 'ERROR')\n return SysVersion\n\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, 'etc/localtime'))\n Timezone = Timezone.split('/')\n except Exception as e:\n PrintAndLog(u'Cannot read the timezone' + str(e.args).decode(\n 'utf-8'), 'ERROR')\n return Timezone[-2] + '/' + Timezone[-1]\n",
"step-3": "<mask token>\n__description__ = 'OS X Auditor'\n__author__ = 'Atarimaster & @Jipe_'\n__version__ = '0.5.0'\nROOT_PATH = '/'\nEuid = str(os.geteuid())\nEgid = str(os.getegid())\n\n\ndef generate_header():\n header = {}\n description = ('Report generated by ' + __description__ + ' v' +\n __version__ + ' on ' + time.strftime('%x %X %Z') + ' running as ' +\n Euid + '/' + Egid)\n header['description'] = description\n audit_path = 'Audited system path: ' + ROOT_PATH.decode('utf-8')\n header['audit_path'] = audit_path\n AuditedSystemVersion = GetAuditedSystemVersion()\n sysv = 'Version of the audited system: ' + AuditedSystemVersion\n header['system_version'] = sysv\n Timezone = GetAuditedSystemTimezone()\n tz = 'Current timezone of the audited system: ' + Timezone\n header['timezone'] = tz\n return header\n\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n SysVersion = 'Unknown system version'\n SystemVersionPlist = False\n SystemVersionPlist = core.UniversalReadPlist(\n '/System/Library/CoreServices/SystemVersion.plist')\n if SystemVersionPlist:\n if 'ProductName' in SystemVersionPlist:\n SysVersion = SystemVersionPlist['ProductName']\n if 'ProductVersion' in SystemVersionPlist:\n SysVersion += ' ' + SystemVersionPlist['ProductVersion']\n if 'ProductBuildVersion' in SystemVersionPlist:\n SysVersion += ' build ' + SystemVersionPlist['ProductBuildVersion']\n OSX_VERSION = {'ProductBuildVersion': SystemVersionPlist[\n 'ProductBuildVersion'], 'ProductVersion': SystemVersionPlist[\n 'ProductVersion'], 'MajorVersion': int(SystemVersionPlist[\n 'ProductVersion'].split('.')[0]), 'MinorVersion': int(\n SystemVersionPlist['ProductVersion'].split('.')[1]),\n 'PatchVersion': int(SystemVersionPlist['ProductVersion'].split(\n '.')[2])}\n else:\n log.PrintAndLog(u'Cannot determine the system version', 'ERROR')\n return SysVersion\n\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, 'etc/localtime'))\n Timezone = Timezone.split('/')\n except Exception as e:\n PrintAndLog(u'Cannot read the timezone' + str(e.args).decode(\n 'utf-8'), 'ERROR')\n return Timezone[-2] + '/' + Timezone[-1]\n",
"step-4": "import os\nimport log\nimport core\nimport time\n__description__ = 'OS X Auditor'\n__author__ = 'Atarimaster & @Jipe_'\n__version__ = '0.5.0'\nROOT_PATH = '/'\nEuid = str(os.geteuid())\nEgid = str(os.getegid())\n\n\ndef generate_header():\n header = {}\n description = ('Report generated by ' + __description__ + ' v' +\n __version__ + ' on ' + time.strftime('%x %X %Z') + ' running as ' +\n Euid + '/' + Egid)\n header['description'] = description\n audit_path = 'Audited system path: ' + ROOT_PATH.decode('utf-8')\n header['audit_path'] = audit_path\n AuditedSystemVersion = GetAuditedSystemVersion()\n sysv = 'Version of the audited system: ' + AuditedSystemVersion\n header['system_version'] = sysv\n Timezone = GetAuditedSystemTimezone()\n tz = 'Current timezone of the audited system: ' + Timezone\n header['timezone'] = tz\n return header\n\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n SysVersion = 'Unknown system version'\n SystemVersionPlist = False\n SystemVersionPlist = core.UniversalReadPlist(\n '/System/Library/CoreServices/SystemVersion.plist')\n if SystemVersionPlist:\n if 'ProductName' in SystemVersionPlist:\n SysVersion = SystemVersionPlist['ProductName']\n if 'ProductVersion' in SystemVersionPlist:\n SysVersion += ' ' + SystemVersionPlist['ProductVersion']\n if 'ProductBuildVersion' in SystemVersionPlist:\n SysVersion += ' build ' + SystemVersionPlist['ProductBuildVersion']\n OSX_VERSION = {'ProductBuildVersion': SystemVersionPlist[\n 'ProductBuildVersion'], 'ProductVersion': SystemVersionPlist[\n 'ProductVersion'], 'MajorVersion': int(SystemVersionPlist[\n 'ProductVersion'].split('.')[0]), 'MinorVersion': int(\n SystemVersionPlist['ProductVersion'].split('.')[1]),\n 'PatchVersion': int(SystemVersionPlist['ProductVersion'].split(\n '.')[2])}\n else:\n log.PrintAndLog(u'Cannot determine the system version', 'ERROR')\n return SysVersion\n\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, 'etc/localtime'))\n Timezone = Timezone.split('/')\n except Exception as e:\n PrintAndLog(u'Cannot read the timezone' + str(e.args).decode(\n 'utf-8'), 'ERROR')\n return Timezone[-2] + '/' + Timezone[-1]\n",
"step-5": "import os\nimport log\nimport core\nimport time\n\n__description__ = 'OS X Auditor'\n__author__ = 'Atarimaster & @Jipe_'\n__version__ = '0.5.0'\n\nROOT_PATH = '/'\n\nEuid = str(os.geteuid())\nEgid = str(os.getegid())\n\ndef generate_header():\n header = {}\n\n # Description(Audited By)\n description = \"Report generated by \" + __description__ + \" v\" + __version__ + \" on \" + time.strftime('%x %X %Z') + \" running as \" + Euid + \"/\" + Egid\n header['description'] = description\n\n # Audited Path\n audit_path = \"Audited system path: \" + ROOT_PATH.decode(\"utf-8\")\n header['audit_path'] = audit_path\n\n # System Version\n AuditedSystemVersion = GetAuditedSystemVersion()\n sysv = \"Version of the audited system: \" + AuditedSystemVersion\n header['system_version'] = sysv\n\n # Current Timezone\n Timezone = GetAuditedSystemTimezone()\n tz = \"Current timezone of the audited system: \" + Timezone\n header['timezone'] = tz\n\n return header\n\ndef GetAuditedSystemVersion():\n global OSX_VERSION\n\n SysVersion = \"Unknown system version\"\n SystemVersionPlist = False\n\n SystemVersionPlist = core.UniversalReadPlist(\"/System/Library/CoreServices/SystemVersion.plist\")\n\n if SystemVersionPlist:\n if \"ProductName\" in SystemVersionPlist: SysVersion = SystemVersionPlist[\"ProductName\"]\n if \"ProductVersion\" in SystemVersionPlist: SysVersion += \" \" + SystemVersionPlist[\"ProductVersion\"]\n if \"ProductBuildVersion\" in SystemVersionPlist: SysVersion += \" build \" + SystemVersionPlist[\"ProductBuildVersion\"]\n\n OSX_VERSION = {\n \"ProductBuildVersion\": SystemVersionPlist[\"ProductBuildVersion\"],\n \"ProductVersion\": SystemVersionPlist[\"ProductVersion\"],\n \"MajorVersion\": int(SystemVersionPlist[\"ProductVersion\"].split('.')[0]),\n \"MinorVersion\": int(SystemVersionPlist[\"ProductVersion\"].split('.')[1]),\n \"PatchVersion\": int(SystemVersionPlist[\"ProductVersion\"].split('.')[2])\n }\n\n else:\n log.PrintAndLog(u\"Cannot determine the system version\", \"ERROR\")\n\n return SysVersion\n\ndef GetAuditedSystemTimezone():\n \"\"\" Return the current system timezone \"\"\"\n\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, \"etc/localtime\"))\n Timezone = Timezone.split(\"/\")\n except Exception as e:\n PrintAndLog(u\"Cannot read the timezone\" + str(e.args).decode(\"utf-8\"), \"ERROR\")\n\n return Timezone[-2] + \"/\" + Timezone[-1]",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
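# Yields (row, col) coordinates for a w-by-h image in row-major order: divmod(i, w) == (i // w, i % w).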
def pixels_generator(w, h):
    i = 0
    while i < w * h:
        yield divmod(i, w)
        i = i + 1
<|reserved_special_token_1|>
def pixels_generator(w, h):
    i = 0
    while i < (w * h):
        yield divmod(i, w)
        i = i + 1
|
flexible
|
{
"blob_id": "bb481fa038835abc6d61a4985b1e30c7c00bff96",
"index": 158,
"step-1": "<mask token>\n",
"step-2": "def pixels_generator(w, h):\n i = 0\n while i < w * h:\n yield divmod(i, w)\n i = i + 1\n",
"step-3": "def pixels_generator(w, h):\n i = 0\n while i < (w * h):\n yield divmod(i, w)\n i = i + 1\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
FILE = 'Luke'
NAME = 'Luke Walker'
NATIONALITY = 'American'
CLASS = 'Manipulator'
WEAPON = ''
BIRTH = ''
BIRTH_LOCATION = ''
LETTER = 'W'
RECRUITMENT_ORDER = 10
SUMMARY = ''
ABILITIES = ''
BACKSTORY = ''
HIGHLIGHTS = ''
SUMMONS = 'Tonberry', 'Grimnir', 'Griever', 'Starlet'
<|reserved_special_token_1|>
FILE = "Luke"
NAME = "Luke Walker"
NATIONALITY = "American"
CLASS = "Manipulator"
WEAPON = ""
BIRTH = ""
BIRTH_LOCATION = ""
LETTER = "W"
RECRUITMENT_ORDER = 10
SUMMARY = ""
ABILITIES = ""
BACKSTORY = ""
HIGHLIGHTS = ""
SUMMONS = ("Tonberry", "Grimnir", "Griever", "Starlet")
|
flexible
|
{
"blob_id": "fa3ab879541c04e278317b11dd79e6e1b4319536",
"index": 7586,
"step-1": "<mask token>\n",
"step-2": "FILE = 'Luke'\nNAME = 'Luke Walker'\nNATIONALITY = 'American'\nCLASS = 'Manipulator'\nWEAPON = ''\nBIRTH = ''\nBIRTH_LOCATION = ''\nLETTER = 'W'\nRECRUITMENT_ORDER = 10\nSUMMARY = ''\nABILITIES = ''\nBACKSTORY = ''\nHIGHLIGHTS = ''\nSUMMONS = 'Tonberry', 'Grimnir', 'Griever', 'Starlet'\n",
"step-3": "FILE = \"Luke\"\n\nNAME = \"Luke Walker\"\n\nNATIONALITY = \"American\"\n\nCLASS = \"Manipulator\"\n\nWEAPON = \"\"\n\nBIRTH = \"\"\n\nBIRTH_LOCATION = \"\"\n\nLETTER = \"W\"\n\nRECRUITMENT_ORDER = 10\n\nSUMMARY = \"\"\n\nABILITIES = \"\"\n\nBACKSTORY = \"\"\n\nHIGHLIGHTS = \"\"\n\nSUMMONS = (\"Tonberry\", \"Grimnir\", \"Griever\", \"Starlet\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def less(i1, i2):
    return i1[0] * i2[1] < i2[0] * i1[1]


def equal(i1, i2):
    return i1[0] * i2[1] == i2[0] * i1[1]


def more(i1, i2):
    return i1[0] * i2[1] > i2[0] * i1[1]


def partition(x, l, r, pivot):
    il = l
    ir = l
    for i in range(l, r):
        if x[i] < pivot and ir < r:
            x[il], x[i] = x[i], x[il]
            if il != ir:
                x[ir], x[i] = x[i], x[ir]
            il += 1
            ir += 1
        elif x[i] == pivot and ir < r:
            x[ir], x[i] = x[i], x[ir]
            ir += 1
    return il, ir


def qsort(x, l=0, r=None):
    if r is None:
        r = len(x)
    if r - l > 1:
        pivot = x[random.randint(l, r - 1)]
        il, ir = partition(x, l, r, pivot)
        qsort(x, l, il)
        qsort(x, ir, r)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def less(i1, i2):
    return i1[0] * i2[1] < i2[0] * i1[1]


def equal(i1, i2):
    return i1[0] * i2[1] == i2[0] * i1[1]


def more(i1, i2):
    return i1[0] * i2[1] > i2[0] * i1[1]


def partition(x, l, r, pivot):
    il = l
    ir = l
    for i in range(l, r):
        if x[i] < pivot and ir < r:
            x[il], x[i] = x[i], x[il]
            if il != ir:
                x[ir], x[i] = x[i], x[ir]
            il += 1
            ir += 1
        elif x[i] == pivot and ir < r:
            x[ir], x[i] = x[i], x[ir]
            ir += 1
    return il, ir


def qsort(x, l=0, r=None):
    if r is None:
        r = len(x)
    if r - l > 1:
        pivot = x[random.randint(l, r - 1)]
        il, ir = partition(x, l, r, pivot)
        qsort(x, l, il)
        qsort(x, ir, r)


N, w = list(map(int, input().split()))
x = []
for i in range(N):
    x.append(tuple(map(int, input().split())))
qsort(x)
x = x[::-1]
s = 0
i = 0
while i < N and w >= x[i][1]:
    s += x[i][0]
    w -= x[i][1]
    i += 1
if i < N:
    s += x[i][0] * w // x[i][1]
print(s)
<|reserved_special_token_1|>
import random


def less(i1, i2):
    return i1[0] * i2[1] < i2[0] * i1[1]


def equal(i1, i2):
    return i1[0] * i2[1] == i2[0] * i1[1]


def more(i1, i2):
    return i1[0] * i2[1] > i2[0] * i1[1]


def partition(x, l, r, pivot):
    il = l
    ir = l
    for i in range(l, r):
        if x[i] < pivot and ir < r:
            x[il], x[i] = x[i], x[il]
            if il != ir:
                x[ir], x[i] = x[i], x[ir]
            il += 1
            ir += 1
        elif x[i] == pivot and ir < r:
            x[ir], x[i] = x[i], x[ir]
            ir += 1
    return il, ir


def qsort(x, l=0, r=None):
    if r is None:
        r = len(x)
    if r - l > 1:
        pivot = x[random.randint(l, r - 1)]
        il, ir = partition(x, l, r, pivot)
        qsort(x, l, il)
        qsort(x, ir, r)


N, w = list(map(int, input().split()))
x = []
for i in range(N):
    x.append(tuple(map(int, input().split())))
qsort(x)
x = x[::-1]
s = 0
i = 0
while i < N and w >= x[i][1]:
    s += x[i][0]
    w -= x[i][1]
    i += 1
if i < N:
    s += x[i][0] * w // x[i][1]
print(s)
<|reserved_special_token_1|>
import random


def less(i1, i2):
    return i1[0] * i2[1] < i2[0] * i1[1]


def equal(i1, i2):
    return i1[0] * i2[1] == i2[0] * i1[1]


def more(i1, i2):
    return i1[0] * i2[1] > i2[0] * i1[1]
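# The three comparators above order (value, weight) pairs by their value/weight
# ratio via cross-multiplication; nothing below calls them, so they appear to be
# leftovers from an intended ratio-ordered sort.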
def partition(x, l, r, pivot):
    # three-way (Dutch national flag) partition: on return, x[l:il] < pivot,
    # x[il:ir] == pivot, and x[ir:r] > pivot
    il = l
    ir = l
    for i in range(l, r):
        if x[i] < pivot and ir < r:
            x[il], x[i] = x[i], x[il]
            if il != ir:
                x[ir], x[i] = x[i], x[ir]
            il += 1
            ir += 1
        elif x[i] == pivot and ir < r:
            x[ir], x[i] = x[i], x[ir]
            ir += 1
    return il, ir
def qsort(x, l=0, r=None):
    if r is None:
        r = len(x)
    if (r - l) > 1:
        pivot = x[random.randint(l, r - 1)]
        il, ir = partition(x, l, r, pivot)
        qsort(x, l, il)
        qsort(x, ir, r)
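# Driver: read capacity w and N (value, weight) items, sort descending, take whole
# items while they fit, then a proportional share of the next item. This is a
# fractional knapsack greedy; note the sort key is the raw tuple, not the value/weight ratio.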
N, w = list(map(int, input().split()))
x = []
for i in range(N):
    x.append(tuple(map(int, input().split())))
qsort(x)
x = x[::-1]

s = 0
i = 0
while (i < N) and (w >= x[i][1]):
    s += x[i][0]
    w -= x[i][1]
    i += 1
if i < N:
    s += (x[i][0] * w // x[i][1])

print(s)
|
flexible
|
{
"blob_id": "a5e693a79211570f2d27575657496992f8fee164",
"index": 9075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef less(i1, i2):\n return i1[0] * i2[1] < i2[0] * i1[1]\n\n\ndef equal(i1, i2):\n return i1[0] * i2[1] == i2[0] * i1[1]\n\n\ndef more(i1, i2):\n return i1[0] * i2[1] > i2[0] * i1[1]\n\n\ndef partition(x, l, r, pivot):\n il = l\n ir = l\n for i in range(l, r):\n if x[i] < pivot and ir < r:\n x[il], x[i] = x[i], x[il]\n if il != ir:\n x[ir], x[i] = x[i], x[ir]\n il += 1\n ir += 1\n elif x[i] == pivot and ir < r:\n x[ir], x[i] = x[i], x[ir]\n ir += 1\n return il, ir\n\n\ndef qsort(x, l=0, r=None):\n if r is None:\n r = len(x)\n if r - l > 1:\n pivot = x[random.randint(l, r - 1)]\n il, ir = partition(x, l, r, pivot)\n qsort(x, l, il)\n qsort(x, ir, r)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef less(i1, i2):\n return i1[0] * i2[1] < i2[0] * i1[1]\n\n\ndef equal(i1, i2):\n return i1[0] * i2[1] == i2[0] * i1[1]\n\n\ndef more(i1, i2):\n return i1[0] * i2[1] > i2[0] * i1[1]\n\n\ndef partition(x, l, r, pivot):\n il = l\n ir = l\n for i in range(l, r):\n if x[i] < pivot and ir < r:\n x[il], x[i] = x[i], x[il]\n if il != ir:\n x[ir], x[i] = x[i], x[ir]\n il += 1\n ir += 1\n elif x[i] == pivot and ir < r:\n x[ir], x[i] = x[i], x[ir]\n ir += 1\n return il, ir\n\n\ndef qsort(x, l=0, r=None):\n if r is None:\n r = len(x)\n if r - l > 1:\n pivot = x[random.randint(l, r - 1)]\n il, ir = partition(x, l, r, pivot)\n qsort(x, l, il)\n qsort(x, ir, r)\n\n\nN, w = list(map(int, input().split()))\nx = []\nfor i in range(N):\n x.append(tuple(map(int, input().split())))\nqsort(x)\nx = x[::-1]\ns = 0\ni = 0\nwhile i < N and w >= x[i][1]:\n s += x[i][0]\n w -= x[i][1]\n i += 1\nif i < N:\n s += x[i][0] * w // x[i][1]\nprint(s)\n",
"step-4": "import random\n\n\ndef less(i1, i2):\n return i1[0] * i2[1] < i2[0] * i1[1]\n\n\ndef equal(i1, i2):\n return i1[0] * i2[1] == i2[0] * i1[1]\n\n\ndef more(i1, i2):\n return i1[0] * i2[1] > i2[0] * i1[1]\n\n\ndef partition(x, l, r, pivot):\n il = l\n ir = l\n for i in range(l, r):\n if x[i] < pivot and ir < r:\n x[il], x[i] = x[i], x[il]\n if il != ir:\n x[ir], x[i] = x[i], x[ir]\n il += 1\n ir += 1\n elif x[i] == pivot and ir < r:\n x[ir], x[i] = x[i], x[ir]\n ir += 1\n return il, ir\n\n\ndef qsort(x, l=0, r=None):\n if r is None:\n r = len(x)\n if r - l > 1:\n pivot = x[random.randint(l, r - 1)]\n il, ir = partition(x, l, r, pivot)\n qsort(x, l, il)\n qsort(x, ir, r)\n\n\nN, w = list(map(int, input().split()))\nx = []\nfor i in range(N):\n x.append(tuple(map(int, input().split())))\nqsort(x)\nx = x[::-1]\ns = 0\ni = 0\nwhile i < N and w >= x[i][1]:\n s += x[i][0]\n w -= x[i][1]\n i += 1\nif i < N:\n s += x[i][0] * w // x[i][1]\nprint(s)\n",
"step-5": "import random\n\n\ndef less(i1, i2):\n return i1[0] * i2[1] < i2[0] * i1[1]\n\n\ndef equal(i1, i2):\n return i1[0] * i2[1] == i2[0] * i1[1]\n\n\ndef more(i1, i2):\n return i1[0] * i2[1] > i2[0] * i1[1]\n\n\ndef partition(x, l, r, pivot):\n il = l\n ir = l\n for i in range(l, r):\n if x[i] < pivot and ir < r:\n x[il], x[i] = x[i], x[il]\n if il != ir:\n x[ir], x[i] = x[i], x[ir]\n il += 1\n ir += 1\n elif x[i] == pivot and ir < r:\n x[ir], x[i] = x[i], x[ir]\n ir += 1\n return il, ir\n\n\ndef qsort(x, l=0, r=None):\n if r is None:\n r = len(x)\n if (r - l) > 1:\n pivot = x[random.randint(l, r - 1)]\n il, ir = partition(x, l, r, pivot)\n qsort(x, l, il)\n qsort(x, ir, r)\n\n\nN, w = list(map(int, input().split()))\nx = []\nfor i in range(N):\n x.append(tuple(map(int, input().split())))\nqsort(x)\nx = x[::-1]\n\ns = 0\ni = 0\nwhile (i < N) and (w >= x[i][1]):\n s += x[i][0]\n w -= x[i][1]\n i += 1\nif i < N:\n s += (x[i][0] * w // x[i][1])\n\nprint(s)\n",
"step-ids": [
0,
5,
7,
8,
9
]
}
|
[
0,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
class PrescriptionForm(forms.ModelForm):
    class Meta:
        model = Prescription
        exclude = ['doctor']
        widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AppointmentForm(forms.ModelForm):
    class Meta:
        model = Appointment
        fields = '__all__'
        widgets = {'date': forms.DateInput(attrs={'type': 'date'}),
                   'time': forms.TimeInput(attrs={'type': 'time'})}
<|reserved_special_token_0|>
class PrescriptionForm(forms.ModelForm):
    class Meta:
        model = Prescription
        exclude = ['doctor']
        widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AppointmentForm(forms.ModelForm):
    class Meta:
        model = Appointment
        fields = '__all__'
        widgets = {'date': forms.DateInput(attrs={'type': 'date'}),
                   'time': forms.TimeInput(attrs={'type': 'time'})}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
        self.fields['doctor'].queryset = User.objects.filter(usertype='D')


class PrescriptionForm(forms.ModelForm):
    class Meta:
        model = Prescription
        exclude = ['doctor']
        widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
<|reserved_special_token_1|>
from django import forms
from .models import Appointment, Prescription
from account.models import User
class AppointmentForm(forms.ModelForm):
    class Meta:
        model = Appointment
        fields = '__all__'
        widgets = {'date': forms.DateInput(attrs={'type': 'date'}),
                   'time': forms.TimeInput(attrs={'type': 'time'})}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
        self.fields['doctor'].queryset = User.objects.filter(usertype='D')


class PrescriptionForm(forms.ModelForm):
    class Meta:
        model = Prescription
        exclude = ['doctor']
        widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
<|reserved_special_token_1|>
from django import forms
from .models import Appointment, Prescription
from account.models import User
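# Both forms narrow their user choice fields by usertype: 'P' and 'D' are presumably
# the patient and doctor user types, inferred from the field names.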
class AppointmentForm(forms.ModelForm):
    class Meta:
        model = Appointment
        fields = '__all__'
        widgets = {
            'date': forms.DateInput(attrs={'type': 'date'}),
            'time': forms.TimeInput(attrs={'type': 'time'})
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
        # self.fields['patient'].empty_label = 'select patient'
        self.fields['doctor'].queryset = User.objects.filter(usertype='D')
        # self.fields['doctor'].empty_label = 'select doctor'
class PrescriptionForm(forms.ModelForm):
    class Meta:
        model = Prescription
        exclude = ['doctor']
        widgets = {
            'prescription': forms.Textarea(attrs={'rows': 4}),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
|
flexible
|
{
"blob_id": "d3425017d4e604a8940997afd0c35a4f7eac1170",
"index": 6944,
"step-1": "<mask token>\n\n\nclass PrescriptionForm(forms.ModelForm):\n\n\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n",
"step-2": "<mask token>\n\n\nclass AppointmentForm(forms.ModelForm):\n\n\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {'date': forms.DateInput(attrs={'type': 'date'}), 'time':\n forms.TimeInput(attrs={'type': 'time'})}\n <mask token>\n\n\nclass PrescriptionForm(forms.ModelForm):\n\n\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n",
"step-3": "<mask token>\n\n\nclass AppointmentForm(forms.ModelForm):\n\n\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {'date': forms.DateInput(attrs={'type': 'date'}), 'time':\n forms.TimeInput(attrs={'type': 'time'})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n self.fields['doctor'].queryset = User.objects.filter(usertype='D')\n\n\nclass PrescriptionForm(forms.ModelForm):\n\n\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n",
"step-4": "from django import forms\nfrom .models import Appointment, Prescription\nfrom account.models import User\n\n\nclass AppointmentForm(forms.ModelForm):\n\n\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {'date': forms.DateInput(attrs={'type': 'date'}), 'time':\n forms.TimeInput(attrs={'type': 'time'})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n self.fields['doctor'].queryset = User.objects.filter(usertype='D')\n\n\nclass PrescriptionForm(forms.ModelForm):\n\n\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n",
"step-5": "from django import forms\nfrom .models import Appointment, Prescription\nfrom account.models import User\n\n\nclass AppointmentForm(forms.ModelForm):\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {\n 'date': forms.DateInput(attrs={'type': 'date'}),\n 'time': forms.TimeInput(attrs={'type': 'time'})\n\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n\n # self.fields['patient'].empty_label = 'select patient'\n self.fields['doctor'].queryset = User.objects.filter(usertype='D')\n\n # self.fields['doctor'].empty_label = 'select doctor'\n\n\nclass PrescriptionForm(forms.ModelForm):\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {\n 'prescription': forms.Textarea(attrs={'rows': 4}),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
v1 = 3 + 4 * 2
print(v1)
v2 = (2 + 6) * 2
print(v2)
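# note: ** is right-associative, so 2 ** 3 ** 2 evaluates as 2 ** (3 ** 2) == 512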
v3 = 2 ** 3 ** 2
print(v3)
v4 = 20 + 80 / 2
print(v4)
|
normal
|
{
"blob_id": "e6694403eecf2c4511c1fce959b5939f5f457bb8",
"index": 9384,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(v1)\n<mask token>\nprint(v2)\n<mask token>\nprint(v3)\n<mask token>\nprint(v4)\n",
"step-3": "v1 = 3 + 4 * 2\nprint(v1)\nv2 = (2 + 6) * 2\nprint(v2)\nv3 = 2 ** 3 ** 2\nprint(v3)\nv4 = 20 + 80 / 2\nprint(v4)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)
), 'bindings')
TMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')
DEFAULT_SYSTEM_CONFIG_DIR = None
<|reserved_special_token_1|>
from __future__ import absolute_import, unicode_literals, print_function
import os
BINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)
), 'bindings')
TMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')
DEFAULT_SYSTEM_CONFIG_DIR = None
<|reserved_special_token_1|>
# vim:fileencoding=utf-8:noet
from __future__ import absolute_import, unicode_literals, print_function
import os
BINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bindings')
TMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')
DEFAULT_SYSTEM_CONFIG_DIR = None
|
flexible
|
{
"blob_id": "c435b0f162512bb2bc0c35e1817f64c5ef9ff7bc",
"index": 1871,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)\n ), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n",
"step-3": "from __future__ import absolute_import, unicode_literals, print_function\nimport os\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)\n ), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n",
"step-4": "# vim:fileencoding=utf-8:noet\n\nfrom __future__ import absolute_import, unicode_literals, print_function\nimport os\n\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Video_Server(threading.Thread):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def run(self):
        detector, predictor = face_capture_edit.face_init(self.face_shape_predictor)
        print('face_capture_init is ready')
        print('VIDEO server starts ...')
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print('remote VIDEO client success connected ...')
        data = ''.encode('utf-8')
        payload_size = struct.calcsize('L')
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack('L', packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(89120)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            if self.face_cap == 1:
                frame_face = face_capture_edit.face_capture_e(frame.copy(), detector, predictor)
                cv2.imshow('Face_capture', frame_face)
            if self.view_version == 0:
                frame = frame
            elif self.view_version == 1:
                frame = cartoon_edit.cartoon_e(frame)
            elif self.view_version == 2:
                frame = pencil_edit.rgb_to_sketch(frame)
            cv2.namedWindow('Remote', 0)
            cv2.resizeWindow('Remote', 640, 480)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 255 == ord('q'):
                file_aip = open(self.break_audio_aip, 'w')
                file_audio = open(self.break_audio, 'w')
                break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Video_Server(threading.Thread):
<|reserved_special_token_0|>
    def __del__(self):
        self.sock.close()
        try:
            cv2.destroyAllWindows()  # fixed typo: was cv2.destoryALLWindows(), which raised and was silently swallowed
        except:
            pass
        print('video close')

    def run(self):
        detector, predictor = face_capture_edit.face_init(self.face_shape_predictor)
        print('face_capture_init is ready')
        print('VIDEO server starts ...')
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print('remote VIDEO client success connected ...')
        data = ''.encode('utf-8')
        payload_size = struct.calcsize('L')
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack('L', packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(89120)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            if self.face_cap == 1:
                frame_face = face_capture_edit.face_capture_e(frame.copy(), detector, predictor)
                cv2.imshow('Face_capture', frame_face)
            if self.view_version == 0:
                frame = frame
            elif self.view_version == 1:
                frame = cartoon_edit.cartoon_e(frame)
            elif self.view_version == 2:
                frame = pencil_edit.rgb_to_sketch(frame)
            cv2.namedWindow('Remote', 0)
            cv2.resizeWindow('Remote', 640, 480)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 255 == ord('q'):
                file_aip = open(self.break_audio_aip, 'w')
                file_audio = open(self.break_audio, 'w')
                break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Video_Server(threading.Thread):
    def __init__(self, port, version, face_cap, view_version, face_shape_predictor, break_audio_aip, break_audio):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.ADDR = '', port
        self.face_cap = face_cap
        self.view_version = view_version
        self.face_shape_predictor = face_shape_predictor
        self.break_audio = break_audio
        self.break_audio_aip = break_audio_aip
        if version == 4:
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)

    def __del__(self):
        self.sock.close()
        try:
            cv2.destroyAllWindows()  # fixed typo: was cv2.destoryALLWindows(), which raised and was silently swallowed
        except:
            pass
        print('video close')

    def run(self):
        detector, predictor = face_capture_edit.face_init(self.face_shape_predictor)
        print('face_capture_init is ready')
        print('VIDEO server starts ...')
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print('remote VIDEO client success connected ...')
        data = ''.encode('utf-8')
        payload_size = struct.calcsize('L')
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack('L', packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(89120)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            if self.face_cap == 1:
                frame_face = face_capture_edit.face_capture_e(frame.copy(), detector, predictor)
                cv2.imshow('Face_capture', frame_face)
            if self.view_version == 0:
                frame = frame
            elif self.view_version == 1:
                frame = cartoon_edit.cartoon_e(frame)
            elif self.view_version == 2:
                frame = pencil_edit.rgb_to_sketch(frame)
            cv2.namedWindow('Remote', 0)
            cv2.resizeWindow('Remote', 640, 480)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 255 == ord('q'):
                file_aip = open(self.break_audio_aip, 'w')
                file_audio = open(self.break_audio, 'w')
                break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from socket import *
import threading
import time
import cv2
import struct
import pickle
import zlib
import cartoon_edit
import face_capture_edit
import pencil_edit
class Video_Server(threading.Thread):
    def __init__(self, port, version, face_cap, view_version, face_shape_predictor, break_audio_aip, break_audio):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.ADDR = '', port
        self.face_cap = face_cap
        self.view_version = view_version
        self.face_shape_predictor = face_shape_predictor
        self.break_audio = break_audio
        self.break_audio_aip = break_audio_aip
        if version == 4:
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)

    def __del__(self):
        self.sock.close()
        try:
            cv2.destroyAllWindows()  # fixed typo: was cv2.destoryALLWindows(), which raised and was silently swallowed
        except:
            pass
        print('video close')

    def run(self):
        detector, predictor = face_capture_edit.face_init(self.face_shape_predictor)
        print('face_capture_init is ready')
        print('VIDEO server starts ...')
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print('remote VIDEO client success connected ...')
        data = ''.encode('utf-8')
        payload_size = struct.calcsize('L')
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack('L', packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(89120)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            if self.face_cap == 1:
                frame_face = face_capture_edit.face_capture_e(frame.copy(), detector, predictor)
                cv2.imshow('Face_capture', frame_face)
            if self.view_version == 0:
                frame = frame
            elif self.view_version == 1:
                frame = cartoon_edit.cartoon_e(frame)
            elif self.view_version == 2:
                frame = pencil_edit.rgb_to_sketch(frame)
            cv2.namedWindow('Remote', 0)
            cv2.resizeWindow('Remote', 640, 480)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 255 == ord('q'):
                file_aip = open(self.break_audio_aip, 'w')
                file_audio = open(self.break_audio, 'w')
                break
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 19:16:16 2019
@author: pc
"""
from socket import *
import threading
import time
import cv2
import struct
import pickle
import zlib
import cartoon_edit
import face_capture_edit
import pencil_edit
class Video_Server(threading.Thread):
    def __init__(self, port, version, face_cap, view_version, face_shape_predictor, break_audio_aip, break_audio):
        threading.Thread.__init__(self)
        self.setDaemon(True)  # daemon threads exit automatically when the main thread ends, so the program cannot hang on undestroyable threads
        self.ADDR = ('', port)  # listen on all interfaces at the given port
        self.face_cap = face_cap
        self.view_version = view_version
        self.face_shape_predictor = face_shape_predictor
        self.break_audio = break_audio
        self.break_audio_aip = break_audio_aip
        if version == 4:  # IPv4 or IPv6
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)

    def __del__(self):
        self.sock.close()
        try:
            cv2.destroyAllWindows()  # fixed typo: was cv2.destoryALLWindows(), which raised and was silently swallowed
        except:
            pass
        print("video close")
    def run(self):
        detector, predictor = face_capture_edit.face_init(self.face_shape_predictor)
        print("face_capture_init is ready")
        print("VIDEO server starts ...")
        self.sock.bind(self.ADDR)  # bind to the chosen port
        self.sock.listen(1)  # listen for one pending connection
        conn, addr = self.sock.accept()  # the server gets a new socket connected to the client
        print("remote VIDEO client success connected ...")
        data = "".encode("utf-8")  # receive buffer
        payload_size = struct.calcsize("L")  # size of the per-frame length header, used to extract each frame exactly
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:  # read until a full length header is buffered; surplus bytes stay in the buffer and merge into the next frame
                data += conn.recv(81920)
            packed_size = data[:payload_size]  # slice off the length header for one complete frame
            data = data[payload_size:]  # keep the remainder of the buffer
            msg_size = struct.unpack("L", packed_size)[0]  # unpack the header to get the frame size
            while len(data) < msg_size:
                data += conn.recv(89120)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            if self.face_cap == 1:
                frame_face = face_capture_edit.face_capture_e(frame.copy(), detector, predictor)
                cv2.imshow("Face_capture", frame_face)
            if self.view_version == 0:  # unmodified view
                frame = frame
            elif self.view_version == 1:  # cartoon effect
                frame = cartoon_edit.cartoon_e(frame)
            elif self.view_version == 2:  # pencil-sketch effect
                frame = pencil_edit.rgb_to_sketch(frame)
            cv2.namedWindow("Remote", 0)
            cv2.resizeWindow("Remote", 640, 480)
            cv2.imshow("Remote", frame)
            if cv2.waitKey(1) & 0xff == ord('q'):
                file_aip = open(self.break_audio_aip, 'w')
                file_audio = open(self.break_audio, 'w')
                break
|
flexible
|
{
"blob_id": "6b138dabf57166ec971052fff7df89ae0346e083",
"index": 1582,
"step-1": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n <mask token>\n <mask token>\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-2": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n <mask token>\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-3": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n\n def __init__(self, port, version, face_cap, view_version,\n face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)\n self.ADDR = '', port\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6, SOCK_STREAM)\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-4": "<mask token>\nfrom socket import *\nimport threading\nimport time\nimport cv2\nimport struct\nimport pickle\nimport zlib\nimport cartoon_edit\nimport face_capture_edit\nimport pencil_edit\n\n\nclass Video_Server(threading.Thread):\n\n def __init__(self, port, version, face_cap, view_version,\n face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)\n self.ADDR = '', port\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6, SOCK_STREAM)\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 1 19:16:16 2019\n\n@author: pc\n\"\"\"\n\nfrom socket import *\nimport threading\nimport time\nimport cv2\nimport struct\nimport pickle\nimport zlib\nimport cartoon_edit\nimport face_capture_edit\nimport pencil_edit\n\nclass Video_Server(threading.Thread):\n def __init__ (self, port, version, face_cap, view_version, face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)#使每个线程在主线程结束后自动退出,保证程序不会崩溃且无法销毁的情况\n self.ADDR = ('',port)#指定套接字端口号\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:#IPV4 or IPV6\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6,SOCK_STREAM)\n \n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print(\"video close\")\n \n def run(self):\n detector, predictor = face_capture_edit.face_init(self.face_shape_predictor) \n print(\"face_capture_init is ready\")\n print(\"VIDEO server starts ...\")\n self.sock.bind(self.ADDR)#关联特定的端口号\n self.sock.listen(1)#监听\n conn, addr = self.sock.accept()#服务器端创建新的套接字,与用户端连接\n print(\"remote VIDEO client success connected ...\")\n data = \"\".encode(\"utf-8\")#接收数据\n payload_size = struct.calcsize(\"L\")#记录当前缓冲区的数据长度,准确提取每一帧\n cv2.namedWindow('Remote',cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:#超过数据流的部分被截取掉,和下一次合并整合,不足时将合并下一帧到该帧\n data +=conn.recv(81920)\n packed_size = data[:payload_size]#从最初剪到指定位置,剪切操作,剪切到一个完整的一帧\n data = data[payload_size:]#从指定位置剪切到末尾\n msg_size = struct.unpack(\"L\",packed_size)[0]#解压前面的头\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),detector, predictor)\n cv2.imshow(\"Face_capture\", frame_face)\n if self.view_version == 0:#不变样式\n frame = frame\n elif self.view_version == 1:#漫画\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:#铅笔画\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow(\"Remote\",0);\n cv2.resizeWindow(\"Remote\", 640, 480);\n cv2.imshow(\"Remote\", frame)\n if cv2.waitKey(1) & 0xff == ord('q'):\n file_aip = open(self.break_audio_aip,'w')\n file_audio = open(self.break_audio,'w')\n break\n ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def __prepare_train_data(df, feature):
    groups = df.groupby(['event', 'start'])
    data = []
    labels = []
    for id, group in groups:
        values = group['CylinderBorePressure'].values
        data.append(np.reshape(values, (len(values), 1)))
        labels.append(id[0])
    return np.array(data), np.array(convert_labels(labels))


def convert_labels(labels):
    digit_labels = []
    for label in labels:
        if label == 'cut':
            digit_labels.append(0.0)
        elif label == 'sort':
            digit_labels.append(1.0)
        elif label == 'idle':
            digit_labels.append(2.0)
    return digit_labels
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('tensorflow version: {}'.format(tf.__version__))
def __prepare_train_data(df, feature):
    groups = df.groupby(['event', 'start'])
    data = []
    labels = []
    for id, group in groups:
        values = group['CylinderBorePressure'].values
        data.append(np.reshape(values, (len(values), 1)))
        labels.append(id[0])
    return np.array(data), np.array(convert_labels(labels))


def convert_labels(labels):
    digit_labels = []
    for label in labels:
        if label == 'cut':
            digit_labels.append(0.0)
        elif label == 'sort':
            digit_labels.append(1.0)
        elif label == 'idle':
            digit_labels.append(2.0)
    return digit_labels
<|reserved_special_token_0|>
tf.random.set_seed(13)
<|reserved_special_token_0|>
print(x_train_uni[0])
print(y_train_uni[0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('tensorflow version: {}'.format(tf.__version__))
def __prepare_train_data(df, feature):
    groups = df.groupby(['event', 'start'])
    data = []
    labels = []
    for id, group in groups:
        values = group['CylinderBorePressure'].values
        data.append(np.reshape(values, (len(values), 1)))
        labels.append(id[0])
    return np.array(data), np.array(convert_labels(labels))


def convert_labels(labels):
    digit_labels = []
    for label in labels:
        if label == 'cut':
            digit_labels.append(0.0)
        elif label == 'sort':
            digit_labels.append(1.0)
        elif label == 'idle':
            digit_labels.append(2.0)
    return digit_labels
TRAIN_SPLIT = 300000
BATCH_SIZE = 256
BUFFER_SIZE = 10000
tf.random.set_seed(13)
train_df = pd.read_csv('data/st-cloud.csv')
train_df = train_df.sort_values(by=['timestamp'])
train_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] == 'sort') | (train_df['event'] == 'idle')]
x_train_uni, y_train_uni = __prepare_train_data(train_df, feature='CylinderBorePressure')
print(x_train_uni[0])
print(y_train_uni[0])
train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
print('tensorflow version: {}'.format(tf.__version__))
def __prepare_train_data(df, feature):
    groups = df.groupby(['event', 'start'])
    data = []
    labels = []
    for id, group in groups:
        values = group['CylinderBorePressure'].values
        data.append(np.reshape(values, (len(values), 1)))
        labels.append(id[0])
    return np.array(data), np.array(convert_labels(labels))


def convert_labels(labels):
    digit_labels = []
    for label in labels:
        if label == 'cut':
            digit_labels.append(0.0)
        elif label == 'sort':
            digit_labels.append(1.0)
        elif label == 'idle':
            digit_labels.append(2.0)
    return digit_labels
TRAIN_SPLIT = 300000
BATCH_SIZE = 256
BUFFER_SIZE = 10000
tf.random.set_seed(13)
train_df = pd.read_csv('data/st-cloud.csv')
train_df = train_df.sort_values(by=['timestamp'])
train_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] == 'sort') | (train_df['event'] == 'idle')]
x_train_uni, y_train_uni = __prepare_train_data(train_df, feature='CylinderBorePressure')
print(x_train_uni[0])
print(y_train_uni[0])
train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
print('tensorflow version: {}'.format(tf.__version__))
def __prepare_train_data(df, feature):
    groups = df.groupby(['event', 'start'])
    data = []
    labels = []
    for id, group in groups:
        values = group['CylinderBorePressure'].values
        # Reshape data from (history_size,) to (history_size, 1)
        data.append(np.reshape(values, (len(values), 1)))
        labels.append(id[0])
    return np.array(data), np.array(convert_labels(labels))


def convert_labels(labels):
    digit_labels = []
    for label in labels:
        if label == 'cut':
            digit_labels.append(0.0)
        elif label == 'sort':
            digit_labels.append(1.0)
        elif label == 'idle':
            digit_labels.append(2.0)
    return digit_labels
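# NOTE: __prepare_train_data ignores its 'feature' argument and always reads the
# 'CylinderBorePressure' column; group[feature] was presumably intended.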
TRAIN_SPLIT = 300000
BATCH_SIZE = 256
BUFFER_SIZE = 10000
tf.random.set_seed(13)
train_df = pd.read_csv('data/st-cloud.csv')
train_df = train_df.sort_values(by=['timestamp'])
train_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] == 'sort') | (train_df['event'] == 'idle')]
x_train_uni, y_train_uni = __prepare_train_data(train_df, feature='CylinderBorePressure')
print(x_train_uni[0])
print(y_train_uni[0])
train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))
# train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
#
# val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))
# val_univariate = val_univariate.batch(BATCH_SIZE).repeat()
#
# simple_lstm_model = tf.keras.models.Sequential([
# tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]),
# tf.keras.layers.Dense(1)
# ])
#
# simple_lstm_model.compile(optimizer='adam', loss='mae')
#
# for x, y in val_univariate.take(1):
# print(simple_lstm_model.predict(x).shape)
#
# EVALUATION_INTERVAL = 200
# EPOCHS = 10
#
# simple_lstm_model.fit(train_univariate, epochs=EPOCHS,
# steps_per_epoch=EVALUATION_INTERVAL,
# validation_data=val_univariate, validation_steps=50)
# for x, y in val_univariate.take(3):
# plot = show_plot([x[0].numpy(), y[0].numpy(),
# simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model')
# plot.show()
|
flexible
|
{
"blob_id": "55030648a6b76636e456990c1d2b02baa35a695d",
"index": 9221,
"step-1": "<mask token>\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n return digit_labels\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('tensorflow version: {}'.format(tf.__version__))\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n return digit_labels\n\n\n<mask token>\ntf.random.set_seed(13)\n<mask token>\nprint(x_train_uni[0])\nprint(y_train_uni[0])\n<mask token>\n",
"step-3": "<mask token>\nprint('tensorflow version: {}'.format(tf.__version__))\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n return digit_labels\n\n\nTRAIN_SPLIT = 300000\nBATCH_SIZE = 256\nBUFFER_SIZE = 10000\ntf.random.set_seed(13)\ntrain_df = pd.read_csv('data/st-cloud.csv')\ntrain_df = train_df.sort_values(by=['timestamp'])\ntrain_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] ==\n 'sort') | (train_df['event'] == 'idle')]\nx_train_uni, y_train_uni = __prepare_train_data(train_df, feature=\n 'CylinderBorePressure')\nprint(x_train_uni[0])\nprint(y_train_uni[0])\ntrain_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni,\n y_train_uni))\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nprint('tensorflow version: {}'.format(tf.__version__))\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n return digit_labels\n\n\nTRAIN_SPLIT = 300000\nBATCH_SIZE = 256\nBUFFER_SIZE = 10000\ntf.random.set_seed(13)\ntrain_df = pd.read_csv('data/st-cloud.csv')\ntrain_df = train_df.sort_values(by=['timestamp'])\ntrain_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] ==\n 'sort') | (train_df['event'] == 'idle')]\nx_train_uni, y_train_uni = __prepare_train_data(train_df, feature=\n 'CylinderBorePressure')\nprint(x_train_uni[0])\nprint(y_train_uni[0])\ntrain_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni,\n y_train_uni))\n",
"step-5": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\n\nprint('tensorflow version: {}'.format(tf.__version__))\n\n\ndef __prepare_train_data(df, feature):\n groups = df.groupby(['event', 'start'])\n data = []\n labels = []\n for id, group in groups:\n values = group['CylinderBorePressure'].values\n # Reshape data from (history_size,) to (history_size, 1)\n data.append(np.reshape(values, (len(values), 1)))\n labels.append(id[0])\n return np.array(data), np.array(convert_labels(labels))\n\n\ndef convert_labels(labels):\n digit_labels = []\n for label in labels:\n if label == 'cut':\n digit_labels.append(0.0)\n elif label == 'sort':\n digit_labels.append(1.0)\n elif label == 'idle':\n digit_labels.append(2.0)\n\n return digit_labels\n\n\nTRAIN_SPLIT = 300000\nBATCH_SIZE = 256\nBUFFER_SIZE = 10000\n\ntf.random.set_seed(13)\n\ntrain_df = pd.read_csv('data/st-cloud.csv')\ntrain_df = train_df.sort_values(by=['timestamp'])\ntrain_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] == 'sort') | (train_df['event'] == 'idle')]\nx_train_uni, y_train_uni = __prepare_train_data(train_df, feature='CylinderBorePressure')\n\nprint(x_train_uni[0])\nprint(y_train_uni[0])\n\ntrain_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))\n# train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()\n#\n# val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))\n# val_univariate = val_univariate.batch(BATCH_SIZE).repeat()\n#\n# simple_lstm_model = tf.keras.models.Sequential([\n# tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]),\n# tf.keras.layers.Dense(1)\n# ])\n#\n# simple_lstm_model.compile(optimizer='adam', loss='mae')\n#\n# for x, y in val_univariate.take(1):\n# print(simple_lstm_model.predict(x).shape)\n#\n# EVALUATION_INTERVAL = 200\n# EPOCHS = 10\n#\n# simple_lstm_model.fit(train_univariate, epochs=EPOCHS,\n# steps_per_epoch=EVALUATION_INTERVAL,\n# validation_data=val_univariate, validation_steps=50)\n\n# for x, y in val_univariate.take(3):\n# plot = show_plot([x[0].numpy(), y[0].numpy(),\n# simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model')\n# plot.show()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QuadraticEquationsSolverConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QuadraticEquationsSolverConfig(AppConfig):
name = 'quadratic_equations_solver'
<|reserved_special_token_1|>
from django.apps import AppConfig
class QuadraticEquationsSolverConfig(AppConfig):
name = 'quadratic_equations_solver'
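# Usage note (assumption, not part of the original project files): Django loads this
# config when 'quadratic_equations_solver' (or the dotted path to this class) is
# listed in the project's INSTALLED_APPS.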
|
flexible
|
{
"blob_id": "730fc527f3d2805559e8917e846b0b13f4a9f6ee",
"index": 2316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass QuadraticEquationsSolverConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass QuadraticEquationsSolverConfig(AppConfig):\n name = 'quadratic_equations_solver'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass QuadraticEquationsSolverConfig(AppConfig):\n name = 'quadratic_equations_solver'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
问题描述
玛莎(Marsha)和比尔(Bill)拥有一系列大理石。他们希望将藏品分开,以使两者获得相等的份额。如果所有的大理石都具有相同的价值,这将很容易,因为那样他们就可以将收藏品分成两半。
但不幸的是,有些大理石比其他大理石更大或更漂亮。因此,玛莎(Marsha)和比尔(Bill)首先为每个大理石分配一个值,即一个介于1到6之间的自然数。
现在,他们希望对大理石进行分割,以使每个大理石都获得相同的总价值。不幸的是,他们意识到以这种方式分割大理石可能是不可能的(即使所有大理石的总价值是均匀的)。
例如,如果存在一个值为1的大理石,值为3的一个,值为4的两个,则不能将它们拆分为相等值的集合。因此,他们要求您编写一个程序来检查大理石是否存在合理的分区。
输入
输入中的每一行都描述了一组要分割的大理石。每一行由六个非负整数n1,n2,...,n6组成,其中ni是值i的大理石数。因此,上面的示例将由输入行``1 0 1 2 0 0''描述。大理石的最大总数为20000。
输入文件的最后一行将为“ 0 0 0 0 0 0”;不要处理此行。
输出
对于每个集合,输出"集合k:",其中k是测试用例的编号,然后是``可以被分割"或``不能被分割''。
在每个测试用例之后输出空白行。
样本输入
1 0 1 2 0 0
1 0 0 0 1 1
0 0 0 0 0 0
样本输出
集合1:
不能被分割
集合2:
可以被分割
"""
S = []
print('Input:')
while True:
s = input()
if s == '0 0 0 0 0 0':
break
S.append(s)
print('\nOutput:')
w = [1, 2, 3, 4, 5, 6]
for k in range(len(S)):
p = [int(i) for i in S[k].split()]
_sum = sum(i * j for i, j in zip(w, p))
if _sum % 2 != 0:
        print(f'Set {k + 1}:\nCannot be split')
continue
V = _sum // 2
n = len(w)
dp = [False] * (V + 1)
    dp[0] = True  # Value 0 is reachable with zero marbles
for i in range(n):
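        # Binary splitting: handle the marble count in chunks of 1, 2, 4, ...
        # so each bounded item reduces to O(log count) 0/1 knapsack groups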
num, total = 1, p[i]
while total > 0:
if num > total:
num = total
group_w = w[i] * num
for j in range(V, group_w - 1, -1):
                dp[j] = dp[j] or dp[j - group_w]
total -= num
num <<= 1
if dp[V]:
        print(f'Set {k + 1}:\nCan be split')
else:
        print(f'Set {k + 1}:\nCannot be split')
|
normal
|
{
"blob_id": "0d20b75bcc87db8f3e4bdd9d6448cc44c979de1d",
"index": 137,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('输入:')\nwhile True:\n s = input()\n if s == '0 0 0 0 0 0':\n break\n S.append(s)\nprint('\\n输出:')\n<mask token>\nfor k in range(len(S)):\n p = [int(i) for i in S[k].split()]\n _sum = sum(i * j for i, j in zip(w, p))\n if _sum % 2 != 0:\n print(f'集合{k + 1}:\\n不能被分割')\n continue\n V = _sum // 2\n n = len(w)\n dp = [False] * (V + 1)\n dp[0] = True\n for i in range(n):\n num, total = 1, p[i]\n while total > 0:\n if num > total:\n num = total\n group_w = w[i] * num\n for j in range(V, group_w - 1, -1):\n dp[j] = dp[j - group_w]\n total -= num\n num <<= 1\n if dp[V]:\n print(f'集合{k + 1}:\\n可以被分割')\n else:\n print(f'集合{k + 1}:\\n不能被分割')\n",
"step-3": "<mask token>\nS = []\nprint('输入:')\nwhile True:\n s = input()\n if s == '0 0 0 0 0 0':\n break\n S.append(s)\nprint('\\n输出:')\nw = [1, 2, 3, 4, 5, 6]\nfor k in range(len(S)):\n p = [int(i) for i in S[k].split()]\n _sum = sum(i * j for i, j in zip(w, p))\n if _sum % 2 != 0:\n print(f'集合{k + 1}:\\n不能被分割')\n continue\n V = _sum // 2\n n = len(w)\n dp = [False] * (V + 1)\n dp[0] = True\n for i in range(n):\n num, total = 1, p[i]\n while total > 0:\n if num > total:\n num = total\n group_w = w[i] * num\n for j in range(V, group_w - 1, -1):\n dp[j] = dp[j - group_w]\n total -= num\n num <<= 1\n if dp[V]:\n print(f'集合{k + 1}:\\n可以被分割')\n else:\n print(f'集合{k + 1}:\\n不能被分割')\n",
"step-4": "\"\"\"\n问题描述\n玛莎(Marsha)和比尔(Bill)拥有一系列大理石。他们希望将藏品分开,以使两者获得相等的份额。如果所有的大理石都具有相同的价值,这将很容易,因为那样他们就可以将收藏品分成两半。\n但不幸的是,有些大理石比其他大理石更大或更漂亮。因此,玛莎(Marsha)和比尔(Bill)首先为每个大理石分配一个值,即一个介于1到6之间的自然数。\n现在,他们希望对大理石进行分割,以使每个大理石都获得相同的总价值。不幸的是,他们意识到以这种方式分割大理石可能是不可能的(即使所有大理石的总价值是均匀的)。\n例如,如果存在一个值为1的大理石,值为3的一个,值为4的两个,则不能将它们拆分为相等值的集合。因此,他们要求您编写一个程序来检查大理石是否存在合理的分区。\n \n\n输入\n输入中的每一行都描述了一组要分割的大理石。每一行由六个非负整数n1,n2,...,n6组成,其中ni是值i的大理石数。因此,上面的示例将由输入行``1 0 1 2 0 0''描述。大理石的最大总数为20000。\n\n输入文件的最后一行将为“ 0 0 0 0 0 0”;不要处理此行。\n\n输出\n对于每个集合,输出\"集合k:\",其中k是测试用例的编号,然后是``可以被分割\"或``不能被分割''。\n\n在每个测试用例之后输出空白行。\n \n\n样本输入\n1 0 1 2 0 0\n1 0 0 0 1 1\n0 0 0 0 0 0\n \n\n样本输出\n集合1:\n不能被分割\n\n集合2:\n可以被分割\n\"\"\"\n\n\nS = []\nprint('输入:')\nwhile True:\n s = input()\n if s == '0 0 0 0 0 0':\n break\n S.append(s)\n\nprint('\\n输出:')\nw = [1, 2, 3, 4, 5, 6]\nfor k in range(len(S)):\n p = [int(i) for i in S[k].split()]\n _sum = sum(i * j for i, j in zip(w, p))\n if _sum % 2 != 0:\n print(f'集合{k + 1}:\\n不能被分割')\n continue\n V = _sum // 2\n\n n = len(w)\n dp = [False] * (V + 1)\n dp[0] = True # 只有0件物品能达到0价值\n\n for i in range(n):\n num, total = 1, p[i]\n while total > 0:\n if num > total:\n num = total\n group_w = w[i] * num\n for j in range(V, group_w - 1, -1):\n dp[j] = dp[j - group_w]\n total -= num\n num <<= 1\n\n if dp[V]:\n print(f'集合{k + 1}:\\n可以被分割')\n else:\n print(f'集合{k + 1}:\\n不能被分割')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import random
def patternToNumber(pattern):
if len(pattern) == 0:
return 0
return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])
def symbolToNumber(symbol):
if symbol == "A":
return 0
if symbol == "C":
return 1
if symbol == "G":
return 2
if symbol == "T":
return 3
def numberToPattern(index, k):
if k == 1:
return numberToSymbol(index)
return numberToPattern(index // 4, k-1) + numberToSymbol(index % 4)
def numberToSymbol(index):
if index == 0:
return "A"
if index == 1:
return "C"
if index == 2:
return "G"
if index == 3:
return "T"
def profileProbable(text, k, profile):
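    # Return the k-mer in text with the highest probability under the 4 x k profile matrix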
maxprob = 0
kmer = text[0:k]
    for i in range(0, len(text) - k + 1):
        prob = 1
        pattern = text[i:i + k]
        for j in range(k):
            l = symbolToNumber(pattern[j])
            prob *= profile[l][j]
        if prob > maxprob:
            maxprob = prob
kmer = pattern
return kmer
def hammingDistance(p, q):
ham = 0
for index, y in zip(p, q):
if index != y:
            ham += 1
return ham
def distanceBetweenPatternAndString(pattern, DNA):
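    # Sum, over every string in DNA, the minimum Hamming distance between pattern and any k-mer of that string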
k = len(pattern)
distance = 0
for index in DNA:
        hamming = k + 1
        for i in range(len(index) - k + 1):
            z = hammingDistance(pattern, index[i:i + k])
if hamming > z:
hamming = z
distance += hamming
return distance
def profileForm(motifs):
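    # Build a 4 x k frequency profile; counts start at 1 (Laplace pseudocounts) so no k-mer gets probability zero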
    k = len(motifs[0])
    profile = [[1 for i in range(k)] for j in range(4)]
    for index in motifs:
        for i in range(len(index)):
            j = symbolToNumber(index[i])
            profile[j][i] += 1
    for index in profile:
        for i in range(len(index)):
            index[i] = index[i] / len(motifs)
return profile
def consensus(profile):
str = ""
for i in range(len(profile[0])):
max = 0
loc = 0
for j in range(4):
if profile[j][i] > max:
loc = j
max = profile[j][i]
        str += numberToSymbol(loc)
return str
def score(motifs):
profile = profileForm(motifs)
cons = consensus(profile)
score = 0
for index in motifs:
for i in range(len(index)):
if cons[i] != index[i]:
                score += 1
return score
def randomMotifSearch(DNA, k, t):
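    # Seed with one random k-mer per string, then alternate: build a profile from the
    # current motifs and reselect each string's profile-most-probable k-mer; stop when
    # the score no longer improves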
bestMotifs = []
motifs = []
for index in range(t):
random.seed()
        i = random.randint(0, len(DNA[index]) - k)
        motifs.append(DNA[index][i:i + k])
bestMotifs = motifs.copy()
count = 0
while True:
profile = profileForm(motifs)
for index in range(t):
motifs[index] = profileProbable(DNA[index], k, profile)
if score(motifs) < score(bestMotifs):
bestMotifs = motifs.copy()
            count += 1
else:
print(count)
return bestMotifs
k = 15
t = 20
DNA = ["ACTTATATCTAGAGTAAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCGAGTGATTGAACTGACTTATATCTAGAGT", "AAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCCTCTCGATCACCGACGAGTGATTGAACTGACTTATATCTAGAGT", "CACTCCCGTCCGTCTGACGCCAGGTGCTCTACCCCGCTGATTGTCTGGTACATAGCAGCCTATAGATCACCGATGCAGAAACACTTCGAGGCAGCCGATTTCGCTTATCACAACGTGACGGAATTTGATAAACCACGTACTCTAATACCGTCACGGGCCCATCAACGAA", "ACAAGAACTGGTGGGGAGACTATGACACTCTAGCGGTCGCATAAGGGCCGGAAACCAGGACAAATCGATAAGATGAAGCGGGGATATAAGCCTTATACTGCGACTGGTTCCTTATATTATTTAGCCCCGATTGATCACCGATTAAAATATTCTGCGGTTTTCGAGACGG", "TAACCACACCTAAAATTTTTCTTGGTGAGATGGACCCCCGCCGTAAATATCAGGATTAAATGTACGGATACCCATGACCCTCCAGTCATCTACCTTCCCGTGGTGGTCGCTCAGCCTTGTGCAGACCGAACTAGCACCTGTCACATACAATGTTGCCCGCATAGATCGT", "ATCCGACAGAGGCAGTGAATAAGGTTTCGTTTCCTCAGAGAGTAGAACTGCGTGTGACCTTGCCTTCACCGACATCCGTTTCCAATTGAGCTTTTCAGGACGTTTAGGTAACTGATTGTCATTGCAATTGTCCGGGGGATTTAGATGGCCGGGTACCTCTCGGACTATA", "CCTTGTTGCCACCGATTCGCGAGCAACATCGGAGTGCTCTGATTCACGGCGATGCTCCACGAAGAGGACCGCGGCACGACACGCCCTGTACCTACGTTTCTGGATATCCTCCGGCGAGTTAATAGAGCAATACGACCTGGTCGTCGAGATCGTGTATCTAGCCCTACCT", "ATAGGTTAACGAATCAGGAGAGTTAATTTTACCTAGCTAGAGCGGACGGTGCCTGGCTGTATTCGCGTTTGACTTTCGGGCTCGCTGATAACTTGTGATCACCTTTTACGCTTACTGGATCCAACGATGGATCAAAGTTGAGAATTTCTGTGCCTTGGGTGTGAGCTGT", "CTGACGAAAGGACGGGCGGTGTACTTAGTTTGGGGTAAAATAGTTGGTATAATTCTGTGCGACAGACATTTGGTCAGGCCATACTGCCATATCGTGATGTAACTATCCACACTACGTCATAGGCCCTTGTGATCAATTAAACGTTCCTCATGCCAGGCTATCTGTTTAA", "GGCTTCGCGTTTAAGGCTGGATTAAGTACTCCGCCTTGTGATCTGTGATCCTCCGACCTGTGATCAGCAAGATTGGAACCTAGGTAGGCGGCGGGTCTACGCTGGCCCACAATCGTGAGTCCCCCACTCCGTAGGTTGTGGAATTTATAGACCCGCAAGGGGCACCACT", "AGGATGACACCCAGGATGAATCTGGATTAGGAACACCAACCCGACATATTTGTTACCGCTGCAGCATTTCGCTCTTGGACGCGTAACCCGAGATCCGTCTCGCGATCGTCACGGATCGGGATTATGCAGGCAATACCTTGTGATCACTCCGCGCTTGGTTTTGCTAGCG", "ACATCTCTAGTCACTTTTATTGAGCAGGTGGGCGGATTCATGATCCGGCTCTGTCGTACGTCCAACCACGGTGACATGTTCGGAGCTGTCGCCGTGGAGCAGAGATACATCGGATCTATCAATTTTACTAAGAGCAACTAGCCACGACAAACTGTGATCACCGATTGGA", "AATTTGCGTATCTCTAGGACTCCCTCATACAAATCAAAGCTTGGATGGGTAAGATGCCGCAGCAGCAGGTATCTCATATTGGCTATTAAGAGCCAGGCCCTATGGCCTTAGTATCACCGATCAGACGTCGCATGAGCGGGCCCGTTGTCCTATCTCTTTAGCTGCCGCA", "GAAGTAAAGGGGTTCCACTGCGTAGAGCGTGCCCCTCTGGTGTGCCGTACTGTTATGGTGATACAGCTTCCTTATACCCCTCGTAAAGCGGCTAATGGTCCTAATGAATGCCCTTGTGAAATCCGAATCGCTTTACAATTGCGTTCGGCGGAATGCAGTCACCAGTGTT", "TACACTACGCGTTATTTACTTTTACTGAGTCCTTGTCGCCACCGAACGAGGATTGTTCATTGTATCCGGAGATTAGGAGTTCGCATCGCTGACACAGCCAGTTCGTAGCAAATACCGCTGGCCCTGGGCACTCCAGATCAGAACTACTAGCCCTAAACTCTATGACACA", "TTGGGTCTCGATCCCTCTATGTTAAGCTGTTCCGTGGAGAATCTCCTGGGTTTTATGATTTGAATGACGAGAATTGGGAAGTCGGGATGTTGTGATCACCGCCGTTCGCTTTCATAAATGAACCCCTTTTTTTCAGCAGACGGTGGCCTTTCCCTTTCATCATTATACA", "TTTCAAGTTACTACCGCCCTCTAGCGATAGAACTGAGGCAAATCATACACCGTGATCACCGACCCATGGAGTTTGACTCAGATTTACACTTTTAGGGGAACATGTTTGTCGGTCAGAGGTGTCAATTATTAGCAGATATCCCCCAACGCAGCGAGAGAGCACGGAGTGA", "GATCCATTACCCTACGATATGTATATAGCGCCCTAGTACGGCTTCTCCCTTGCAGACACGCAGGCGCTGTGCGCTATCGGCTTCCTCGGACATTCCTGGATATAAGTAACGGCGAACTGGCTATCACTACCGCCGCTCCTTAAGCCTTGGTTTCACCGACGATTGTCGT", "TAGTAGATTATTACCTGTGGACCGTTAGCTTCAAGACCGAAACGTTGGTGATGCTACTTAAATGTCAAGAGTTGCGAAGTTGGGCGAAGCACATCCGTACTCCCAAGTGGACGATCGATAGATCCATGGAGTTTCCATCCATCTTAATCCGCCCTTTGCATCACCGACG", "TACAAGGCACAAACGAGACCTGATCGAACGGTGCACGGTCGAGGCAGCGAGATAAATGTACATTGAGAGCACCTTGTGATTTACGACCTGCATCGAAGGTTTCTTGGCACCCACCTGTCGTCCGCCAGGGCAGAGCCGACATTATATGACGCTGATGTACGAAGCCCCT"]
best = randomMotifSearch(DNA, k, t)
best_score = score(best)
for index in range(1000):
    print(index)
    a = randomMotifSearch(DNA, k, t)
    if score(a) < score(best):
        best = a
        best_score = score(a)
print(best_score)
for index in best:
    print(index)
|
normal
|
{
"blob_id": "51848a64102f7fe8272fcf56a9792ed50c430538",
"index": 9115,
"step-1": "<mask token>\n\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\n\ndef symbolToNumber(symbol):\n if symbol == 'A':\n return 0\n if symbol == 'C':\n return 1\n if symbol == 'G':\n return 2\n if symbol == 'T':\n return 3\n\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4)\n\n\n<mask token>\n\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k + 1):\n prob = 1\n pattern = text[i:i + k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile[l][j]\n if prob > maxprob:\n maxprob = prob\n kmer = pattern\n return kmer\n\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham += 1\n return ham\n\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k + 1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i + k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\n\ndef profileForm(motifs):\n k = len(motifs[0])\n profile = [[(1) for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] += 1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i] / len(motifs)\n return profile\n\n\n<mask token>\n\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score += 1\n return score\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\n\ndef symbolToNumber(symbol):\n if symbol == 'A':\n return 0\n if symbol == 'C':\n return 1\n if symbol == 'G':\n return 2\n if symbol == 'T':\n return 3\n\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4)\n\n\ndef numberToSymbol(index):\n if index == 0:\n return 'A'\n if index == 1:\n return 'C'\n if index == 2:\n return 'G'\n if index == 3:\n return 'T'\n\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k + 1):\n prob = 1\n pattern = text[i:i + k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile[l][j]\n if prob > maxprob:\n maxprob = prob\n kmer = pattern\n return kmer\n\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham += 1\n return ham\n\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k + 1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i + k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\n\ndef profileForm(motifs):\n k = len(motifs[0])\n profile = [[(1) for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] += 1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i] / len(motifs)\n return profile\n\n\n<mask token>\n\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score += 1\n return score\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\n\ndef symbolToNumber(symbol):\n if symbol == 'A':\n return 0\n if symbol == 'C':\n return 1\n if symbol == 'G':\n return 2\n if symbol == 'T':\n return 3\n\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4)\n\n\ndef numberToSymbol(index):\n if index == 0:\n return 'A'\n if index == 1:\n return 'C'\n if index == 2:\n return 'G'\n if index == 3:\n return 'T'\n\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k + 1):\n prob = 1\n pattern = text[i:i + k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile[l][j]\n if prob > maxprob:\n maxprob = prob\n kmer = pattern\n return kmer\n\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham += 1\n return ham\n\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k + 1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i + k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\n\ndef profileForm(motifs):\n k = len(motifs[0])\n profile = [[(1) for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] += 1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i] / len(motifs)\n return profile\n\n\ndef consensus(profile):\n str = ''\n for i in range(len(profile[0])):\n max = 0\n loc = 0\n for j in range(4):\n if profile[j][i] > max:\n loc = j\n max = profile[j][i]\n str += numberToSymbol(loc)\n return str\n\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score += 1\n return score\n\n\ndef randomMotifSearch(DNA, k, t):\n bestMotifs = []\n motifs = []\n for index in range(t):\n random.seed()\n i = random.randint(0, len(DNA[index]) - k)\n motifs.append(DNA[index][i:i + k])\n bestMotifs = motifs.copy()\n count = 0\n while True:\n profile = profileForm(motifs)\n for index in range(t):\n motifs[index] = profileProbable(DNA[index], k, profile)\n if score(motifs) < score(bestMotifs):\n bestMotifs = motifs.copy()\n count += 1\n else:\n print(count)\n return bestMotifs\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\n\ndef symbolToNumber(symbol):\n if symbol == 'A':\n return 0\n if symbol == 'C':\n return 1\n if symbol == 'G':\n return 2\n if symbol == 'T':\n return 3\n\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k - 1) + numberToSymbol(index % 4)\n\n\ndef numberToSymbol(index):\n if index == 0:\n return 'A'\n if index == 1:\n return 'C'\n if index == 2:\n return 'G'\n if index == 3:\n return 'T'\n\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k + 1):\n prob = 1\n pattern = text[i:i + k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile[l][j]\n if prob > maxprob:\n maxprob = prob\n kmer = pattern\n return kmer\n\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham += 1\n return ham\n\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k + 1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i + k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\n\ndef profileForm(motifs):\n k = len(motifs[0])\n profile = [[(1) for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] += 1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i] / len(motifs)\n return profile\n\n\ndef consensus(profile):\n str = ''\n for i in range(len(profile[0])):\n max = 0\n loc = 0\n for j in range(4):\n if profile[j][i] > max:\n loc = j\n max = profile[j][i]\n str += numberToSymbol(loc)\n return str\n\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score += 1\n return score\n\n\ndef randomMotifSearch(DNA, k, t):\n bestMotifs = []\n motifs = []\n for index in range(t):\n random.seed()\n i = random.randint(0, len(DNA[index]) - k)\n motifs.append(DNA[index][i:i + k])\n bestMotifs = motifs.copy()\n count = 0\n while True:\n profile = profileForm(motifs)\n for index in range(t):\n motifs[index] = profileProbable(DNA[index], k, profile)\n if score(motifs) < score(bestMotifs):\n bestMotifs = motifs.copy()\n count += 1\n else:\n print(count)\n return bestMotifs\n\n\nk = 15\nt = 20\nDNA = [\n 'ACTTATATCTAGAGTAAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCGAGTGATTGAACTGACTTATATCTAGAGT'\n ,\n 'AAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCCTCTCGATCACCGACGAGTGATTGAACTGACTTATATCTAGAGT'\n ,\n 'CACTCCCGTCCGTCTGACGCCAGGTGCTCTACCCCGCTGATTGTCTGGTACATAGCAGCCTATAGATCACCGATGCAGAAACACTTCGAGGCAGCCGATTTCGCTTATCACAACGTGACGGAATTTGATAAACCACGTACTCTAATACCGTCACGGGCCCATCAACGAA'\n ,\n 'ACAAGAACTGGTGGGGAGACTATGACACTCTAGCGGTCGCATAAGGGCCGGAAACCAGGACAAATCGATAAGATGAAGCGGGGATATAAGCCTTATACTGCGACTGGTTCCTTATATTATTTAGCCCCGATTGATCACCGATTAAAATATTCTGCGGTTTTCGAGACGG'\n ,\n 'TAACCACACCTAAAATTTTTCTTGGTGAGATGGACCCCCGCCGTAAATATCAGGATTAAATGTACGGATACCCATGACCCTCCAGTCATCTACCTTCCCGTGGTGGTCGCTCAGCCTTGTGCAGACCGAACTAGCACCTGTCACATACAATGTTGCCCGCATAGATCGT'\n ,\n 
'ATCCGACAGAGGCAGTGAATAAGGTTTCGTTTCCTCAGAGAGTAGAACTGCGTGTGACCTTGCCTTCACCGACATCCGTTTCCAATTGAGCTTTTCAGGACGTTTAGGTAACTGATTGTCATTGCAATTGTCCGGGGGATTTAGATGGCCGGGTACCTCTCGGACTATA'\n ,\n 'CCTTGTTGCCACCGATTCGCGAGCAACATCGGAGTGCTCTGATTCACGGCGATGCTCCACGAAGAGGACCGCGGCACGACACGCCCTGTACCTACGTTTCTGGATATCCTCCGGCGAGTTAATAGAGCAATACGACCTGGTCGTCGAGATCGTGTATCTAGCCCTACCT'\n ,\n 'ATAGGTTAACGAATCAGGAGAGTTAATTTTACCTAGCTAGAGCGGACGGTGCCTGGCTGTATTCGCGTTTGACTTTCGGGCTCGCTGATAACTTGTGATCACCTTTTACGCTTACTGGATCCAACGATGGATCAAAGTTGAGAATTTCTGTGCCTTGGGTGTGAGCTGT'\n ,\n 'CTGACGAAAGGACGGGCGGTGTACTTAGTTTGGGGTAAAATAGTTGGTATAATTCTGTGCGACAGACATTTGGTCAGGCCATACTGCCATATCGTGATGTAACTATCCACACTACGTCATAGGCCCTTGTGATCAATTAAACGTTCCTCATGCCAGGCTATCTGTTTAA'\n ,\n 'GGCTTCGCGTTTAAGGCTGGATTAAGTACTCCGCCTTGTGATCTGTGATCCTCCGACCTGTGATCAGCAAGATTGGAACCTAGGTAGGCGGCGGGTCTACGCTGGCCCACAATCGTGAGTCCCCCACTCCGTAGGTTGTGGAATTTATAGACCCGCAAGGGGCACCACT'\n ,\n 'AGGATGACACCCAGGATGAATCTGGATTAGGAACACCAACCCGACATATTTGTTACCGCTGCAGCATTTCGCTCTTGGACGCGTAACCCGAGATCCGTCTCGCGATCGTCACGGATCGGGATTATGCAGGCAATACCTTGTGATCACTCCGCGCTTGGTTTTGCTAGCG'\n ,\n 'ACATCTCTAGTCACTTTTATTGAGCAGGTGGGCGGATTCATGATCCGGCTCTGTCGTACGTCCAACCACGGTGACATGTTCGGAGCTGTCGCCGTGGAGCAGAGATACATCGGATCTATCAATTTTACTAAGAGCAACTAGCCACGACAAACTGTGATCACCGATTGGA'\n ,\n 'AATTTGCGTATCTCTAGGACTCCCTCATACAAATCAAAGCTTGGATGGGTAAGATGCCGCAGCAGCAGGTATCTCATATTGGCTATTAAGAGCCAGGCCCTATGGCCTTAGTATCACCGATCAGACGTCGCATGAGCGGGCCCGTTGTCCTATCTCTTTAGCTGCCGCA'\n ,\n 'GAAGTAAAGGGGTTCCACTGCGTAGAGCGTGCCCCTCTGGTGTGCCGTACTGTTATGGTGATACAGCTTCCTTATACCCCTCGTAAAGCGGCTAATGGTCCTAATGAATGCCCTTGTGAAATCCGAATCGCTTTACAATTGCGTTCGGCGGAATGCAGTCACCAGTGTT'\n ,\n 'TACACTACGCGTTATTTACTTTTACTGAGTCCTTGTCGCCACCGAACGAGGATTGTTCATTGTATCCGGAGATTAGGAGTTCGCATCGCTGACACAGCCAGTTCGTAGCAAATACCGCTGGCCCTGGGCACTCCAGATCAGAACTACTAGCCCTAAACTCTATGACACA'\n ,\n 'TTGGGTCTCGATCCCTCTATGTTAAGCTGTTCCGTGGAGAATCTCCTGGGTTTTATGATTTGAATGACGAGAATTGGGAAGTCGGGATGTTGTGATCACCGCCGTTCGCTTTCATAAATGAACCCCTTTTTTTCAGCAGACGGTGGCCTTTCCCTTTCATCATTATACA'\n ,\n 'TTTCAAGTTACTACCGCCCTCTAGCGATAGAACTGAGGCAAATCATACACCGTGATCACCGACCCATGGAGTTTGACTCAGATTTACACTTTTAGGGGAACATGTTTGTCGGTCAGAGGTGTCAATTATTAGCAGATATCCCCCAACGCAGCGAGAGAGCACGGAGTGA'\n ,\n 'GATCCATTACCCTACGATATGTATATAGCGCCCTAGTACGGCTTCTCCCTTGCAGACACGCAGGCGCTGTGCGCTATCGGCTTCCTCGGACATTCCTGGATATAAGTAACGGCGAACTGGCTATCACTACCGCCGCTCCTTAAGCCTTGGTTTCACCGACGATTGTCGT'\n ,\n 'TAGTAGATTATTACCTGTGGACCGTTAGCTTCAAGACCGAAACGTTGGTGATGCTACTTAAATGTCAAGAGTTGCGAAGTTGGGCGAAGCACATCCGTACTCCCAAGTGGACGATCGATAGATCCATGGAGTTTCCATCCATCTTAATCCGCCCTTTGCATCACCGACG'\n ,\n 'TACAAGGCACAAACGAGACCTGATCGAACGGTGCACGGTCGAGGCAGCGAGATAAATGTACATTGAGAGCACCTTGTGATTTACGACCTGCATCGAAGGTTTCTTGGCACCCACCTGTCGTCCGCCAGGGCAGAGCCGACATTATATGACGCTGATGTACGAAGCCCCT'\n ]\nbest = randomMotifSearch(DNA, k, t)\nmin = score(best)\nfor index in range(1000):\n print(index)\n a = randomMotifSearch(DNA, k, t)\n if score(a) < score(best):\n best = a\n min = score(a)\nprint(min)\nfor index in best:\n print(index)\n",
"step-5": "import random\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n return 4 * patternToNumber(pattern[0:-1]) + symbolToNumber(pattern[-1:])\n\ndef symbolToNumber(symbol):\n if symbol == \"A\":\n return 0\n if symbol == \"C\":\n return 1\n if symbol == \"G\":\n return 2\n if symbol == \"T\":\n return 3\n\ndef numberToPattern(index, k):\n if k == 1:\n return numberToSymbol(index)\n return numberToPattern(index // 4, k-1) + numberToSymbol(index % 4)\n\ndef numberToSymbol(index):\n if index == 0:\n return \"A\"\n if index == 1:\n return \"C\"\n if index == 2:\n return \"G\"\n if index == 3:\n return \"T\"\n\ndef profileProbable(text, k, profile):\n maxprob = 0\n kmer = text[0:k]\n for i in range(0, len(text) - k +1):\n prob =1\n pattern =text[i:i+k]\n for j in range(k):\n l = symbolToNumber(pattern[j])\n prob *= profile [l][j]\n if prob > maxprob:\n maxprob =prob\n kmer = pattern\n return kmer\n\ndef hammingDistance(p, q):\n ham = 0\n for index, y in zip(p, q):\n if index != y:\n ham +=1\n return ham\n\ndef distanceBetweenPatternAndString(pattern, DNA):\n k = len(pattern)\n distance = 0\n for index in DNA:\n hamming = k+1\n for i in range(len(index) - k + 1):\n z = hammingDistance(pattern, index[i:i+k])\n if hamming > z:\n hamming = z\n distance += hamming\n return distance\n\ndef profileForm(motifs):\n k= len(motifs[0])\n profile = [[1 for i in range(k)] for j in range(4)]\n for index in motifs:\n for i in range(len(index)):\n j = symbolToNumber(index[i])\n profile[j][i] +=1\n for index in profile:\n for i in range(len(index)):\n index[i] = index[i]/len(motifs)\n return profile\n\ndef consensus(profile):\n str = \"\"\n for i in range(len(profile[0])):\n max = 0\n loc = 0\n for j in range(4):\n if profile[j][i] > max:\n loc = j\n max = profile[j][i]\n str+=numberToSymbol(loc)\n return str\n\ndef score(motifs):\n profile = profileForm(motifs)\n cons = consensus(profile)\n score = 0\n for index in motifs:\n for i in range(len(index)):\n if cons[i] != index[i]:\n score +=1\n return score\n\ndef randomMotifSearch(DNA, k, t):\n bestMotifs = []\n motifs = []\n for index in range(t):\n random.seed()\n i= random.randint(0, len(DNA[index])-k)\n motifs.append(DNA[index][i:i+k])\n bestMotifs = motifs.copy()\n count = 0\n while True:\n profile = profileForm(motifs)\n for index in range(t):\n motifs[index] = profileProbable(DNA[index], k, profile)\n if score(motifs) < score(bestMotifs):\n bestMotifs = motifs.copy()\n count +=1\n else:\n print(count)\n return bestMotifs\n\nk = 15\nt = 20\nDNA = [\"ACTTATATCTAGAGTAAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCGAGTGATTGAACTGACTTATATCTAGAGT\", \"AAAGCCCTGATTCCATTGACGCGATCCCTACCTCCATCATACTCCACAGGTTCTTCAATAGAACATGGGGAAAACTGAGGTACACCAGGTCTAACGGAGATTTCTGGCACTAACTACCCAAAATCCTCTCGATCACCGACGAGTGATTGAACTGACTTATATCTAGAGT\", \"CACTCCCGTCCGTCTGACGCCAGGTGCTCTACCCCGCTGATTGTCTGGTACATAGCAGCCTATAGATCACCGATGCAGAAACACTTCGAGGCAGCCGATTTCGCTTATCACAACGTGACGGAATTTGATAAACCACGTACTCTAATACCGTCACGGGCCCATCAACGAA\", \"ACAAGAACTGGTGGGGAGACTATGACACTCTAGCGGTCGCATAAGGGCCGGAAACCAGGACAAATCGATAAGATGAAGCGGGGATATAAGCCTTATACTGCGACTGGTTCCTTATATTATTTAGCCCCGATTGATCACCGATTAAAATATTCTGCGGTTTTCGAGACGG\", \"TAACCACACCTAAAATTTTTCTTGGTGAGATGGACCCCCGCCGTAAATATCAGGATTAAATGTACGGATACCCATGACCCTCCAGTCATCTACCTTCCCGTGGTGGTCGCTCAGCCTTGTGCAGACCGAACTAGCACCTGTCACATACAATGTTGCCCGCATAGATCGT\", 
\"ATCCGACAGAGGCAGTGAATAAGGTTTCGTTTCCTCAGAGAGTAGAACTGCGTGTGACCTTGCCTTCACCGACATCCGTTTCCAATTGAGCTTTTCAGGACGTTTAGGTAACTGATTGTCATTGCAATTGTCCGGGGGATTTAGATGGCCGGGTACCTCTCGGACTATA\", \"CCTTGTTGCCACCGATTCGCGAGCAACATCGGAGTGCTCTGATTCACGGCGATGCTCCACGAAGAGGACCGCGGCACGACACGCCCTGTACCTACGTTTCTGGATATCCTCCGGCGAGTTAATAGAGCAATACGACCTGGTCGTCGAGATCGTGTATCTAGCCCTACCT\", \"ATAGGTTAACGAATCAGGAGAGTTAATTTTACCTAGCTAGAGCGGACGGTGCCTGGCTGTATTCGCGTTTGACTTTCGGGCTCGCTGATAACTTGTGATCACCTTTTACGCTTACTGGATCCAACGATGGATCAAAGTTGAGAATTTCTGTGCCTTGGGTGTGAGCTGT\", \"CTGACGAAAGGACGGGCGGTGTACTTAGTTTGGGGTAAAATAGTTGGTATAATTCTGTGCGACAGACATTTGGTCAGGCCATACTGCCATATCGTGATGTAACTATCCACACTACGTCATAGGCCCTTGTGATCAATTAAACGTTCCTCATGCCAGGCTATCTGTTTAA\", \"GGCTTCGCGTTTAAGGCTGGATTAAGTACTCCGCCTTGTGATCTGTGATCCTCCGACCTGTGATCAGCAAGATTGGAACCTAGGTAGGCGGCGGGTCTACGCTGGCCCACAATCGTGAGTCCCCCACTCCGTAGGTTGTGGAATTTATAGACCCGCAAGGGGCACCACT\", \"AGGATGACACCCAGGATGAATCTGGATTAGGAACACCAACCCGACATATTTGTTACCGCTGCAGCATTTCGCTCTTGGACGCGTAACCCGAGATCCGTCTCGCGATCGTCACGGATCGGGATTATGCAGGCAATACCTTGTGATCACTCCGCGCTTGGTTTTGCTAGCG\", \"ACATCTCTAGTCACTTTTATTGAGCAGGTGGGCGGATTCATGATCCGGCTCTGTCGTACGTCCAACCACGGTGACATGTTCGGAGCTGTCGCCGTGGAGCAGAGATACATCGGATCTATCAATTTTACTAAGAGCAACTAGCCACGACAAACTGTGATCACCGATTGGA\", \"AATTTGCGTATCTCTAGGACTCCCTCATACAAATCAAAGCTTGGATGGGTAAGATGCCGCAGCAGCAGGTATCTCATATTGGCTATTAAGAGCCAGGCCCTATGGCCTTAGTATCACCGATCAGACGTCGCATGAGCGGGCCCGTTGTCCTATCTCTTTAGCTGCCGCA\", \"GAAGTAAAGGGGTTCCACTGCGTAGAGCGTGCCCCTCTGGTGTGCCGTACTGTTATGGTGATACAGCTTCCTTATACCCCTCGTAAAGCGGCTAATGGTCCTAATGAATGCCCTTGTGAAATCCGAATCGCTTTACAATTGCGTTCGGCGGAATGCAGTCACCAGTGTT\", \"TACACTACGCGTTATTTACTTTTACTGAGTCCTTGTCGCCACCGAACGAGGATTGTTCATTGTATCCGGAGATTAGGAGTTCGCATCGCTGACACAGCCAGTTCGTAGCAAATACCGCTGGCCCTGGGCACTCCAGATCAGAACTACTAGCCCTAAACTCTATGACACA\", \"TTGGGTCTCGATCCCTCTATGTTAAGCTGTTCCGTGGAGAATCTCCTGGGTTTTATGATTTGAATGACGAGAATTGGGAAGTCGGGATGTTGTGATCACCGCCGTTCGCTTTCATAAATGAACCCCTTTTTTTCAGCAGACGGTGGCCTTTCCCTTTCATCATTATACA\", \"TTTCAAGTTACTACCGCCCTCTAGCGATAGAACTGAGGCAAATCATACACCGTGATCACCGACCCATGGAGTTTGACTCAGATTTACACTTTTAGGGGAACATGTTTGTCGGTCAGAGGTGTCAATTATTAGCAGATATCCCCCAACGCAGCGAGAGAGCACGGAGTGA\", \"GATCCATTACCCTACGATATGTATATAGCGCCCTAGTACGGCTTCTCCCTTGCAGACACGCAGGCGCTGTGCGCTATCGGCTTCCTCGGACATTCCTGGATATAAGTAACGGCGAACTGGCTATCACTACCGCCGCTCCTTAAGCCTTGGTTTCACCGACGATTGTCGT\", \"TAGTAGATTATTACCTGTGGACCGTTAGCTTCAAGACCGAAACGTTGGTGATGCTACTTAAATGTCAAGAGTTGCGAAGTTGGGCGAAGCACATCCGTACTCCCAAGTGGACGATCGATAGATCCATGGAGTTTCCATCCATCTTAATCCGCCCTTTGCATCACCGACG\", \"TACAAGGCACAAACGAGACCTGATCGAACGGTGCACGGTCGAGGCAGCGAGATAAATGTACATTGAGAGCACCTTGTGATTTACGACCTGCATCGAAGGTTTCTTGGCACCCACCTGTCGTCCGCCAGGGCAGAGCCGACATTATATGACGCTGATGTACGAAGCCCCT\"]\nbest = randomMotifSearch(DNA, k, t)\nmin = score(best)\nfor index in range(1000):\n print(index)\n a = randomMotifSearch(DNA, k, t)\n if score(a) < score(best):\n best = a\n min = score(a)\nprint(min)\nfor index in best:\n print(index)",
"step-ids": [
8,
9,
11,
13,
15
]
}
|
[
8,
9,
11,
13,
15
] |
<|reserved_special_token_0|>
class CampaignPerformance:
<|reserved_special_token_0|>
def __init__(self, campaign, start):
self.campaign = campaign
self.start = start
self.BUDGETS_NAME = 'Budgets'
self.required_ran = False
<|reserved_special_token_0|>
def _get_start_date(self):
""" self.start = week, month, quarter, year, all, or %Y-%m-%d date
"""
today = datetimedate.today()
if self.start == 'week':
start_date = today - timedelta(days=today.weekday())
elif self.start == 'month':
start_date = today.replace(day=1)
elif self.start == 'quarter':
quarter = math.ceil(today.month / 3)
start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)
elif self.start == 'year':
start_date = datetimedate(today.year, 1, 1)
elif self.start == 'all':
start_date = datetimedate(2010, 1, 1)
else:
try:
start_date = datetime.strptime(self.start, '%Y-%m-%d').date()
except Exception as e:
raise ParseError('start argument not valid')
self.start_date = start_date
def _get_querysets(self):
spend = Spend.objects.filter(platform__client=self.campaign.client)
spend = spend.filter(platform__pk__in=self.campaign.platforms.
values_list('pk', flat=True))
spend = spend.filter(end_date__gte=self.start_date)
if self.campaign.name_filter:
spend = spend.filter(name__iregex=self.campaign.name_filter)
budgets = self.campaign.budget_set
budgets = budgets.filter(end_date__gte=self.start_date)
self.spend = spend
self.budgets = budgets
def _convert_to_daily_df(self):
daily = {}
for each in self.budgets:
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount
for each in self.spend:
name = each.platform.name
if name == self.BUDGETS_NAME:
name = f'{self.BUDGETS_NAME} (spend)'
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
dayspend = daily.setdefault(name, {}).setdefault(day, 0)
daily[name][day] = dayspend + daily_amount
df = pd.DataFrame(daily)
df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]
df.fillna(0, inplace=True)
self.daily_df = df
<|reserved_special_token_0|>
def _get_budget_spend_series(self):
try:
self.budget_series = self.daily_df[self.BUDGETS_NAME]
except KeyError:
self.budget_series = pd.Series()
self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,
errors='ignore').sum(axis=1)
<|reserved_special_token_0|>
def get_daily_diff(self):
self.check_required()
res = self.budget_series - self.spend_series
res.fillna(0, inplace=True)
return res
<|reserved_special_token_0|>
def get_totals(self):
self.check_required()
spend_sum = self.spend_series.sum()
budget_sum = self.budget_series.sum()
spend_days = self.spend_series.count()
budget_days = self.budget_series.count()
diff = budget_sum - spend_sum
totals = {'spend': spend_sum, 'budget': budget_sum,
'avg_spend_per_day': spend_sum / spend_days,
'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,
'avg_diff_per_day': diff / spend_days}
for each in totals:
if pd.isnull(totals[each]):
totals[each] = 0
return totals
def get_info(self):
info = {'last_spend': self.spend_series.dropna().index[-1]}
return info
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CampaignPerformance:
<|reserved_special_token_0|>
def __init__(self, campaign, start):
self.campaign = campaign
self.start = start
self.BUDGETS_NAME = 'Budgets'
self.required_ran = False
def get(self, filt=None):
""" Return data
filt: only return certain data (list)
"""
self.check_required()
results = {}
if filt is None:
filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']
if 'daily_diff' in filt or 'cum_diff' in filt:
daily_diff = self.get_daily_diff()
if 'daily_data' in filt or 'daily_diff' in filt:
results['daily_index'] = self.daily_df.index
if 'daily_data' in filt:
results['daily_data'] = self.daily_df.to_dict('list')
if 'daily_diff' in filt:
results['daily_diff'] = daily_diff
if 'totals' in filt:
results['totals'] = self.get_totals()
if 'info' in filt:
results['info'] = self.get_info()
if 'cum_diff' in filt:
results['cum_diff'] = self.get_cum_diff(daily_diff)
print(results)
return results
def _get_start_date(self):
""" self.start = week, month, quarter, year, all, or %Y-%m-%d date
"""
today = datetimedate.today()
if self.start == 'week':
start_date = today - timedelta(days=today.weekday())
elif self.start == 'month':
start_date = today.replace(day=1)
elif self.start == 'quarter':
quarter = math.ceil(today.month / 3)
start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)
elif self.start == 'year':
start_date = datetimedate(today.year, 1, 1)
elif self.start == 'all':
start_date = datetimedate(2010, 1, 1)
else:
try:
start_date = datetime.strptime(self.start, '%Y-%m-%d').date()
except Exception as e:
raise ParseError('start argument not valid')
self.start_date = start_date
def _get_querysets(self):
spend = Spend.objects.filter(platform__client=self.campaign.client)
spend = spend.filter(platform__pk__in=self.campaign.platforms.
values_list('pk', flat=True))
spend = spend.filter(end_date__gte=self.start_date)
if self.campaign.name_filter:
spend = spend.filter(name__iregex=self.campaign.name_filter)
budgets = self.campaign.budget_set
budgets = budgets.filter(end_date__gte=self.start_date)
self.spend = spend
self.budgets = budgets
def _convert_to_daily_df(self):
daily = {}
for each in self.budgets:
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount
for each in self.spend:
name = each.platform.name
if name == self.BUDGETS_NAME:
name = f'{self.BUDGETS_NAME} (spend)'
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
dayspend = daily.setdefault(name, {}).setdefault(day, 0)
daily[name][day] = dayspend + daily_amount
df = pd.DataFrame(daily)
df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]
df.fillna(0, inplace=True)
self.daily_df = df
def _convert_spend_currency(self):
if self.spend.count() > 0:
spend_cur = list(set(self.spend.values_list('currency', flat=True))
)
if spend_cur != [self.campaign.currency]:
raise NotImplementedError(
'Currency converting not implemented, make sure budgets and spends are in the same currency'
)
self.spend = list(self.spend)
else:
self.spend = []
def _get_budget_spend_series(self):
try:
self.budget_series = self.daily_df[self.BUDGETS_NAME]
except KeyError:
self.budget_series = pd.Series()
self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,
errors='ignore').sum(axis=1)
def check_required(self):
""" Functions needed for any of the public methods to work """
if not self.required_ran:
self._get_start_date()
self._get_querysets()
self._convert_spend_currency()
self._convert_to_daily_df()
self._get_budget_spend_series()
self.required_ran = True
def get_daily_diff(self):
self.check_required()
res = self.budget_series - self.spend_series
res.fillna(0, inplace=True)
return res
<|reserved_special_token_0|>
def get_totals(self):
self.check_required()
spend_sum = self.spend_series.sum()
budget_sum = self.budget_series.sum()
spend_days = self.spend_series.count()
budget_days = self.budget_series.count()
diff = budget_sum - spend_sum
totals = {'spend': spend_sum, 'budget': budget_sum,
'avg_spend_per_day': spend_sum / spend_days,
'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,
'avg_diff_per_day': diff / spend_days}
for each in totals:
if pd.isnull(totals[each]):
totals[each] = 0
return totals
def get_info(self):
info = {'last_spend': self.spend_series.dropna().index[-1]}
return info
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CampaignPerformance:
<|reserved_special_token_0|>
def __init__(self, campaign, start):
self.campaign = campaign
self.start = start
self.BUDGETS_NAME = 'Budgets'
self.required_ran = False
def get(self, filt=None):
""" Return data
filt: only return certain data (list)
"""
self.check_required()
results = {}
if filt is None:
filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']
if 'daily_diff' in filt or 'cum_diff' in filt:
daily_diff = self.get_daily_diff()
if 'daily_data' in filt or 'daily_diff' in filt:
results['daily_index'] = self.daily_df.index
if 'daily_data' in filt:
results['daily_data'] = self.daily_df.to_dict('list')
if 'daily_diff' in filt:
results['daily_diff'] = daily_diff
if 'totals' in filt:
results['totals'] = self.get_totals()
if 'info' in filt:
results['info'] = self.get_info()
if 'cum_diff' in filt:
results['cum_diff'] = self.get_cum_diff(daily_diff)
print(results)
return results
def _get_start_date(self):
""" self.start = week, month, quarter, year, all, or %Y-%m-%d date
"""
today = datetimedate.today()
if self.start == 'week':
start_date = today - timedelta(days=today.weekday())
elif self.start == 'month':
start_date = today.replace(day=1)
elif self.start == 'quarter':
quarter = math.ceil(today.month / 3)
start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)
elif self.start == 'year':
start_date = datetimedate(today.year, 1, 1)
elif self.start == 'all':
start_date = datetimedate(2010, 1, 1)
else:
try:
start_date = datetime.strptime(self.start, '%Y-%m-%d').date()
except Exception as e:
raise ParseError('start argument not valid')
self.start_date = start_date
def _get_querysets(self):
spend = Spend.objects.filter(platform__client=self.campaign.client)
spend = spend.filter(platform__pk__in=self.campaign.platforms.
values_list('pk', flat=True))
spend = spend.filter(end_date__gte=self.start_date)
if self.campaign.name_filter:
spend = spend.filter(name__iregex=self.campaign.name_filter)
budgets = self.campaign.budget_set
budgets = budgets.filter(end_date__gte=self.start_date)
self.spend = spend
self.budgets = budgets
def _convert_to_daily_df(self):
daily = {}
for each in self.budgets:
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount
for each in self.spend:
name = each.platform.name
if name == self.BUDGETS_NAME:
name = f'{self.BUDGETS_NAME} (spend)'
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
dayspend = daily.setdefault(name, {}).setdefault(day, 0)
daily[name][day] = dayspend + daily_amount
df = pd.DataFrame(daily)
df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]
df.fillna(0, inplace=True)
self.daily_df = df
def _convert_spend_currency(self):
if self.spend.count() > 0:
spend_cur = list(set(self.spend.values_list('currency', flat=True))
)
if spend_cur != [self.campaign.currency]:
raise NotImplementedError(
'Currency converting not implemented, make sure budgets and spends are in the same currency'
)
self.spend = list(self.spend)
else:
self.spend = []
def _get_budget_spend_series(self):
try:
self.budget_series = self.daily_df[self.BUDGETS_NAME]
except KeyError:
self.budget_series = pd.Series()
self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,
errors='ignore').sum(axis=1)
def check_required(self):
""" Functions needed for any of the public methods to work """
if not self.required_ran:
self._get_start_date()
self._get_querysets()
self._convert_spend_currency()
self._convert_to_daily_df()
self._get_budget_spend_series()
self.required_ran = True
def get_daily_diff(self):
self.check_required()
res = self.budget_series - self.spend_series
res.fillna(0, inplace=True)
return res
def get_cum_diff(self, daily_diff):
self.check_required()
return daily_diff.cumsum()
def get_totals(self):
self.check_required()
spend_sum = self.spend_series.sum()
budget_sum = self.budget_series.sum()
spend_days = self.spend_series.count()
budget_days = self.budget_series.count()
diff = budget_sum - spend_sum
totals = {'spend': spend_sum, 'budget': budget_sum,
'avg_spend_per_day': spend_sum / spend_days,
'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,
'avg_diff_per_day': diff / spend_days}
for each in totals:
if pd.isnull(totals[each]):
totals[each] = 0
return totals
def get_info(self):
info = {'last_spend': self.spend_series.dropna().index[-1]}
return info
<|reserved_special_token_1|>
from clients.models import Budget
from clients.models import Spend
from datetime import date as datetimedate
from datetime import datetime
from datetime import timedelta
from django.db import models
from rest_framework.exceptions import ParseError
import math
import pandas as pd
class CampaignPerformance:
""" Get aggregated info about one campaign """
def __init__(self, campaign, start):
self.campaign = campaign
self.start = start
self.BUDGETS_NAME = 'Budgets'
self.required_ran = False
def get(self, filt=None):
""" Return data
filt: only return certain data (list)
"""
self.check_required()
results = {}
if filt is None:
filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']
if 'daily_diff' in filt or 'cum_diff' in filt:
daily_diff = self.get_daily_diff()
if 'daily_data' in filt or 'daily_diff' in filt:
results['daily_index'] = self.daily_df.index
if 'daily_data' in filt:
results['daily_data'] = self.daily_df.to_dict('list')
if 'daily_diff' in filt:
results['daily_diff'] = daily_diff
if 'totals' in filt:
results['totals'] = self.get_totals()
if 'info' in filt:
results['info'] = self.get_info()
if 'cum_diff' in filt:
results['cum_diff'] = self.get_cum_diff(daily_diff)
print(results)
return results
def _get_start_date(self):
""" self.start = week, month, quarter, year, all, or %Y-%m-%d date
"""
today = datetimedate.today()
if self.start == 'week':
start_date = today - timedelta(days=today.weekday())
elif self.start == 'month':
start_date = today.replace(day=1)
elif self.start == 'quarter':
quarter = math.ceil(today.month / 3)
start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)
elif self.start == 'year':
start_date = datetimedate(today.year, 1, 1)
elif self.start == 'all':
start_date = datetimedate(2010, 1, 1)
else:
try:
start_date = datetime.strptime(self.start, '%Y-%m-%d').date()
except Exception as e:
raise ParseError('start argument not valid')
self.start_date = start_date
def _get_querysets(self):
spend = Spend.objects.filter(platform__client=self.campaign.client)
spend = spend.filter(platform__pk__in=self.campaign.platforms.
values_list('pk', flat=True))
spend = spend.filter(end_date__gte=self.start_date)
if self.campaign.name_filter:
spend = spend.filter(name__iregex=self.campaign.name_filter)
budgets = self.campaign.budget_set
budgets = budgets.filter(end_date__gte=self.start_date)
self.spend = spend
self.budgets = budgets
def _convert_to_daily_df(self):
daily = {}
for each in self.budgets:
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount
for each in self.spend:
name = each.platform.name
if name == self.BUDGETS_NAME:
name = f'{self.BUDGETS_NAME} (spend)'
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
dayspend = daily.setdefault(name, {}).setdefault(day, 0)
daily[name][day] = dayspend + daily_amount
df = pd.DataFrame(daily)
df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]
df.fillna(0, inplace=True)
self.daily_df = df
def _convert_spend_currency(self):
if self.spend.count() > 0:
spend_cur = list(set(self.spend.values_list('currency', flat=True))
)
if spend_cur != [self.campaign.currency]:
raise NotImplementedError(
'Currency converting not implemented, make sure budgets and spends are in the same currency'
)
self.spend = list(self.spend)
else:
self.spend = []
def _get_budget_spend_series(self):
try:
self.budget_series = self.daily_df[self.BUDGETS_NAME]
except KeyError:
self.budget_series = pd.Series()
self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,
errors='ignore').sum(axis=1)
def check_required(self):
""" Functions needed for any of the public methods to work """
if not self.required_ran:
self._get_start_date()
self._get_querysets()
self._convert_spend_currency()
self._convert_to_daily_df()
self._get_budget_spend_series()
self.required_ran = True
def get_daily_diff(self):
self.check_required()
res = self.budget_series - self.spend_series
res.fillna(0, inplace=True)
return res
def get_cum_diff(self, daily_diff):
self.check_required()
return daily_diff.cumsum()
def get_totals(self):
self.check_required()
spend_sum = self.spend_series.sum()
budget_sum = self.budget_series.sum()
spend_days = self.spend_series.count()
budget_days = self.budget_series.count()
diff = budget_sum - spend_sum
totals = {'spend': spend_sum, 'budget': budget_sum,
'avg_spend_per_day': spend_sum / spend_days,
'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,
'avg_diff_per_day': diff / spend_days}
for each in totals:
if pd.isnull(totals[each]):
totals[each] = 0
return totals
def get_info(self):
info = {'last_spend': self.spend_series.dropna().index[-1]}
return info
<|reserved_special_token_1|>
from clients.models import Budget
from clients.models import Spend
from datetime import date as datetimedate
from datetime import datetime
from datetime import timedelta
from django.db import models
from rest_framework.exceptions import ParseError
import math
import pandas as pd
class CampaignPerformance:
""" Get aggregated info about one campaign """
def __init__(self, campaign, start):
# Initial arguments
self.campaign = campaign
self.start = start
self.BUDGETS_NAME = 'Budgets'
self.required_ran = False
def get(self, filt=None):
""" Return data
filt: only return certain data (list)
"""
# Required functions
self.check_required()
# Filter output
results = {}
if filt is None:
filt = [
'daily_data', 'daily_diff', 'cum_diff',
'totals', 'info'
]
# Optional functions
        # Prerequisites to multiple functions
if 'daily_diff' in filt or 'cum_diff' in filt:
daily_diff = self.get_daily_diff()
if 'daily_data' in filt or 'daily_diff' in filt:
results['daily_index'] = self.daily_df.index
# Single functions
if 'daily_data' in filt:
results['daily_data'] = self.daily_df.to_dict('list')
if 'daily_diff' in filt:
results['daily_diff'] = daily_diff
if 'totals' in filt:
results['totals'] = self.get_totals()
if 'info' in filt:
results['info'] = self.get_info()
if 'cum_diff' in filt:
results['cum_diff'] = self.get_cum_diff(daily_diff)
# results['recommend'] = {'spend_per_day', 'spend_diff(spend per day vs avg_past_spend_per_day)'}
print(results)
return results
def _get_start_date(self):
""" self.start = week, month, quarter, year, all, or %Y-%m-%d date
"""
today = datetimedate.today()
if self.start == 'week':
start_date = today - timedelta(days=today.weekday())
elif self.start == 'month':
start_date = today.replace(day=1)
elif self.start == 'quarter':
quarter = math.ceil(today.month / 3)
start_date = datetimedate(
today.year,
((quarter - 1) * 3) + 1,
1
)
elif self.start == 'year':
start_date = datetimedate(today.year, 1, 1)
elif self.start == 'all':
start_date = datetimedate(2010, 1, 1)
else:
try:
start_date = datetime.strptime(self.start, "%Y-%m-%d").date()
except Exception as e:
raise ParseError("start argument not valid")
self.start_date = start_date
def _get_querysets(self):
# GET SPEND
# Only for same client as campaign
spend = Spend.objects.filter(platform__client=self.campaign.client)
# Only for same platforms as campaign
spend = spend.filter(
platform__pk__in=(
self.campaign.platforms.values_list('pk', flat=True)
)
)
# Only where spend end_date >= start_date
spend = spend.filter(end_date__gte=self.start_date)
# Apply regex filter to spend if provided by campaign
if self.campaign.name_filter:
spend = spend.filter(name__iregex=self.campaign.name_filter)
# GET BUDGETS
budgets = self.campaign.budget_set
# Only where budget end_date >= start_date
budgets = budgets.filter(end_date__gte=self.start_date)
# SAVE
self.spend = spend
self.budgets = budgets
def _convert_to_daily_df(self):
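        # Spread each budget and each spend record evenly over its date range,
        # then assemble the per-day amounts into one DataFrame.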
daily = {}
for each in self.budgets:
# Calculate amount per day
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount
for each in self.spend:
name = each.platform.name
if name == self.BUDGETS_NAME:
name = f'{self.BUDGETS_NAME} (spend)'
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
dayspend = daily.setdefault(name, {}).setdefault(day, 0)
daily[name][day] = dayspend + daily_amount
df = pd.DataFrame(daily)
# Change datetime dates to string and fillNA for later json
df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]
df.fillna(0, inplace=True)
self.daily_df = df
def _convert_spend_currency(self):
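        # No conversion is implemented yet: this only checks that all spend
        # rows already use the campaign currency and fails loudly otherwise.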
if self.spend.count() > 0:
spend_cur = list(set(
self.spend.values_list('currency', flat=True)
))
if spend_cur != [self.campaign.currency]:
raise NotImplementedError(
"Currency converting not implemented, make sure budgets "
"and spends are in the same currency"
)
            # Convert spend to a list so that we can later change the currency
self.spend = list(self.spend)
else:
self.spend = []
def _get_budget_spend_series(self):
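        # Budgets get their own series; all remaining columns are summed into
        # a single daily spend series.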
try:
self.budget_series = self.daily_df[self.BUDGETS_NAME]
except KeyError:
self.budget_series = pd.Series()
self.spend_series = (
self.daily_df
.drop(self.BUDGETS_NAME, axis=1, errors='ignore')
.sum(axis=1)
)
def check_required(self):
""" Functions needed for any of the public methods to work """
if not self.required_ran:
self._get_start_date()
self._get_querysets()
self._convert_spend_currency()
self._convert_to_daily_df()
self._get_budget_spend_series()
self.required_ran = True
def get_daily_diff(self):
self.check_required()
res = self.budget_series - self.spend_series
res.fillna(0, inplace=True)
return res
def get_cum_diff(self, daily_diff):
self.check_required()
return daily_diff.cumsum()
def get_totals(self):
self.check_required()
spend_sum = self.spend_series.sum()
budget_sum = self.budget_series.sum()
spend_days = self.spend_series.count()
budget_days = self.budget_series.count()
diff = budget_sum - spend_sum
totals = {
'spend': spend_sum,
'budget': budget_sum,
'avg_spend_per_day': (
spend_sum / spend_days
),
'avg_budget_per_day': (
budget_sum / budget_days
),
'diff': diff,
'avg_diff_per_day': diff / spend_days
}
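        # Zero out NaNs (e.g. 0/0 divisions when a series is empty).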
for each in totals:
if pd.isnull(totals[each]):
totals[each] = 0
return totals
    def get_info(self):
        self.check_required()
        info = {
'last_spend': self.spend_series.dropna().index[-1]
}
return info
|
flexible
|
{
"blob_id": "a860e6670719a733e75c7580cf2e07765b0777eb",
"index": 2806,
"step-1": "<mask token>\n\n\nclass CampaignPerformance:\n <mask token>\n\n def __init__(self, campaign, start):\n self.campaign = campaign\n self.start = start\n self.BUDGETS_NAME = 'Budgets'\n self.required_ran = False\n <mask token>\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, '%Y-%m-%d').date()\n except Exception as e:\n raise ParseError('start argument not valid')\n self.start_date = start_date\n\n def _get_querysets(self):\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n spend = spend.filter(platform__pk__in=self.campaign.platforms.\n values_list('pk', flat=True))\n spend = spend.filter(end_date__gte=self.start_date)\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n budgets = self.campaign.budget_set\n budgets = budgets.filter(end_date__gte=self.start_date)\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n for each in self.spend:\n name = each.platform.name\n if name == self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n df = pd.DataFrame(daily)\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n self.daily_df = df\n <mask token>\n\n def _get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,\n errors='ignore').sum(axis=1)\n <mask token>\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n <mask token>\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {'spend': spend_sum, 'budget': budget_sum,\n 'avg_spend_per_day': spend_sum / spend_days,\n 'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,\n 'avg_diff_per_day': diff / spend_days}\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n return totals\n\n def get_info(self):\n info = {'last_spend': self.spend_series.dropna().index[-1]}\n return info\n",
"step-2": "<mask token>\n\n\nclass CampaignPerformance:\n <mask token>\n\n def __init__(self, campaign, start):\n self.campaign = campaign\n self.start = start\n self.BUDGETS_NAME = 'Budgets'\n self.required_ran = False\n\n def get(self, filt=None):\n \"\"\" Return data\n filt: only return certain data (list)\n \"\"\"\n self.check_required()\n results = {}\n if filt is None:\n filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']\n if 'daily_diff' in filt or 'cum_diff' in filt:\n daily_diff = self.get_daily_diff()\n if 'daily_data' in filt or 'daily_diff' in filt:\n results['daily_index'] = self.daily_df.index\n if 'daily_data' in filt:\n results['daily_data'] = self.daily_df.to_dict('list')\n if 'daily_diff' in filt:\n results['daily_diff'] = daily_diff\n if 'totals' in filt:\n results['totals'] = self.get_totals()\n if 'info' in filt:\n results['info'] = self.get_info()\n if 'cum_diff' in filt:\n results['cum_diff'] = self.get_cum_diff(daily_diff)\n print(results)\n return results\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, '%Y-%m-%d').date()\n except Exception as e:\n raise ParseError('start argument not valid')\n self.start_date = start_date\n\n def _get_querysets(self):\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n spend = spend.filter(platform__pk__in=self.campaign.platforms.\n values_list('pk', flat=True))\n spend = spend.filter(end_date__gte=self.start_date)\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n budgets = self.campaign.budget_set\n budgets = budgets.filter(end_date__gte=self.start_date)\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n for each in self.spend:\n name = each.platform.name\n if name == self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n df = pd.DataFrame(daily)\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n self.daily_df = df\n\n def _convert_spend_currency(self):\n if self.spend.count() > 0:\n spend_cur = list(set(self.spend.values_list('currency', flat=True))\n )\n if spend_cur != [self.campaign.currency]:\n raise NotImplementedError(\n 'Currency converting not implemented, make sure budgets and spends are in the same currency'\n )\n self.spend = list(self.spend)\n else:\n self.spend = []\n\n def 
_get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,\n errors='ignore').sum(axis=1)\n\n def check_required(self):\n \"\"\" Functions needed for any of the public methods to work \"\"\"\n if not self.required_ran:\n self._get_start_date()\n self._get_querysets()\n self._convert_spend_currency()\n self._convert_to_daily_df()\n self._get_budget_spend_series()\n self.required_ran = True\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n <mask token>\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {'spend': spend_sum, 'budget': budget_sum,\n 'avg_spend_per_day': spend_sum / spend_days,\n 'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,\n 'avg_diff_per_day': diff / spend_days}\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n return totals\n\n def get_info(self):\n info = {'last_spend': self.spend_series.dropna().index[-1]}\n return info\n",
"step-3": "<mask token>\n\n\nclass CampaignPerformance:\n <mask token>\n\n def __init__(self, campaign, start):\n self.campaign = campaign\n self.start = start\n self.BUDGETS_NAME = 'Budgets'\n self.required_ran = False\n\n def get(self, filt=None):\n \"\"\" Return data\n filt: only return certain data (list)\n \"\"\"\n self.check_required()\n results = {}\n if filt is None:\n filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']\n if 'daily_diff' in filt or 'cum_diff' in filt:\n daily_diff = self.get_daily_diff()\n if 'daily_data' in filt or 'daily_diff' in filt:\n results['daily_index'] = self.daily_df.index\n if 'daily_data' in filt:\n results['daily_data'] = self.daily_df.to_dict('list')\n if 'daily_diff' in filt:\n results['daily_diff'] = daily_diff\n if 'totals' in filt:\n results['totals'] = self.get_totals()\n if 'info' in filt:\n results['info'] = self.get_info()\n if 'cum_diff' in filt:\n results['cum_diff'] = self.get_cum_diff(daily_diff)\n print(results)\n return results\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, '%Y-%m-%d').date()\n except Exception as e:\n raise ParseError('start argument not valid')\n self.start_date = start_date\n\n def _get_querysets(self):\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n spend = spend.filter(platform__pk__in=self.campaign.platforms.\n values_list('pk', flat=True))\n spend = spend.filter(end_date__gte=self.start_date)\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n budgets = self.campaign.budget_set\n budgets = budgets.filter(end_date__gte=self.start_date)\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n for each in self.spend:\n name = each.platform.name\n if name == self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n df = pd.DataFrame(daily)\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n self.daily_df = df\n\n def _convert_spend_currency(self):\n if self.spend.count() > 0:\n spend_cur = list(set(self.spend.values_list('currency', flat=True))\n )\n if spend_cur != [self.campaign.currency]:\n raise NotImplementedError(\n 'Currency converting not implemented, make sure budgets and spends are in the same currency'\n )\n self.spend = list(self.spend)\n else:\n self.spend = []\n\n def 
_get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,\n errors='ignore').sum(axis=1)\n\n def check_required(self):\n \"\"\" Functions needed for any of the public methods to work \"\"\"\n if not self.required_ran:\n self._get_start_date()\n self._get_querysets()\n self._convert_spend_currency()\n self._convert_to_daily_df()\n self._get_budget_spend_series()\n self.required_ran = True\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n\n def get_cum_diff(self, daily_diff):\n self.check_required()\n return daily_diff.cumsum()\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {'spend': spend_sum, 'budget': budget_sum,\n 'avg_spend_per_day': spend_sum / spend_days,\n 'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,\n 'avg_diff_per_day': diff / spend_days}\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n return totals\n\n def get_info(self):\n info = {'last_spend': self.spend_series.dropna().index[-1]}\n return info\n",
"step-4": "from clients.models import Budget\nfrom clients.models import Spend\nfrom datetime import date as datetimedate\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom django.db import models\nfrom rest_framework.exceptions import ParseError\nimport math\nimport pandas as pd\n\n\nclass CampaignPerformance:\n \"\"\" Get aggregated info about one campaign \"\"\"\n\n def __init__(self, campaign, start):\n self.campaign = campaign\n self.start = start\n self.BUDGETS_NAME = 'Budgets'\n self.required_ran = False\n\n def get(self, filt=None):\n \"\"\" Return data\n filt: only return certain data (list)\n \"\"\"\n self.check_required()\n results = {}\n if filt is None:\n filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']\n if 'daily_diff' in filt or 'cum_diff' in filt:\n daily_diff = self.get_daily_diff()\n if 'daily_data' in filt or 'daily_diff' in filt:\n results['daily_index'] = self.daily_df.index\n if 'daily_data' in filt:\n results['daily_data'] = self.daily_df.to_dict('list')\n if 'daily_diff' in filt:\n results['daily_diff'] = daily_diff\n if 'totals' in filt:\n results['totals'] = self.get_totals()\n if 'info' in filt:\n results['info'] = self.get_info()\n if 'cum_diff' in filt:\n results['cum_diff'] = self.get_cum_diff(daily_diff)\n print(results)\n return results\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, '%Y-%m-%d').date()\n except Exception as e:\n raise ParseError('start argument not valid')\n self.start_date = start_date\n\n def _get_querysets(self):\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n spend = spend.filter(platform__pk__in=self.campaign.platforms.\n values_list('pk', flat=True))\n spend = spend.filter(end_date__gte=self.start_date)\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n budgets = self.campaign.budget_set\n budgets = budgets.filter(end_date__gte=self.start_date)\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n for each in self.spend:\n name = each.platform.name\n if name == self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n df = pd.DataFrame(daily)\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n self.daily_df = df\n\n def _convert_spend_currency(self):\n if self.spend.count() > 0:\n spend_cur = 
list(set(self.spend.values_list('currency', flat=True))\n )\n if spend_cur != [self.campaign.currency]:\n raise NotImplementedError(\n 'Currency converting not implemented, make sure budgets and spends are in the same currency'\n )\n self.spend = list(self.spend)\n else:\n self.spend = []\n\n def _get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,\n errors='ignore').sum(axis=1)\n\n def check_required(self):\n \"\"\" Functions needed for any of the public methods to work \"\"\"\n if not self.required_ran:\n self._get_start_date()\n self._get_querysets()\n self._convert_spend_currency()\n self._convert_to_daily_df()\n self._get_budget_spend_series()\n self.required_ran = True\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n\n def get_cum_diff(self, daily_diff):\n self.check_required()\n return daily_diff.cumsum()\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {'spend': spend_sum, 'budget': budget_sum,\n 'avg_spend_per_day': spend_sum / spend_days,\n 'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,\n 'avg_diff_per_day': diff / spend_days}\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n return totals\n\n def get_info(self):\n info = {'last_spend': self.spend_series.dropna().index[-1]}\n return info\n",
"step-5": "from clients.models import Budget\nfrom clients.models import Spend\nfrom datetime import date as datetimedate\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom django.db import models\nfrom rest_framework.exceptions import ParseError\nimport math\nimport pandas as pd\n\n\nclass CampaignPerformance:\n \"\"\" Get aggregated info about one campaign \"\"\"\n def __init__(self, campaign, start):\n # Initial arguments\n self.campaign = campaign\n self.start = start\n\n self.BUDGETS_NAME = 'Budgets'\n\n self.required_ran = False\n\n def get(self, filt=None):\n \"\"\" Return data\n filt: only return certain data (list)\n \"\"\"\n # Required functions\n self.check_required()\n\n # Filter output\n results = {}\n if filt is None:\n filt = [\n 'daily_data', 'daily_diff', 'cum_diff',\n 'totals', 'info'\n ]\n\n # Optional functions\n # Prerequisits to multiple funcions\n if 'daily_diff' in filt or 'cum_diff' in filt:\n daily_diff = self.get_daily_diff()\n if 'daily_data' in filt or 'daily_diff' in filt:\n results['daily_index'] = self.daily_df.index\n\n # Single functions\n if 'daily_data' in filt:\n results['daily_data'] = self.daily_df.to_dict('list')\n if 'daily_diff' in filt:\n results['daily_diff'] = daily_diff\n if 'totals' in filt:\n results['totals'] = self.get_totals()\n if 'info' in filt:\n results['info'] = self.get_info()\n if 'cum_diff' in filt:\n results['cum_diff'] = self.get_cum_diff(daily_diff)\n # results['recommend'] = {'spend_per_day', 'spend_diff(spend per day vs avg_past_spend_per_day)'}\n\n print(results)\n return results\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(\n today.year,\n ((quarter - 1) * 3) + 1,\n 1\n )\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, \"%Y-%m-%d\").date()\n except Exception as e:\n raise ParseError(\"start argument not valid\")\n\n self.start_date = start_date\n\n def _get_querysets(self):\n # GET SPEND\n # Only for same client as campaign\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n # Only for same platforms as campaign\n spend = spend.filter(\n platform__pk__in=(\n self.campaign.platforms.values_list('pk', flat=True)\n )\n )\n # Only where spend end_date >= start_date\n spend = spend.filter(end_date__gte=self.start_date)\n # Apply regex filter to spend if provided by campaign\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n\n # GET BUDGETS\n budgets = self.campaign.budget_set\n # Only where budget end_date >= start_date\n budgets = budgets.filter(end_date__gte=self.start_date)\n\n # SAVE\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n # Calculate amount per day\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n\n for each in self.spend:\n name = each.platform.name\n if name == 
self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n\n df = pd.DataFrame(daily)\n # Change datetime dates to string and fillNA for later json\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n\n self.daily_df = df\n\n def _convert_spend_currency(self):\n if self.spend.count() > 0:\n spend_cur = list(set(\n self.spend.values_list('currency', flat=True)\n ))\n if spend_cur != [self.campaign.currency]:\n raise NotImplementedError(\n \"Currency converting not implemented, make sure budgets \"\n \"and spends are in the same currency\"\n )\n # Convert spend to list so that we can alter change currency\n self.spend = list(self.spend)\n else:\n self.spend = []\n\n def _get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n\n self.spend_series = (\n self.daily_df\n .drop(self.BUDGETS_NAME, axis=1, errors='ignore')\n .sum(axis=1)\n )\n\n def check_required(self):\n \"\"\" Functions needed for any of the public methods to work \"\"\"\n if not self.required_ran:\n self._get_start_date()\n self._get_querysets()\n self._convert_spend_currency()\n self._convert_to_daily_df()\n self._get_budget_spend_series()\n\n self.required_ran = True\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n\n def get_cum_diff(self, daily_diff):\n self.check_required()\n return daily_diff.cumsum()\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {\n 'spend': spend_sum,\n 'budget': budget_sum,\n 'avg_spend_per_day': (\n spend_sum / spend_days\n ),\n 'avg_budget_per_day': (\n budget_sum / budget_days\n ),\n 'diff': diff,\n 'avg_diff_per_day': diff / spend_days\n }\n\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n\n return totals\n\n def get_info(self):\n info = {\n 'last_spend': self.spend_series.dropna().index[-1]\n }\n\n return info\n",
"step-ids": [
9,
12,
13,
15,
16
]
}
|
[
9,
12,
13,
15,
16
] |
<|reserved_special_token_0|>
class TicketshopATLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes, context=
configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain('one_state_workflow')
setRoles(portal, TEST_USER_ID, ['Manager'])
cru = plone.api.user.create
cru(email='c1@test.com', username='customer1', password='customer1')
cru(email='c2@test.com', username='customer2', password='customer2')
cru(email='v1@test.com', username='vendor1', password='vendor1')
cru(email='vendor2@test.com', username='vendor2', password='vendor2')
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title='item_11')
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title='item_12')
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title='item_21')
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title='item_22')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TicketshopLayer(PloneSandboxLayer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
<|reserved_special_token_0|>
class TicketshopATLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes, context=
configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain('one_state_workflow')
setRoles(portal, TEST_USER_ID, ['Manager'])
cru = plone.api.user.create
cru(email='c1@test.com', username='customer1', password='customer1')
cru(email='c2@test.com', username='customer2', password='customer2')
cru(email='v1@test.com', username='vendor1', password='vendor1')
cru(email='vendor2@test.com', username='vendor2', password='vendor2')
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title='item_11')
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title='item_12')
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title='item_21')
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title='item_22')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def set_browserlayer(request):
"""Set the BrowserLayer for the request.
We have to set the browserlayer manually, since importing the profile alone
doesn't do it in tests.
"""
alsoProvides(request, ITicketShopExtensionLayer)
class TicketshopLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
<|reserved_special_token_0|>
class TicketshopATLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes, context=
configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain('one_state_workflow')
setRoles(portal, TEST_USER_ID, ['Manager'])
cru = plone.api.user.create
cru(email='c1@test.com', username='customer1', password='customer1')
cru(email='c2@test.com', username='customer2', password='customer2')
cru(email='v1@test.com', username='vendor1', password='vendor1')
cru(email='vendor2@test.com', username='vendor2', password='vendor2')
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title='item_11')
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title='item_12')
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title='item_21')
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title='item_22')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if getFSVersionTuple()[0] >= 5:
PLONE5 = 1
else:
PLONE5 = 0
def set_browserlayer(request):
"""Set the BrowserLayer for the request.
We have to set the browserlayer manually, since importing the profile alone
doesn't do it in tests.
"""
alsoProvides(request, ITicketShopExtensionLayer)
class TicketshopLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
<|reserved_special_token_0|>
class TicketshopATLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes, context=
configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain('one_state_workflow')
setRoles(portal, TEST_USER_ID, ['Manager'])
cru = plone.api.user.create
cru(email='c1@test.com', username='customer1', password='customer1')
cru(email='c2@test.com', username='customer2', password='customer2')
cru(email='v1@test.com', username='vendor1', password='vendor1')
cru(email='vendor2@test.com', username='vendor2', password='vendor2')
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title='item_11')
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title='item_12')
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title='item_21')
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title='item_22')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from Products.CMFPlone.utils import getFSVersionTuple
from bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer
from plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.testing import z2
from zope.interface import alsoProvides
import plone.api
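# On Plone 5 the ATContentTypes profile is no longer installed by default, so
# TicketshopATLayer.setUpPloneSite has to apply it explicitly.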
if getFSVersionTuple()[0] >= 5:
PLONE5 = 1
else:
PLONE5 = 0
def set_browserlayer(request):
"""Set the BrowserLayer for the request.
We have to set the browserlayer manually, since importing the profile alone
doesn't do it in tests.
"""
alsoProvides(request, ITicketShopExtensionLayer)
class TicketshopLayer(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop,
context=configurationContext)
# Install products that use an old-style initialize() function
z2.installProduct(app, 'Products.DateRecurringIndex')
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
# Uninstall old-style Products
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
Ticketshop_FIXTURE = TicketshopLayer()
Ticketshop_INTEGRATION_TESTING = IntegrationTesting(
bases=(Ticketshop_FIXTURE,),
name="Ticketshop:Integration")
class TicketshopATLayer(PloneSandboxLayer):
    # Don't use the shop fixture here. It looks like test layers use different
    # ZODB connections, and c.z.datagridfield fails with a ZODB object
    # reference error.
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes,
context=configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop,
context=configurationContext)
# Install products that use an old-style initialize() function
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain("one_state_workflow")
setRoles(portal, TEST_USER_ID, ['Manager'])
# Create test users
cru = plone.api.user.create
cru(email="c1@test.com", username="customer1", password="customer1")
cru(email="c2@test.com", username="customer2", password="customer2")
cru(email="v1@test.com", username="vendor1", password="vendor1")
cru(email="vendor2@test.com", username="vendor2", password="vendor2")
# Create test content
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title="item_11")
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title="item_12")
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title="item_21")
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title="item_22")
TicketshopAT_FIXTURE = TicketshopATLayer()
TicketshopAT_INTEGRATION_TESTING = IntegrationTesting(
bases=(TicketshopAT_FIXTURE,),
name="TicketshopAT:Integration")
TicketshopAT_ROBOT_TESTING = FunctionalTesting(
bases=(
MOCK_MAILHOST_FIXTURE,
TicketshopAT_FIXTURE,
z2.ZSERVER_FIXTURE
),
name="TicketshopAT:Robot")
|
flexible
|
{
"blob_id": "5d7080f2778133d1938853512ca038edcf7c0dc4",
"index": 1002,
"step-1": "<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='c1@test.com', username='customer1', password='customer1')\n cru(email='c2@test.com', username='customer2', password='customer2')\n cru(email='v1@test.com', username='vendor1', password='vendor1')\n cru(email='vendor2@test.com', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n <mask token>\n <mask token>\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='c1@test.com', username='customer1', password='customer1')\n cru(email='c2@test.com', username='customer2', password='customer2')\n cru(email='v1@test.com', username='vendor1', password='vendor1')\n cru(email='vendor2@test.com', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='c1@test.com', username='customer1', password='customer1')\n cru(email='c2@test.com', username='customer2', password='customer2')\n cru(email='v1@test.com', username='vendor1', password='vendor1')\n cru(email='vendor2@test.com', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-4": "<mask token>\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='c1@test.com', username='customer1', password='customer1')\n cru(email='c2@test.com', username='customer2', password='customer2')\n cru(email='v1@test.com', username='vendor1', password='vendor1')\n cru(email='vendor2@test.com', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-5": "from Products.CMFPlone.utils import getFSVersionTuple\nfrom bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer\nfrom plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.app.testing import setRoles\nfrom plone.testing import z2\nfrom zope.interface import alsoProvides\nimport plone.api\n\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop,\n context=configurationContext)\n\n # Install products that use an old-style initialize() function\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n # Uninstall old-style Products\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\nTicketshop_FIXTURE = TicketshopLayer()\nTicketshop_INTEGRATION_TESTING = IntegrationTesting(\n bases=(Ticketshop_FIXTURE,),\n name=\"Ticketshop:Integration\")\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n # don't use shop fixture here. looks like, test layers use differen ZODB\n # connections and c.z.datagriedfield fails with a ZODB object reference\n # error.\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes,\n context=configurationContext)\n\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop,\n context=configurationContext)\n\n # Install products that use an old-style initialize() function\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n\n portal.portal_workflow.setDefaultChain(\"one_state_workflow\")\n setRoles(portal, TEST_USER_ID, ['Manager'])\n\n # Create test users\n cru = plone.api.user.create\n cru(email=\"c1@test.com\", username=\"customer1\", password=\"customer1\")\n cru(email=\"c2@test.com\", username=\"customer2\", password=\"customer2\")\n cru(email=\"v1@test.com\", username=\"vendor1\", password=\"vendor1\")\n cru(email=\"vendor2@test.com\", username=\"vendor2\", password=\"vendor2\")\n\n # Create test content\n crc = plone.api.content.create\n\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title=\"item_11\")\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title=\"item_12\")\n\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title=\"item_21\")\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title=\"item_22\")\n\n\nTicketshopAT_FIXTURE = 
TicketshopATLayer()\nTicketshopAT_INTEGRATION_TESTING = IntegrationTesting(\n bases=(TicketshopAT_FIXTURE,),\n name=\"TicketshopAT:Integration\")\nTicketshopAT_ROBOT_TESTING = FunctionalTesting(\n bases=(\n MOCK_MAILHOST_FIXTURE,\n TicketshopAT_FIXTURE,\n z2.ZSERVER_FIXTURE\n ),\n name=\"TicketshopAT:Robot\")\n",
"step-ids": [
4,
7,
10,
11,
14
]
}
|
[
4,
7,
10,
11,
14
] |
# Shows, among the 7 people, how many are of legal age.
num1 = 0
for c in range(0,7):
    pe1 = int(input('Enter the year of birth: '))
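    # Ages are computed against a hard-coded reference year of 2019.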
pe1 = 2019 - pe1
if pe1 >= 21:
num1 = num1 + 1
print(f'Among the 7 people, {num1} are of legal age.')
|
normal
|
{
"blob_id": "251d589a5815d77d2bc375d8d4a7d41e79a2a5cd",
"index": 5303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c in range(0, 7):\n pe1 = int(input('Digite o ano de nascimento: '))\n pe1 = 2019 - pe1\n if pe1 >= 21:\n num1 = num1 + 1\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')\n",
"step-3": "num1 = 0\nfor c in range(0, 7):\n pe1 = int(input('Digite o ano de nascimento: '))\n pe1 = 2019 - pe1\n if pe1 >= 21:\n num1 = num1 + 1\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')\n",
"step-4": "# Mostra entre as 7 pessoas, quantas pessoas são maiores de idade.\r\n\r\n\r\nnum1 = 0\r\nfor c in range(0,7):\r\n pe1 = int(input('Digite o ano de nascimento: '))\r\n pe1 = 2019 - pe1\r\n if pe1 >= 21:\r\n num1 = num1 + 1\r\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def partition3(array, left, right):
pivot = array[right]
begin = left - 1
end = left - 1
for j in range(left, right):
if array[j] < pivot:
begin += 1
array[begin], array[j] = array[j], array[begin]
end += 1
if array[j] == pivot:
array[end], array[j] = array[j], array[end]
elif array[j] == pivot:
end += 1
array[end], array[j] = array[j], array[end]
array[end + 1], array[right] = array[right], array[end + 1]
return begin + 1, end + 1
def randomized_quick_sort(array, left, right):
if left >= right:
return
k = randint(left, right)
    array[right], array[k] = array[k], array[right]
small, equal = partition3(array, left, right)
randomized_quick_sort(array, left, small - 1)
randomized_quick_sort(array, equal + 1, right)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def partition3(array, left, right):
pivot = array[right]
begin = left - 1
end = left - 1
for j in range(left, right):
if array[j] < pivot:
begin += 1
array[begin], array[j] = array[j], array[begin]
end += 1
if array[j] == pivot:
array[end], array[j] = array[j], array[end]
elif array[j] == pivot:
end += 1
array[end], array[j] = array[j], array[end]
array[end + 1], array[right] = array[right], array[end + 1]
return begin + 1, end + 1
def randomized_quick_sort(array, left, right):
if left >= right:
return
k = randint(left, right)
    array[right], array[k] = array[k], array[right]
small, equal = partition3(array, left, right)
randomized_quick_sort(array, left, small - 1)
randomized_quick_sort(array, equal + 1, right)
if __name__ == '__main__':
input_n = int(input())
elements = list(map(int, input().split()))
assert len(elements) == input_n
randomized_quick_sort(elements, 0, len(elements) - 1)
print(*elements)
<|reserved_special_token_1|>
from random import randint
def partition3(array, left, right):
pivot = array[right]
begin = left - 1
end = left - 1
for j in range(left, right):
if array[j] < pivot:
begin += 1
array[begin], array[j] = array[j], array[begin]
end += 1
if array[j] == pivot:
array[end], array[j] = array[j], array[end]
elif array[j] == pivot:
end += 1
array[end], array[j] = array[j], array[end]
array[end + 1], array[right] = array[right], array[end + 1]
return begin + 1, end + 1
def randomized_quick_sort(array, left, right):
if left >= right:
return
k = randint(left, right)
    array[right], array[k] = array[k], array[right]
small, equal = partition3(array, left, right)
randomized_quick_sort(array, left, small - 1)
randomized_quick_sort(array, equal + 1, right)
if __name__ == '__main__':
input_n = int(input())
elements = list(map(int, input().split()))
assert len(elements) == input_n
randomized_quick_sort(elements, 0, len(elements) - 1)
print(*elements)
<|reserved_special_token_1|>
# python3
from random import randint
def partition3(array, left, right):
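    # Three-way (Dutch national flag) partition around pivot = array[right]:
    # afterwards array[left:begin + 1] holds the elements < pivot and
    # array[begin + 1:end + 2] holds the elements == pivot; the boundaries
    # (begin + 1, end + 1) of the equal block are returned.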
pivot = array[right]
begin = left - 1
end = left - 1
for j in range(left, right):
if array[j] < pivot:
begin += 1
array[begin], array[j] = array[j], array[begin]
end += 1
if array[j] == pivot:
array[end], array[j] = array[j], array[end]
elif array[j] == pivot:
end += 1
array[end], array[j] = array[j], array[end]
array[end + 1], array[right] = array[right], array[end + 1]
return begin + 1, end + 1
def randomized_quick_sort(array, left, right):
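    # A random pivot keeps already-sorted input from degrading to O(n^2);
    # recurse only on the strictly-smaller and strictly-greater slices, since
    # the equal-to-pivot block is already in its final position.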
if left >= right:
return
k = randint(left, right)
    array[right], array[k] = array[k], array[right]
small, equal = partition3(array, left, right)
randomized_quick_sort(array, left, small - 1)
randomized_quick_sort(array, equal + 1, right)
if __name__ == '__main__':
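    # Read n, then n integers from stdin; sort in place and print them.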
input_n = int(input())
elements = list(map(int, input().split()))
assert len(elements) == input_n
randomized_quick_sort(elements, 0, len(elements) - 1)
print(*elements)
|
flexible
|
{
"blob_id": "a2fc9d947c75eaaaeafcd92750c99f4cfcdb9d7d",
"index": 4517,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef partition3(array, left, right):\n pivot = array[right]\n begin = left - 1\n end = left - 1\n for j in range(left, right):\n if array[j] < pivot:\n begin += 1\n array[begin], array[j] = array[j], array[begin]\n end += 1\n if array[j] == pivot:\n array[end], array[j] = array[j], array[end]\n elif array[j] == pivot:\n end += 1\n array[end], array[j] = array[j], array[end]\n array[end + 1], array[right] = array[right], array[end + 1]\n return begin + 1, end + 1\n\n\ndef randomized_quick_sort(array, left, right):\n if left >= right:\n return\n k = randint(left, right)\n array[left], array[k] = array[k], array[left]\n small, equal = partition3(array, left, right)\n randomized_quick_sort(array, left, small - 1)\n randomized_quick_sort(array, equal + 1, right)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef partition3(array, left, right):\n pivot = array[right]\n begin = left - 1\n end = left - 1\n for j in range(left, right):\n if array[j] < pivot:\n begin += 1\n array[begin], array[j] = array[j], array[begin]\n end += 1\n if array[j] == pivot:\n array[end], array[j] = array[j], array[end]\n elif array[j] == pivot:\n end += 1\n array[end], array[j] = array[j], array[end]\n array[end + 1], array[right] = array[right], array[end + 1]\n return begin + 1, end + 1\n\n\ndef randomized_quick_sort(array, left, right):\n if left >= right:\n return\n k = randint(left, right)\n array[left], array[k] = array[k], array[left]\n small, equal = partition3(array, left, right)\n randomized_quick_sort(array, left, small - 1)\n randomized_quick_sort(array, equal + 1, right)\n\n\nif __name__ == '__main__':\n input_n = int(input())\n elements = list(map(int, input().split()))\n assert len(elements) == input_n\n randomized_quick_sort(elements, 0, len(elements) - 1)\n print(*elements)\n",
"step-4": "from random import randint\n\n\ndef partition3(array, left, right):\n pivot = array[right]\n begin = left - 1\n end = left - 1\n for j in range(left, right):\n if array[j] < pivot:\n begin += 1\n array[begin], array[j] = array[j], array[begin]\n end += 1\n if array[j] == pivot:\n array[end], array[j] = array[j], array[end]\n elif array[j] == pivot:\n end += 1\n array[end], array[j] = array[j], array[end]\n array[end + 1], array[right] = array[right], array[end + 1]\n return begin + 1, end + 1\n\n\ndef randomized_quick_sort(array, left, right):\n if left >= right:\n return\n k = randint(left, right)\n array[left], array[k] = array[k], array[left]\n small, equal = partition3(array, left, right)\n randomized_quick_sort(array, left, small - 1)\n randomized_quick_sort(array, equal + 1, right)\n\n\nif __name__ == '__main__':\n input_n = int(input())\n elements = list(map(int, input().split()))\n assert len(elements) == input_n\n randomized_quick_sort(elements, 0, len(elements) - 1)\n print(*elements)\n",
"step-5": "# python3\n\nfrom random import randint\n\n\ndef partition3(array, left, right):\n pivot = array[right]\n begin = left - 1\n end = left - 1\n for j in range(left, right):\n if array[j] < pivot:\n begin += 1\n array[begin], array[j] = array[j], array[begin]\n end += 1\n if array[j] == pivot:\n array[end], array[j] = array[j], array[end]\n elif array[j] == pivot:\n end += 1\n array[end], array[j] = array[j], array[end]\n\n array[end + 1], array[right] = array[right], array[end + 1]\n return begin + 1, end + 1\n\ndef randomized_quick_sort(array, left, right):\n if left >= right:\n return\n k = randint(left, right)\n array[left], array[k] = array[k], array[left]\n\n small, equal = partition3(array, left, right)\n randomized_quick_sort(array, left, small - 1)\n randomized_quick_sort(array, equal + 1, right)\n\n\nif __name__ == '__main__':\n input_n = int(input())\n elements = list(map(int, input().split()))\n assert len(elements) == input_n\n randomized_quick_sort(elements, 0, len(elements) - 1)\n print(*elements)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
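A note on the quicksort record above: in randomized_quick_sort, the randomly chosen element is swapped into position left, while partition3 always reads its pivot from array[right]. The random choice therefore only becomes the pivot when k happens to equal right, so the sort, although still correct for any pivot, effectively degenerates to last-element pivoting (quadratic on already-sorted input). The inner "if array[j] == pivot" inside the "<" branch of partition3 is not dead code, incidentally: after the swap with array[begin], array[j] may hold a pivot-equal element displaced from the equal region, which then has to move to array[end]. A minimal sketch of the presumably intended randomization, reusing partition3 from the record (the _fixed name is illustrative, not part of the record):

from random import randint

def randomized_quick_sort_fixed(array, left, right):
    if left >= right:
        return
    # Swap the random element into position `right`, which is where
    # partition3 actually takes its pivot from.
    k = randint(left, right)
    array[right], array[k] = array[k], array[right]
    small, equal = partition3(array, left, right)
    randomized_quick_sort_fixed(array, left, small - 1)
    randomized_quick_sort_fixed(array, equal + 1, right)
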
def summation(numbers):
positive_numbers = []
normalized_numbers = []
numbers_list = numbers.split()
for idx, arg in enumerate(numbers_list):
int_arg = int(arg)
if int_arg < 0:
new_arg = abs(int_arg) * 2
else:
new_arg = int_arg
positive_numbers.append(new_arg)
max_of_positive_numbers = max(positive_numbers)
for idx, arg in enumerate(positive_numbers):
normalized_arg = arg / max_of_positive_numbers
normalized_numbers.append(normalized_arg)
print(sum(normalized_numbers))
|
normal
|
{
"blob_id": "791df87235f5da634fc62ebc3a3741cea6e2deca",
"index": 3841,
"step-1": "<mask token>\n",
"step-2": "def summation(numbers):\n positive_numbers = []\n normalized_numbers = []\n numbers_list = numbers.split()\n for idx, arg in enumerate(numbers_list):\n int_arg = int(arg)\n if int_arg < 0:\n new_arg = abs(int_arg) * 2\n else:\n new_arg = int_arg\n positive_numbers.append(new_arg)\n max_of_positive_numbers = max(positive_numbers)\n for idx, arg in enumerate(positive_numbers):\n normalized_arg = arg / max_of_positive_numbers\n normalized_numbers.append(normalized_arg)\n print(sum(normalized_numbers))\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
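For reference, summation in the record above parses a whitespace-separated string of integers, replaces each negative value with twice its absolute value, scales every result by the largest one, and prints the sum of the scaled values. A small worked call (input chosen for illustration); note that an input whose largest processed value is 0, such as '0 0', would raise ZeroDivisionError:

summation('1 -2 3')
# numbers_list     -> ['1', '-2', '3']
# positive_numbers -> [1, 4, 3]   (-2 becomes abs(-2) * 2 = 4)
# max              -> 4
# normalized       -> [0.25, 1.0, 0.75]
# prints 2.0
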
# Generated by Django 3.2.6 on 2021-08-15 05:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='tasks',
name='cleanlinessLevel',
field=models.IntegerField(),
),
]
|
normal
|
{
"blob_id": "6f9f204cbd6817d5e40f57e71614ad03b64d9003",
"index": 3152,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('website', '0001_initial')]\n operations = [migrations.AlterField(model_name='tasks', name=\n 'cleanlinessLevel', field=models.IntegerField())]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('website', '0001_initial')]\n operations = [migrations.AlterField(model_name='tasks', name=\n 'cleanlinessLevel', field=models.IntegerField())]\n",
"step-5": "# Generated by Django 3.2.6 on 2021-08-15 05:17\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='tasks',\n name='cleanlinessLevel',\n field=models.IntegerField(),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
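The migration record above changes a single column: cleanlinessLevel on the tasks model becomes a plain models.IntegerField(), replacing whatever definition the field had in 0001_initial. It is applied with the usual "python manage.py migrate website"; afterwards the model side would look roughly like this (the model body is a hypothetical reconstruction, not part of the record):

# website/models.py (hypothetical)
from django.db import models

class Tasks(models.Model):
    cleanlinessLevel = models.IntegerField()
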
<|reserved_special_token_0|>
@attach_common
class TalkBotThread(QThread):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(num_of_gram=3, text_target=self.text_target
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 1:
self.bot = NgramBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 2:
self.bot = MorphemeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 3:
self.bot = MemorizeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@attach_common
class TalkBotThread(QThread):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
<|reserved_special_token_0|>
def output_bot_type(self):
parent = self.parent
msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot
), ' bot: {}'.format(parent.bot.__class__.__name__
), ' lister: {}'.format(parent.lister.__class__.__name__
), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]
)
for msg in msgs:
self.logger.w(msg)
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(num_of_gram=3, text_target=self.text_target
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 1:
self.bot = NgramBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 2:
self.bot = MorphemeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 3:
self.bot = MemorizeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@attach_common
class TalkBotThread(QThread):
parent = None
bot = None
lister = None
text_target = ''
tokens_start_of_text = []
tokens_of_text = []
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
def update_bot_type(self):
parent = self.parent
parent.is_bot_ready = False
parent.show_bot_not_ready_msg()
self.type_bot = parent.type_bot
self.select_token_list()
self.select_bot()
parent.bot = self.bot
parent.lister = self.lister
parent.is_bot_ready = True
parent.show_bot_ready_msg()
self.output_bot_type()
def output_bot_type(self):
parent = self.parent
msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot
), ' bot: {}'.format(parent.bot.__class__.__name__
), ' lister: {}'.format(parent.lister.__class__.__name__
), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]
)
for msg in msgs:
self.logger.w(msg)
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(num_of_gram=3, text_target=self.text_target
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 1:
self.bot = NgramBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 2:
self.bot = MorphemeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 3:
self.bot = MemorizeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import time
from PyQt5.QtCore import QThread
from common import attach_common
from database_downloader import DatabaseDownload
from ai_list_memorize import MemorizeList
from ai_list_morpheme import MorphemeList
from ai_list_ngram import NgramList
from ai_list_none import NoneList
from ai_bot_memorize import MemorizeBot
from ai_bot_morpheme import MorphemeBot
from ai_bot_ngram import NgramBot
from ai_bot_none import NoneBot
@attach_common
class TalkBotThread(QThread):
parent = None
bot = None
lister = None
text_target = ''
tokens_start_of_text = []
tokens_of_text = []
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
def update_bot_type(self):
parent = self.parent
parent.is_bot_ready = False
parent.show_bot_not_ready_msg()
self.type_bot = parent.type_bot
self.select_token_list()
self.select_bot()
parent.bot = self.bot
parent.lister = self.lister
parent.is_bot_ready = True
parent.show_bot_ready_msg()
self.output_bot_type()
def output_bot_type(self):
parent = self.parent
msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot
), ' bot: {}'.format(parent.bot.__class__.__name__
), ' lister: {}'.format(parent.lister.__class__.__name__
), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]
)
for msg in msgs:
self.logger.w(msg)
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(num_of_gram=3, text_target=self.text_target
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(num_of_gram=self.config.
DISABLE_NGRAM, text_target=self.text_target)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 1:
self.bot = NgramBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 2:
self.bot = MorphemeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
if self.type_bot == 3:
self.bot = MemorizeBot(starting_token_list=self.
tokens_start_of_text, token_list=self.tokens_of_text)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
if __name__ == '__main__':
from gui_talkbot import MainWindow
TestClass = MainWindow
import sys
from PyQt5.QtWidgets import QApplication
qapp = QApplication(sys.argv)
window = TestClass()
window.show()
code = qapp.exec()
sys.exit(code)
<|reserved_special_token_1|>
import time
from PyQt5.QtCore import (
QThread,
)
from common import attach_common
from database_downloader import DatabaseDownload
from ai_list_memorize import MemorizeList
from ai_list_morpheme import MorphemeList
from ai_list_ngram import NgramList
from ai_list_none import NoneList
from ai_bot_memorize import MemorizeBot
from ai_bot_morpheme import MorphemeBot
from ai_bot_ngram import NgramBot
from ai_bot_none import NoneBot
@attach_common
class TalkBotThread(QThread):
parent = None
bot = None
lister = None
text_target = ''
tokens_start_of_text = []
tokens_of_text = []
def run(self):
self.start_database()
self.update_bot_type()
self.start_main_loop()
def start_database(self):
if self.config.ENABLED_DOWNLOAD:
DatabaseDownload.remove_tempfiles()
DatabaseDownload.set_url('')
DatabaseDownload.do_download_html()
else:
DatabaseDownload.do_not_download_html()
self.text_target = DatabaseDownload().get_outcome()
def update_bot_type(self):
parent = self.parent
parent.is_bot_ready = False
parent.show_bot_not_ready_msg()
self.type_bot = parent.type_bot
self.select_token_list()
self.select_bot()
parent.bot = self.bot
parent.lister = self.lister
parent.is_bot_ready = True
parent.show_bot_ready_msg()
self.output_bot_type()
def output_bot_type(self):
parent = self.parent
msgs = (
'TalkBot:',
' id: {}'.format(parent.type_bot),
' bot: {}'.format(parent.bot.__class__.__name__),
' lister: {}'.format(parent.lister.__class__.__name__),
' tokens: {}'.format(str(parent.lister.get_token_list())[:60]),
)
for msg in msgs:
self.logger.w(msg)
def select_token_list(self):
if self.type_bot == 0:
self.lister = NoneList(
num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target,
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 1:
self.lister = NgramList(
num_of_gram=3,
text_target=self.text_target,
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 2:
self.lister = MorphemeList(
num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target,
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
if self.type_bot == 3:
self.lister = MemorizeList(
num_of_gram=self.config.DISABLE_NGRAM,
text_target=self.text_target,
)
self.tokens_start_of_text = self.lister.get_starting_token_list()
self.tokens_of_text = self.lister.get_token_list()
return
err = self.type_bot
raise Exception(err)
def select_bot(self):
if self.type_bot == 0:
self.bot = NoneBot(
starting_token_list=self.tokens_start_of_text,
token_list=self.tokens_of_text,
)
return
if self.type_bot == 1:
self.bot = NgramBot(
starting_token_list=self.tokens_start_of_text,
token_list=self.tokens_of_text,
)
return
if self.type_bot == 2:
self.bot = MorphemeBot(
starting_token_list=self.tokens_start_of_text,
token_list=self.tokens_of_text,
)
return
if self.type_bot == 3:
self.bot = MemorizeBot(
starting_token_list=self.tokens_start_of_text,
token_list=self.tokens_of_text,
)
return
err = self.type_bot
raise Exception(err)
def start_main_loop(self):
parent = self.parent
while True:
time.sleep(0.2)
if parent.is_app_close:
break
if parent.type_bot != self.type_bot:
self.update_bot_type()
continue
parent.update_bot_msg_to_proper_latest_status()
msg = 'Stopped the database thread!'
self.logger.w(msg)
if __name__ == "__main__":
from gui_talkbot import MainWindow
TestClass = MainWindow
import sys
from PyQt5.QtWidgets import QApplication
qapp = QApplication(sys.argv)
window = TestClass()
window.show()
code = qapp.exec()
sys.exit(code)
|
flexible
|
{
"blob_id": "77763f501c6776969d2594f987e5d7ab7d4377fb",
"index": 317,
"step-1": "<mask token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n <mask token>\n <mask token>\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 1:\n self.bot = NgramBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 2:\n self.bot = MorphemeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 3:\n self.bot = MemorizeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n <mask token>\n\n def output_bot_type(self):\n parent = self.parent\n msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot\n ), ' bot: {}'.format(parent.bot.__class__.__name__\n ), ' lister: {}'.format(parent.lister.__class__.__name__\n ), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 1:\n self.bot = NgramBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 2:\n self.bot = MorphemeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 3:\n self.bot = MemorizeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@attach_common\nclass TalkBotThread(QThread):\n parent = None\n bot = None\n lister = None\n text_target = ''\n tokens_start_of_text = []\n tokens_of_text = []\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot\n ), ' bot: {}'.format(parent.bot.__class__.__name__\n ), ' lister: {}'.format(parent.lister.__class__.__name__\n ), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 1:\n self.bot = NgramBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 2:\n self.bot = MorphemeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 3:\n self.bot = MemorizeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\n<mask token>\n",
"step-4": "import time\nfrom PyQt5.QtCore import QThread\nfrom common import attach_common\nfrom database_downloader import DatabaseDownload\nfrom ai_list_memorize import MemorizeList\nfrom ai_list_morpheme import MorphemeList\nfrom ai_list_ngram import NgramList\nfrom ai_list_none import NoneList\nfrom ai_bot_memorize import MemorizeBot\nfrom ai_bot_morpheme import MorphemeBot\nfrom ai_bot_ngram import NgramBot\nfrom ai_bot_none import NoneBot\n\n\n@attach_common\nclass TalkBotThread(QThread):\n parent = None\n bot = None\n lister = None\n text_target = ''\n tokens_start_of_text = []\n tokens_of_text = []\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = 'TalkBot:', ' id: {}'.format(parent.type_bot\n ), ' bot: {}'.format(parent.bot.__class__.__name__\n ), ' lister: {}'.format(parent.lister.__class__.__name__\n ), ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 1:\n self.lister = NgramList(num_of_gram=3, text_target=self.text_target\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 2:\n self.lister = MorphemeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n if self.type_bot == 3:\n self.lister = MemorizeList(num_of_gram=self.config.\n DISABLE_NGRAM, text_target=self.text_target)\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 1:\n self.bot = NgramBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 2:\n self.bot = MorphemeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n if self.type_bot == 3:\n self.bot = MemorizeBot(starting_token_list=self.\n tokens_start_of_text, token_list=self.tokens_of_text)\n return\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n if parent.is_app_close:\n break\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n 
parent.update_bot_msg_to_proper_latest_status()\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\nif __name__ == '__main__':\n from gui_talkbot import MainWindow\n TestClass = MainWindow\n import sys\n from PyQt5.QtWidgets import QApplication\n qapp = QApplication(sys.argv)\n window = TestClass()\n window.show()\n code = qapp.exec()\n sys.exit(code)\n",
"step-5": "import time\nfrom PyQt5.QtCore import (\n QThread,\n)\nfrom common import attach_common\nfrom database_downloader import DatabaseDownload\nfrom ai_list_memorize import MemorizeList\nfrom ai_list_morpheme import MorphemeList\nfrom ai_list_ngram import NgramList\nfrom ai_list_none import NoneList\nfrom ai_bot_memorize import MemorizeBot\nfrom ai_bot_morpheme import MorphemeBot\nfrom ai_bot_ngram import NgramBot\nfrom ai_bot_none import NoneBot\n\n\n@attach_common\nclass TalkBotThread(QThread):\n parent = None\n bot = None\n lister = None\n\n text_target = ''\n tokens_start_of_text = []\n tokens_of_text = []\n\n def run(self):\n self.start_database()\n self.update_bot_type()\n self.start_main_loop()\n\n def start_database(self):\n if self.config.ENABLED_DOWNLOAD:\n DatabaseDownload.remove_tempfiles()\n DatabaseDownload.set_url('')\n DatabaseDownload.do_download_html()\n else:\n DatabaseDownload.do_not_download_html()\n self.text_target = DatabaseDownload().get_outcome()\n\n def update_bot_type(self):\n parent = self.parent\n parent.is_bot_ready = False\n parent.show_bot_not_ready_msg()\n\n self.type_bot = parent.type_bot\n self.select_token_list()\n self.select_bot()\n\n parent.bot = self.bot\n parent.lister = self.lister\n parent.is_bot_ready = True\n parent.show_bot_ready_msg()\n self.output_bot_type()\n\n def output_bot_type(self):\n parent = self.parent\n msgs = (\n 'TalkBot:',\n ' id: {}'.format(parent.type_bot),\n ' bot: {}'.format(parent.bot.__class__.__name__),\n ' lister: {}'.format(parent.lister.__class__.__name__),\n ' tokens: {}'.format(str(parent.lister.get_token_list())[:60]),\n )\n for msg in msgs:\n self.logger.w(msg)\n\n def select_token_list(self):\n if self.type_bot == 0:\n self.lister = NoneList(\n num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target,\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n\n if self.type_bot == 1:\n self.lister = NgramList(\n num_of_gram=3,\n text_target=self.text_target,\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n\n if self.type_bot == 2:\n self.lister = MorphemeList(\n num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target,\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n\n if self.type_bot == 3:\n self.lister = MemorizeList(\n num_of_gram=self.config.DISABLE_NGRAM,\n text_target=self.text_target,\n )\n self.tokens_start_of_text = self.lister.get_starting_token_list()\n self.tokens_of_text = self.lister.get_token_list()\n return\n\n err = self.type_bot\n raise Exception(err)\n\n def select_bot(self):\n if self.type_bot == 0:\n self.bot = NoneBot(\n starting_token_list=self.tokens_start_of_text,\n token_list=self.tokens_of_text,\n )\n return\n\n if self.type_bot == 1:\n self.bot = NgramBot(\n starting_token_list=self.tokens_start_of_text,\n token_list=self.tokens_of_text,\n )\n return\n\n if self.type_bot == 2:\n self.bot = MorphemeBot(\n starting_token_list=self.tokens_start_of_text,\n token_list=self.tokens_of_text,\n )\n return\n\n if self.type_bot == 3:\n self.bot = MemorizeBot(\n starting_token_list=self.tokens_start_of_text,\n token_list=self.tokens_of_text,\n )\n return\n\n err = self.type_bot\n raise Exception(err)\n\n def start_main_loop(self):\n parent = self.parent\n while True:\n time.sleep(0.2)\n\n if parent.is_app_close:\n 
break\n\n if parent.type_bot != self.type_bot:\n self.update_bot_type()\n continue\n\n parent.update_bot_msg_to_proper_latest_status()\n\n msg = 'Stopped the database thread!'\n self.logger.w(msg)\n\n\nif __name__ == \"__main__\":\n from gui_talkbot import MainWindow\n TestClass = MainWindow\n\n import sys\n from PyQt5.QtWidgets import QApplication\n qapp = QApplication(sys.argv)\n window = TestClass()\n window.show()\n code = qapp.exec()\n sys.exit(code)\n",
"step-ids": [
6,
7,
9,
11,
12
]
}
|
[
6,
7,
9,
11,
12
] |
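The select_token_list/select_bot pair in the TalkBotThread record above dispatches on type_bot (0 = None, 1 = Ngram, 2 = Morpheme, 3 = Memorize) through four nearly identical if blocks that each end in return, falling through to raise Exception(err) for unknown ids. A table-driven sketch of the same dispatch (the dict names are illustrative, not part of the record; only NgramList receives the fixed gram size of 3, mirroring the original branches):

LISTER_CLASSES = {0: NoneList, 1: NgramList, 2: MorphemeList, 3: MemorizeList}
BOT_CLASSES = {0: NoneBot, 1: NgramBot, 2: MorphemeBot, 3: MemorizeBot}

def select_token_list(self):
    if self.type_bot not in LISTER_CLASSES:
        raise Exception(self.type_bot)
    num = 3 if self.type_bot == 1 else self.config.DISABLE_NGRAM
    self.lister = LISTER_CLASSES[self.type_bot](
        num_of_gram=num, text_target=self.text_target)
    self.tokens_start_of_text = self.lister.get_starting_token_list()
    self.tokens_of_text = self.lister.get_token_list()

def select_bot(self):
    if self.type_bot not in BOT_CLASSES:
        raise Exception(self.type_bot)
    self.bot = BOT_CLASSES[self.type_bot](
        starting_token_list=self.tokens_start_of_text,
        token_list=self.tokens_of_text)
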
<|reserved_special_token_0|>
class SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = sensitivity_models.Species
fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']
class SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.
ModelSerializer):
category = rest_serializers.CharField(source='category_display')
structure = rest_serializers.SlugRelatedField('name', read_only=True)
species = rest_serializers.CharField(source='species_display')
class Meta:
model = sensitivity_models.SensitiveArea
fields = '__all__'
class SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):
radius = rest_serializers.IntegerField()
class Meta(MapentityGeojsonModelSerializer.Meta):
model = sensitivity_models.SensitiveArea
fields = ['id', 'species', 'radius', 'published']
class SensitiveAreaAPISerializer(TranslatedModelSerializer):
species = SpeciesSerializer()
kml_url = rest_serializers.SerializerMethodField()
attachments = AttachmentSerializer(many=True)
rules = RuleSerializer(many=True)
def get_kml_url(self, obj):
return reverse('sensitivity:sensitivearea_kml_detail', kwargs={
'lang': get_language(), 'pk': obj.pk})
class Meta:
model = sensitivity_models.SensitiveArea
fields = ('id', 'species', 'description', 'contact', 'published',
'publication_date', 'kml_url', 'attachments', 'rules')
class SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,
SensitiveAreaAPISerializer):
geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,
precision=7)
class Meta(SensitiveAreaAPISerializer.Meta):
geo_field = 'geom2d_transformed'
fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'
,)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_period(self, obj):
return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]
class Meta:
model = sensitivity_models.Species
fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']
class SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.
ModelSerializer):
category = rest_serializers.CharField(source='category_display')
structure = rest_serializers.SlugRelatedField('name', read_only=True)
species = rest_serializers.CharField(source='species_display')
class Meta:
model = sensitivity_models.SensitiveArea
fields = '__all__'
class SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):
radius = rest_serializers.IntegerField()
class Meta(MapentityGeojsonModelSerializer.Meta):
model = sensitivity_models.SensitiveArea
fields = ['id', 'species', 'radius', 'published']
class SensitiveAreaAPISerializer(TranslatedModelSerializer):
species = SpeciesSerializer()
kml_url = rest_serializers.SerializerMethodField()
attachments = AttachmentSerializer(many=True)
rules = RuleSerializer(many=True)
def get_kml_url(self, obj):
return reverse('sensitivity:sensitivearea_kml_detail', kwargs={
'lang': get_language(), 'pk': obj.pk})
class Meta:
model = sensitivity_models.SensitiveArea
fields = ('id', 'species', 'description', 'contact', 'published',
'publication_date', 'kml_url', 'attachments', 'rules')
class SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,
SensitiveAreaAPISerializer):
geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,
precision=7)
class Meta(SensitiveAreaAPISerializer.Meta):
geo_field = 'geom2d_transformed'
fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'
,)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):
practices = SportPracticeSerializer(many=True)
period = rest_serializers.SerializerMethodField()
def get_period(self, obj):
return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]
class Meta:
model = sensitivity_models.Species
fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']
class SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.
ModelSerializer):
category = rest_serializers.CharField(source='category_display')
structure = rest_serializers.SlugRelatedField('name', read_only=True)
species = rest_serializers.CharField(source='species_display')
class Meta:
model = sensitivity_models.SensitiveArea
fields = '__all__'
class SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):
radius = rest_serializers.IntegerField()
class Meta(MapentityGeojsonModelSerializer.Meta):
model = sensitivity_models.SensitiveArea
fields = ['id', 'species', 'radius', 'published']
class SensitiveAreaAPISerializer(TranslatedModelSerializer):
species = SpeciesSerializer()
kml_url = rest_serializers.SerializerMethodField()
attachments = AttachmentSerializer(many=True)
rules = RuleSerializer(many=True)
def get_kml_url(self, obj):
return reverse('sensitivity:sensitivearea_kml_detail', kwargs={
'lang': get_language(), 'pk': obj.pk})
class Meta:
model = sensitivity_models.SensitiveArea
fields = ('id', 'species', 'description', 'contact', 'published',
'publication_date', 'kml_url', 'attachments', 'rules')
class SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,
SensitiveAreaAPISerializer):
geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,
precision=7)
class Meta(SensitiveAreaAPISerializer.Meta):
geo_field = 'geom2d_transformed'
fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'
,)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SportPracticeSerializer(TranslatedModelSerializer):
class Meta:
model = sensitivity_models.SportPractice
fields = 'id', 'name'
class SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):
practices = SportPracticeSerializer(many=True)
period = rest_serializers.SerializerMethodField()
def get_period(self, obj):
return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]
class Meta:
model = sensitivity_models.Species
fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']
class SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.
ModelSerializer):
category = rest_serializers.CharField(source='category_display')
structure = rest_serializers.SlugRelatedField('name', read_only=True)
species = rest_serializers.CharField(source='species_display')
class Meta:
model = sensitivity_models.SensitiveArea
fields = '__all__'
class SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):
radius = rest_serializers.IntegerField()
class Meta(MapentityGeojsonModelSerializer.Meta):
model = sensitivity_models.SensitiveArea
fields = ['id', 'species', 'radius', 'published']
class SensitiveAreaAPISerializer(TranslatedModelSerializer):
species = SpeciesSerializer()
kml_url = rest_serializers.SerializerMethodField()
attachments = AttachmentSerializer(many=True)
rules = RuleSerializer(many=True)
def get_kml_url(self, obj):
return reverse('sensitivity:sensitivearea_kml_detail', kwargs={
'lang': get_language(), 'pk': obj.pk})
class Meta:
model = sensitivity_models.SensitiveArea
fields = ('id', 'species', 'description', 'contact', 'published',
'publication_date', 'kml_url', 'attachments', 'rules')
class SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,
SensitiveAreaAPISerializer):
geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,
precision=7)
class Meta(SensitiveAreaAPISerializer.Meta):
geo_field = 'geom2d_transformed'
fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'
,)
<|reserved_special_token_1|>
from django.urls import reverse
from django.utils.translation import get_language
from drf_dynamic_fields import DynamicFieldsMixin
from geotrek.api.v2.serializers import AttachmentSerializer
from mapentity.serializers import MapentityGeojsonModelSerializer
from rest_framework import serializers as rest_serializers
from rest_framework_gis import fields as rest_gis_fields
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from geotrek.common.serializers import PictogramSerializerMixin, TranslatedModelSerializer
from . import models as sensitivity_models
class RuleSerializer(PictogramSerializerMixin, rest_serializers.ModelSerializer):
class Meta:
model = sensitivity_models.Rule
fields = ('id', 'code', 'name', 'pictogram', 'description', 'url')
class SportPracticeSerializer(TranslatedModelSerializer):
class Meta:
model = sensitivity_models.SportPractice
fields = ('id', 'name')
class SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):
practices = SportPracticeSerializer(many=True)
period = rest_serializers.SerializerMethodField()
def get_period(self, obj):
return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]
class Meta:
model = sensitivity_models.Species
fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']
class SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.ModelSerializer):
category = rest_serializers.CharField(source='category_display')
structure = rest_serializers.SlugRelatedField('name', read_only=True)
species = rest_serializers.CharField(source='species_display')
class Meta:
model = sensitivity_models.SensitiveArea
fields = "__all__"
class SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):
radius = rest_serializers.IntegerField()
class Meta(MapentityGeojsonModelSerializer.Meta):
model = sensitivity_models.SensitiveArea
fields = ['id', 'species', 'radius', 'published']
class SensitiveAreaAPISerializer(TranslatedModelSerializer):
species = SpeciesSerializer()
kml_url = rest_serializers.SerializerMethodField()
attachments = AttachmentSerializer(many=True)
rules = RuleSerializer(many=True)
def get_kml_url(self, obj):
return reverse('sensitivity:sensitivearea_kml_detail', kwargs={'lang': get_language(), 'pk': obj.pk})
class Meta:
model = sensitivity_models.SensitiveArea
fields = ('id', 'species', 'description', 'contact', 'published', 'publication_date', 'kml_url', 'attachments', 'rules')
class SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer, SensitiveAreaAPISerializer):
# Annotated geom field with API_SRID
geom2d_transformed = rest_gis_fields.GeometryField(read_only=True, precision=7)
class Meta(SensitiveAreaAPISerializer.Meta):
geo_field = 'geom2d_transformed'
fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed', )
|
flexible
|
{
"blob_id": "dfd5915428dc8f15fb61c5d81f22dfecfe29af15",
"index": 6409,
"step-1": "<mask token>\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.\n ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = '__all__'\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={\n 'lang': get_language(), 'pk': obj.pk})\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published',\n 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,\n SensitiveAreaAPISerializer):\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,\n precision=7)\n\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'\n ,)\n",
"step-2": "<mask token>\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n <mask token>\n <mask token>\n\n def get_period(self, obj):\n return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]\n\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.\n ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = '__all__'\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={\n 'lang': get_language(), 'pk': obj.pk})\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published',\n 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,\n SensitiveAreaAPISerializer):\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,\n precision=7)\n\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'\n ,)\n",
"step-3": "<mask token>\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n practices = SportPracticeSerializer(many=True)\n period = rest_serializers.SerializerMethodField()\n\n def get_period(self, obj):\n return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]\n\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.\n ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = '__all__'\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={\n 'lang': get_language(), 'pk': obj.pk})\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published',\n 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,\n SensitiveAreaAPISerializer):\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,\n precision=7)\n\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'\n ,)\n",
"step-4": "<mask token>\n\n\nclass SportPracticeSerializer(TranslatedModelSerializer):\n\n\n class Meta:\n model = sensitivity_models.SportPractice\n fields = 'id', 'name'\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n practices = SportPracticeSerializer(many=True)\n period = rest_serializers.SerializerMethodField()\n\n def get_period(self, obj):\n return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]\n\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.\n ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = '__all__'\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={\n 'lang': get_language(), 'pk': obj.pk})\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published',\n 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,\n SensitiveAreaAPISerializer):\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,\n precision=7)\n\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'\n ,)\n",
"step-5": "from django.urls import reverse\nfrom django.utils.translation import get_language\nfrom drf_dynamic_fields import DynamicFieldsMixin\nfrom geotrek.api.v2.serializers import AttachmentSerializer\nfrom mapentity.serializers import MapentityGeojsonModelSerializer\nfrom rest_framework import serializers as rest_serializers\nfrom rest_framework_gis import fields as rest_gis_fields\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\n\nfrom geotrek.common.serializers import PictogramSerializerMixin, TranslatedModelSerializer\nfrom . import models as sensitivity_models\n\n\nclass RuleSerializer(PictogramSerializerMixin, rest_serializers.ModelSerializer):\n\n class Meta:\n model = sensitivity_models.Rule\n fields = ('id', 'code', 'name', 'pictogram', 'description', 'url')\n\n\nclass SportPracticeSerializer(TranslatedModelSerializer):\n class Meta:\n model = sensitivity_models.SportPractice\n fields = ('id', 'name')\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n practices = SportPracticeSerializer(many=True)\n period = rest_serializers.SerializerMethodField()\n\n def get_period(self, obj):\n return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = \"__all__\"\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={'lang': get_language(), 'pk': obj.pk})\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published', 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer, SensitiveAreaAPISerializer):\n # Annotated geom field with API_SRID\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True, precision=7)\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed', )\n",
"step-ids": [
10,
11,
12,
13,
16
]
}
|
[
10,
11,
12,
13,
16
] |
from abc import ABCMeta, abstractmethod

__author__ = 'Alexiy'


class Protocol:
    """base protocol class"""
    __metaclass__ = ABCMeta
    FAIL = 'Failed'

    @abstractmethod
    def execute(self, command):
        """execute command method"""


class LocalProtocol(Protocol):
    """simple protocol for using bots within app"""

    def __init__(self, command_executor):
        self._command_executor = command_executor

    def execute(self, command):
        if not self._command_executor.has_executor(command.name):
            return Protocol.FAIL
        try:
            result = self._command_executor.execute(command)
        except:
            result = Protocol.FAIL
        return result


Protocol.register(LocalProtocol)
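# --- Illustrative usage sketch (added; not part of the original module). ---
# The executor and command classes below are hypothetical stand-ins:
# LocalProtocol only requires has_executor(name) / execute(command) on the
# executor and a .name attribute on the command.
if __name__ == '__main__':
    class EchoExecutor:
        def has_executor(self, name):
            return name == 'echo'

        def execute(self, command):
            return 'echo: ' + command.name

    class Command:
        def __init__(self, name):
            self.name = name

    protocol = LocalProtocol(EchoExecutor())
    print(protocol.execute(Command('echo')))     # -> echo: echo
    print(protocol.execute(Command('unknown')))  # -> Failed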
|
normal
|
{
"blob_id": "8d1067a9bb0629276ef27de91f63cf2370a44e24",
"index": 1369,
"step-1": "<mask token>\n\n\nclass Protocol:\n <mask token>\n <mask token>\n <mask token>\n\n @abstractmethod\n def execute(self, command):\n \"\"\"\"execute command method\"\"\"\n\n\nclass LocalProtocol(Protocol):\n \"\"\"simple protocol for using bots within app\"\"\"\n\n def __init__(self, command_executor):\n self._command_executor = command_executor\n\n def execute(self, command):\n if not self._command_executor.has_executor(command.name):\n return Protocol.FAIL\n try:\n result = self._command_executor.execute(command)\n except:\n result = Protocol.FAIL\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Protocol:\n <mask token>\n __metaclass__ = ABCMeta\n FAIL = 'Failed'\n\n @abstractmethod\n def execute(self, command):\n \"\"\"\"execute command method\"\"\"\n\n\nclass LocalProtocol(Protocol):\n \"\"\"simple protocol for using bots within app\"\"\"\n\n def __init__(self, command_executor):\n self._command_executor = command_executor\n\n def execute(self, command):\n if not self._command_executor.has_executor(command.name):\n return Protocol.FAIL\n try:\n result = self._command_executor.execute(command)\n except:\n result = Protocol.FAIL\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Protocol:\n \"\"\"base protocol class\"\"\"\n __metaclass__ = ABCMeta\n FAIL = 'Failed'\n\n @abstractmethod\n def execute(self, command):\n \"\"\"\"execute command method\"\"\"\n\n\nclass LocalProtocol(Protocol):\n \"\"\"simple protocol for using bots within app\"\"\"\n\n def __init__(self, command_executor):\n self._command_executor = command_executor\n\n def execute(self, command):\n if not self._command_executor.has_executor(command.name):\n return Protocol.FAIL\n try:\n result = self._command_executor.execute(command)\n except:\n result = Protocol.FAIL\n return result\n\n\n<mask token>\n",
"step-4": "from abc import ABCMeta, abstractmethod\n__author__ = 'Alexiy'\n\n\nclass Protocol:\n \"\"\"base protocol class\"\"\"\n __metaclass__ = ABCMeta\n FAIL = 'Failed'\n\n @abstractmethod\n def execute(self, command):\n \"\"\"\"execute command method\"\"\"\n\n\nclass LocalProtocol(Protocol):\n \"\"\"simple protocol for using bots within app\"\"\"\n\n def __init__(self, command_executor):\n self._command_executor = command_executor\n\n def execute(self, command):\n if not self._command_executor.has_executor(command.name):\n return Protocol.FAIL\n try:\n result = self._command_executor.execute(command)\n except:\n result = Protocol.FAIL\n return result\n\n\nProtocol.register(LocalProtocol)\n",
"step-5": null,
"step-ids": [
6,
7,
8,
11
]
}
|
[
6,
7,
8,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
if len(sys.argv) < 3:
sys.stderr.write(
'Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\n' % sys.argv[0])
sys.exit(1)
config_uri = sys.argv.pop(1)
if sys.argv[1] == 'bootstrap':
bootstrap_db(config_uri)
else:
engine = create_engine(config_uri)
db.configure(bind=engine)
context = MigrationContext.configure(engine.connect())
db_version = context.get_current_revision()
if not db_version:
sys.stderr.write(
"""Database not initialized.
Try this: "sortie-db-manage %s bootstrap\"
"""
% config_uri)
sys.exit(2)
cmd = ['alembic', '-c', config_uri] + sys.argv[1:]
print(subprocess.check_output(cmd))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import subprocess
import sys
from alembic.migration import MigrationContext
from ..lib.alembic import bootstrap_db
from ..lib.sqla import create_engine
from ..models import DBSession as db
def main():
if len(sys.argv) < 3:
sys.stderr.write(
'Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\n' % sys.argv[0])
sys.exit(1)
config_uri = sys.argv.pop(1)
if sys.argv[1] == 'bootstrap':
bootstrap_db(config_uri)
else:
engine = create_engine(config_uri)
db.configure(bind=engine)
context = MigrationContext.configure(engine.connect())
db_version = context.get_current_revision()
if not db_version:
sys.stderr.write(
"""Database not initialized.
Try this: "sortie-db-manage %s bootstrap\"
"""
% config_uri)
sys.exit(2)
cmd = ['alembic', '-c', config_uri] + sys.argv[1:]
print(subprocess.check_output(cmd))
<|reserved_special_token_1|>
"""Wrapper over the command line migrate tool to better work with
config files."""
import subprocess
import sys
from alembic.migration import MigrationContext
from ..lib.alembic import bootstrap_db
from ..lib.sqla import create_engine
from ..models import DBSession as db
def main():
if len(sys.argv) < 3:
sys.stderr.write('Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\n'
% sys.argv[0])
sys.exit(1)
config_uri = sys.argv.pop(1)
if sys.argv[1] == 'bootstrap':
bootstrap_db(config_uri)
else:
engine = create_engine(config_uri)
db.configure(bind=engine)
context = MigrationContext.configure(engine.connect())
db_version = context.get_current_revision()
if not db_version:
sys.stderr.write('Database not initialized.\n'
'Try this: "sortie-db-manage %s bootstrap"\n'
% config_uri)
sys.exit(2)
cmd = ['alembic', '-c', config_uri] + sys.argv[1:]
print(subprocess.check_output(cmd))
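# Illustrative invocations of this wrapper (the .ini file name is hypothetical):
#   sortie-db-manage development.ini bootstrap      # initialize the schema
#   sortie-db-manage development.ini upgrade head   # forwards to: alembic -c development.ini upgrade head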
|
flexible
|
{
"blob_id": "7b459cf321f351e1485a9aef0ca23067f411e430",
"index": 7446,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write(\n 'Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n' % sys.argv[0])\n sys.exit(1)\n config_uri = sys.argv.pop(1)\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n if not db_version:\n sys.stderr.write(\n \"\"\"Database not initialized.\nTry this: \"sortie-db-manage %s bootstrap\\\"\n\"\"\"\n % config_uri)\n sys.exit(2)\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n print(subprocess.check_output(cmd))\n",
"step-3": "<mask token>\nimport subprocess\nimport sys\nfrom alembic.migration import MigrationContext\nfrom ..lib.alembic import bootstrap_db\nfrom ..lib.sqla import create_engine\nfrom ..models import DBSession as db\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write(\n 'Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n' % sys.argv[0])\n sys.exit(1)\n config_uri = sys.argv.pop(1)\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n if not db_version:\n sys.stderr.write(\n \"\"\"Database not initialized.\nTry this: \"sortie-db-manage %s bootstrap\\\"\n\"\"\"\n % config_uri)\n sys.exit(2)\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n print(subprocess.check_output(cmd))\n",
"step-4": "\"\"\"Wrapper over the command line migrate tool to better work with\nconfig files.\"\"\"\n\nimport subprocess\nimport sys\n\nfrom alembic.migration import MigrationContext\n\nfrom ..lib.alembic import bootstrap_db\nfrom ..lib.sqla import create_engine\nfrom ..models import DBSession as db\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write('Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n'\n % sys.argv[0])\n sys.exit(1)\n\n config_uri = sys.argv.pop(1)\n\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n\n if not db_version:\n sys.stderr.write('Database not initialized.\\n'\n 'Try this: \"sortie-db-manage %s bootstrap\"\\n'\n % config_uri)\n sys.exit(2)\n\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n\n print(subprocess.check_output(cmd))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def parseSeq(lines, seqName):
"""splits each column"""
data = []
for line in lines:
data.append(line.split(' '))
"""removes any spaces"""
for i in range(len(data)):
for j in range(data[i].count('')):
data[i].remove('')
"""deletes the numbers at beginning of column"""
for i in range(len(data)):
del data[i][0]
"""creates a list of lists from dna sequence"""
seqRows = []
for i in range(len(data)):
seqRow = []
seqRow.append(seqName)
for j in range(len(data[i])):
for k in range(len(data[i][j])):
seqRow.append(data[i][j][k])
seqRows.append(seqRow)
return seqRows
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parseSeq(lines, seqName):
"""splits each column"""
data = []
for line in lines:
data.append(line.split(' '))
"""removes any spaces"""
for i in range(len(data)):
for j in range(data[i].count('')):
data[i].remove('')
"""deletes the numbers at beginning of column"""
for i in range(len(data)):
del data[i][0]
"""creates a list of lists from dna sequence"""
seqRows = []
for i in range(len(data)):
seqRow = []
seqRow.append(seqName)
for j in range(len(data[i])):
for k in range(len(data[i][j])):
seqRow.append(data[i][j][k])
seqRows.append(seqRow)
return seqRows
<|reserved_special_token_0|>
for i in range(seqs):
print('What is the name of DNA sequence', i + 1, end='? ')
name = input('')
file = open(name + '.txt')
info = file.readlines()
masterList.append(parseSeq(info, name))
file.close()
<|reserved_special_token_0|>
for i in range(len(masterList)):
elems.append(len(masterList[i]))
<|reserved_special_token_0|>
for row in range(len(masterList[bigElem])):
for seq in range(len(masterList)):
try:
ws.append(masterList[seq][row])
except IndexError:
ws.append([])
ws.append([])
wb.save(saveFile + '.xlsx')
<|reserved_special_token_0|>
if match == 'y':
wb = opx.load_workbook(saveFile + '.xlsx')
sheet = wb['Sheet']
ws = wb.active
red = 'FFFF0000'
green = '0000FF00'
blue = 'FF0000FF'
greenFill = PatternFill(start_color=green, end_color=green, fill_type=
'solid')
redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')
blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')
ws['BK1'] = 'Matched'
ws['BK1'].fill = greenFill
ws['BK2'] = 'Unmatched'
ws['BK2'].fill = blueFill
lastRow = sheet.max_row + 1
end = int(lastRow / (seqs + 1))
for section in range(end):
startSec = (seqs + 1) * section + 1
endSec = (seqs + 1) * section + (seqs + 1)
for col in range(2, 62):
bp = []
for row in range(startSec, endSec):
cell = sheet.cell(row=row, column=col).value
bp.append(cell)
if bp.count(bp[0]) == seqs:
for row in range(startSec, endSec):
sheet.cell(row=row, column=col).fill = greenFill
else:
for row in range(startSec, endSec):
sheet.cell(row=row, column=col).fill = blueFill
wb.save(saveFile + '.xlsx')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
wb = Workbook(write_only=True)
ws = wb.create_sheet()
def parseSeq(lines, seqName):
"""splits each column"""
data = []
for line in lines:
data.append(line.split(' '))
"""removes any spaces"""
for i in range(len(data)):
for j in range(data[i].count('')):
data[i].remove('')
"""deletes the numbers at beginning of column"""
for i in range(len(data)):
del data[i][0]
"""creates a list of lists from dna sequence"""
seqRows = []
for i in range(len(data)):
seqRow = []
seqRow.append(seqName)
for j in range(len(data[i])):
for k in range(len(data[i][j])):
seqRow.append(data[i][j][k])
seqRows.append(seqRow)
return seqRows
seqs = int(input('How many DNA sequences do you want to compare? '))
saveFile = input('What do you want to name the spreadsheet? ')
<|reserved_special_token_0|>
masterList = []
<|reserved_special_token_0|>
for i in range(seqs):
print('What is the name of DNA sequence', i + 1, end='? ')
name = input('')
file = open(name + '.txt')
info = file.readlines()
masterList.append(parseSeq(info, name))
file.close()
<|reserved_special_token_0|>
elems = []
for i in range(len(masterList)):
elems.append(len(masterList[i]))
bigElem = elems.index(max(elems))
<|reserved_special_token_0|>
for row in range(len(masterList[bigElem])):
for seq in range(len(masterList)):
try:
ws.append(masterList[seq][row])
except IndexError:
ws.append([])
ws.append([])
wb.save(saveFile + '.xlsx')
<|reserved_special_token_0|>
match = input('Do you want to color match your sequence (y/n)? ')
if match == 'y':
wb = opx.load_workbook(saveFile + '.xlsx')
sheet = wb['Sheet']
ws = wb.active
red = 'FFFF0000'
green = '0000FF00'
blue = 'FF0000FF'
greenFill = PatternFill(start_color=green, end_color=green, fill_type=
'solid')
redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')
blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')
ws['BK1'] = 'Matched'
ws['BK1'].fill = greenFill
ws['BK2'] = 'Unmatched'
ws['BK2'].fill = blueFill
lastRow = sheet.max_row + 1
end = int(lastRow / (seqs + 1))
for section in range(end):
startSec = (seqs + 1) * section + 1
endSec = (seqs + 1) * section + (seqs + 1)
for col in range(2, 62):
bp = []
for row in range(startSec, endSec):
cell = sheet.cell(row=row, column=col).value
bp.append(cell)
if bp.count(bp[0]) == seqs:
for row in range(startSec, endSec):
sheet.cell(row=row, column=col).fill = greenFill
else:
for row in range(startSec, endSec):
sheet.cell(row=row, column=col).fill = blueFill
wb.save(saveFile + '.xlsx')
<|reserved_special_token_1|>
import openpyxl as opx
import pyperclip
from openpyxl import Workbook
from openpyxl.styles import PatternFill
wb = Workbook(write_only=True)
ws = wb.create_sheet()
def parseSeq(lines, seqName):
"""splits each column"""
data = []
for line in lines:
data.append(line.split(' '))
"""removes any spaces"""
for i in range(len(data)):
for j in range(data[i].count('')):
data[i].remove('')
"""deletes the numbers at beginning of column"""
for i in range(len(data)):
del data[i][0]
"""creates a list of lists from dna sequence"""
seqRows = []
for i in range(len(data)):
seqRow = []
seqRow.append(seqName)
for j in range(len(data[i])):
for k in range(len(data[i][j])):
seqRow.append(data[i][j][k])
seqRows.append(seqRow)
return seqRows
seqs = int(input('How many DNA sequences do you want to compare? '))
saveFile = input('What do you want to name the spreadsheet? ')
<|reserved_special_token_0|>
masterList = []
<|reserved_special_token_0|>
for i in range(seqs):
print('What is the name of DNA sequence', i + 1, end='? ')
name = input('')
file = open(name + '.txt')
info = file.readlines()
masterList.append(parseSeq(info, name))
file.close()
<|reserved_special_token_0|>
elems = []
for i in range(len(masterList)):
elems.append(len(masterList[i]))
bigElem = elems.index(max(elems))
<|reserved_special_token_0|>
for row in range(len(masterList[bigElem])):
for seq in range(len(masterList)):
try:
ws.append(masterList[seq][row])
except IndexError:
ws.append([])
ws.append([])
wb.save(saveFile + '.xlsx')
<|reserved_special_token_0|>
match = input('Do you want to color match your sequence (y/n)? ')
if match == 'y':
wb = opx.load_workbook(saveFile + '.xlsx')
sheet = wb['Sheet']
ws = wb.active
red = 'FFFF0000'
green = '0000FF00'
blue = 'FF0000FF'
greenFill = PatternFill(start_color=green, end_color=green, fill_type=
'solid')
redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')
blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')
ws['BK1'] = 'Matched'
ws['BK1'].fill = greenFill
ws['BK2'] = 'Unmatched'
ws['BK2'].fill = blueFill
lastRow = sheet.max_row + 1
end = int(lastRow / (seqs + 1))
for section in range(end):
startSec = (seqs + 1) * section + 1
endSec = (seqs + 1) * section + (seqs + 1)
for col in range(2, 62):
bp = []
for row in range(startSec, endSec):
cell = sheet.cell(row=row, column=col).value
bp.append(cell)
if bp.count(bp[0]) == seqs:
for row in range(startSec, endSec):
sheet.cell(row=row, column=col).fill = greenFill
else:
for row in range(startSec, endSec):
sheet.cell(row=row, column=col).fill = blueFill
wb.save(saveFile + '.xlsx')
<|reserved_special_token_1|>
import openpyxl as opx
import pyperclip
from openpyxl import Workbook
from openpyxl.styles import PatternFill
wb = Workbook(write_only=True)
ws = wb.create_sheet()
def parseSeq(lines,seqName):
'''splits each column'''
data = []
for line in lines: data.append(line.split(' '))
'''removes any spaces'''
for i in range(len(data)):
for j in range(data[i].count('')): data[i].remove('')
'''deletes the numbers at beginning of column'''
for i in range(len(data)): del data[i][0]
'''creates a list of lists from dna sequence'''
seqRows = []
for i in range(len(data)):
seqRow = []
seqRow.append(seqName)
for j in range(len(data[i])):
for k in range(len(data[i][j])):
seqRow.append(data[i][j][k])
seqRows.append(seqRow)
return seqRows
seqs = int(input('How many DNA sequences do you want to compare? '))
saveFile = input('What do you want to name the spreadsheet? ')
'''masterList contains each sequence, and each sequence is
broken into rows'''
masterList = []
'''reads files so they can be parsed'''
for i in range(seqs):
print('What is the name of DNA sequence',i+1,end='? ')
name = input('')
file = open(name+'.txt')
info = file.readlines()
masterList.append(parseSeq(info,name))
file.close()
'''sequence that contains the most rows is used for following loop'''
elems = []
for i in range(len(masterList)): elems.append(len(masterList[i]))
bigElem = elems.index(max(elems))
'''adds dna sequence to excel spreadsheet, 60 columns, x rows'''
for row in range(len(masterList[bigElem])):
for seq in range(len(masterList)):
try:
ws.append(masterList[seq][row])
except IndexError:
ws.append([])
ws.append([])
wb.save(saveFile+'.xlsx')
'''color match'''
match = input('Do you want to color match your sequence (y/n)? ')
if match == 'y':
wb = opx.load_workbook(saveFile+'.xlsx')
sheet = wb['Sheet']
ws = wb.active
red = 'FFFF0000'
green = '0000FF00'
blue = 'FF0000FF'
greenFill = PatternFill(start_color=green,
end_color=green,
fill_type='solid')
redFill = PatternFill(start_color=red,
end_color=red,
fill_type='solid')
blueFill = PatternFill(start_color=blue,
end_color=blue,
fill_type='solid')
ws['BK1'] = 'Matched'
ws['BK1'].fill = greenFill
ws['BK2'] = 'Unmatched'
ws['BK2'].fill = blueFill
lastRow = sheet.max_row + 1
end = int(lastRow / (seqs+1))
for section in range(end):
startSec = (seqs+1)*section + 1
endSec = (seqs+1)*section + (seqs+1)
for col in range(2,62):
bp = []
for row in range(startSec,endSec):
cell = sheet.cell(row=row,column=col).value
bp.append(cell)
if bp.count(bp[0]) == seqs:
for row in range(startSec,endSec):
sheet.cell(row=row,column=col).fill = greenFill
else:
for row in range(startSec,endSec):
sheet.cell(row=row,column=col).fill = blueFill
wb.save(saveFile+'.xlsx')
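# Note on the expected input (inferred from parseSeq above; illustrative):
# each <name>.txt holds lines like
#   1 ATGCTA GGCTAA ...
# i.e. a leading line number followed by space-separated groups of bases.
# parseSeq drops the number, flattens the groups into single characters,
# and prefixes each row with the sequence name before it is written out.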
|
flexible
|
{
"blob_id": "19e387cb731dad21e5ee50b0a9812df984c13f3b",
"index": 7890,
"step-1": "<mask token>\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? ')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n",
"step-3": "<mask token>\nwb = Workbook(write_only=True)\nws = wb.create_sheet()\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\nseqs = int(input('How many DNA sequences do you want to compare? '))\nsaveFile = input('What do you want to name the spreadsheet? ')\n<mask token>\nmasterList = []\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? ')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nelems = []\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\nbigElem = elems.index(max(elems))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nmatch = input('Do you want to color match your sequence (y/n)? ')\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n",
"step-4": "import openpyxl as opx\nimport pyperclip\nfrom openpyxl import Workbook\nfrom openpyxl.styles import PatternFill\nwb = Workbook(write_only=True)\nws = wb.create_sheet()\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\nseqs = int(input('How many DNA sequences do you want to compare? '))\nsaveFile = input('What do you want to name the spreadsheet? ')\n<mask token>\nmasterList = []\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? ')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nelems = []\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\nbigElem = elems.index(max(elems))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nmatch = input('Do you want to color match your sequence (y/n)? ')\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n",
"step-5": "import openpyxl as opx\r\nimport pyperclip\r\nfrom openpyxl import Workbook\r\nfrom openpyxl.styles import PatternFill\r\nwb = Workbook(write_only=True)\r\nws = wb.create_sheet()\r\n\r\n\r\ndef parseSeq(lines,seqName):\r\n \r\n '''splits each column'''\r\n data = []\r\n for line in lines: data.append(line.split(' '))\r\n '''removes any spaces'''\r\n for i in range(len(data)):\r\n for j in range(data[i].count('')): data[i].remove('')\r\n '''deletes the numbers at beginning of column'''\r\n for i in range(len(data)): del data[i][0]\r\n '''creates a list of lists from dna sequence'''\r\n seqRows = []\r\n for i in range(len(data)):\r\n seqRow = []\r\n seqRow.append(seqName)\r\n for j in range(len(data[i])):\r\n for k in range(len(data[i][j])):\r\n seqRow.append(data[i][j][k])\r\n seqRows.append(seqRow) \r\n return seqRows\r\n\r\nseqs = int(input('How many DNA sequences do you want to compare? '))\r\nsaveFile = input('What do you want to name the spreadsheet? ')\r\n\r\n'''masterList contains each sequence, and each sequence is\r\n broken into rows'''\r\nmasterList = []\r\n'''reads files so they can be parsed'''\r\nfor i in range(seqs):\r\n print('What is the name of DNA sequence',i+1,end='? ')\r\n name = input('')\r\n file = open(name+'.txt')\r\n info = file.readlines()\r\n masterList.append(parseSeq(info,name))\r\n file.close()\r\n\r\n'''sequence that contains the most rows is used for following loop'''\r\nelems = []\r\nfor i in range(len(masterList)): elems.append(len(masterList[i]))\r\nbigElem = elems.index(max(elems))\r\n \r\n'''adds dna sequence to excel spreadsheet, 60 columns, x rows'''\r\nfor row in range(len(masterList[bigElem])):\r\n for seq in range(len(masterList)):\r\n try:\r\n ws.append(masterList[seq][row])\r\n except IndexError:\r\n ws.append([])\r\n ws.append([])\r\n \r\nwb.save(saveFile+'.xlsx')\r\n\r\n'''color match'''\r\nmatch = input('Do you want to color match your sequence (y/n)? ')\r\nif match == 'y':\r\n wb = opx.load_workbook(saveFile+'.xlsx')\r\n sheet = wb['Sheet']\r\n ws = wb.active\r\n\r\n\r\n red = 'FFFF0000'\r\n green = '0000FF00'\r\n blue = 'FF0000FF'\r\n\r\n greenFill = PatternFill(start_color=green,\r\n end_color=green,\r\n fill_type='solid')\r\n redFill = PatternFill(start_color=red,\r\n end_color=red,\r\n fill_type='solid')\r\n blueFill = PatternFill(start_color=blue,\r\n end_color=blue,\r\n fill_type='solid')\r\n\r\n\r\n ws['BK1'] = 'Matched'\r\n ws['BK1'].fill = greenFill\r\n ws['BK2'] = 'Unmatched'\r\n ws['BK2'].fill = blueFill\r\n\r\n lastRow = sheet.max_row + 1\r\n end = int(lastRow / (seqs+1))\r\n\r\n for section in range(end):\r\n startSec = (seqs+1)*section + 1\r\n endSec = (seqs+1)*section + (seqs+1)\r\n for col in range(2,62):\r\n bp = []\r\n for row in range(startSec,endSec):\r\n cell = sheet.cell(row=row,column=col).value\r\n bp.append(cell)\r\n if bp.count(bp[0]) == seqs:\r\n for row in range(startSec,endSec):\r\n sheet.cell(row=row,column=col).fill = greenFill\r\n else:\r\n for row in range(startSec,endSec):\r\n sheet.cell(row=row,column=col).fill = blueFill\r\n wb.save(saveFile+'.xlsx')\r\n\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
'''
Created on 3 Jul 2009
@author: charanpal
An abstract base class which represents a graph generator. The graph generator
takes an existing empty graph and produces edges over it.
'''
from apgl.util.Util import Util
class AbstractGraphGenerator(object):
def generate(self, graph):
Util.abstract()
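
# Minimal concrete subclass sketch (illustrative addition). The graph API
# used here, addEdge(i, j), is an assumption and not taken from this file.
class FixedEdgeGenerator(AbstractGraphGenerator):
    def __init__(self, edges):
        self.edges = edges

    def generate(self, graph):
        # Write a fixed edge list onto the supplied empty graph.
        for i, j in self.edges:
            graph.addEdge(i, j)
        return graph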
|
normal
|
{
"blob_id": "e37e468d8a41b8711fb0eb4ddec7db67691f9156",
"index": 488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AbstractGraphGenerator(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AbstractGraphGenerator(object):\n\n def generate(self, graph):\n Util.abstract()\n",
"step-4": "<mask token>\nfrom apgl.util.Util import Util\n\n\nclass AbstractGraphGenerator(object):\n\n def generate(self, graph):\n Util.abstract()\n",
"step-5": "'''\r\nCreated on 3 Jul 2009\r\n\r\n@author: charanpal\r\n\r\nAn abstract base class which represents a graph generator. The graph generator\r\ntakes an existing empty graph and produces edges over it. \r\n'''\r\nfrom apgl.util.Util import Util\r\n\r\nclass AbstractGraphGenerator(object):\r\n def generate(self, graph):\r\n Util.abstract() ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Configure the module search path for the directory this file lives in
import os,sys
# Step 1: get the absolute path of the current file
file_path = os.path.abspath(__file__)
# Step 2: from that path, get the path of the directory containing the file
dir_path = os.path.dirname(file_path)
# Step 3: add that directory path to the module search path
sys.path.append(dir_path)
# Step 4: dynamically point Django at our settings module
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gulishop.settings")
# Step 5: initialize the configured environment
import django
django.setup()
# This import must stay below django.setup(), not above it
from goods.models import GoodsCategory
from db_tools.data.category_data import row_data
for lev1 in row_data:
cat1 = GoodsCategory()
cat1.name = lev1['name']
cat1.code = lev1['code'] if lev1['code'] else ''
cat1.category_type = 1
cat1.save()
for lev2 in lev1['sub_categorys']:
cat2 = GoodsCategory()
cat2.name = lev2['name']
cat2.code = lev2['code'] if lev2['code'] else ''
cat2.category_type = 2
cat2.parent_category = cat1
cat2.save()
for lev3 in lev2['sub_categorys']:
cat3 = GoodsCategory()
cat3.name = lev3['name']
cat3.code = lev3['code'] if lev3['code'] else ''
cat3.category_type = 3
cat3.parent_category = cat2
cat3.save()
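
# Shape of row_data as implied by the loops above (values hypothetical):
# row_data = [
#     {'name': 'Electronics', 'code': 'e', 'sub_categorys': [
#         {'name': 'Phones', 'code': 'ep', 'sub_categorys': [
#             {'name': 'Smartphones', 'code': 'eps', 'sub_categorys': []},
#         ]},
#     ]},
# ]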
|
normal
|
{
"blob_id": "35ae9c86594b50bbe4a67d2cc6b20efc6f6fdc64",
"index": 295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(dir_path)\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')\n<mask token>\ndjango.setup()\n<mask token>\nfor lev1 in row_data:\n cat1 = GoodsCategory()\n cat1.name = lev1['name']\n cat1.code = lev1['code'] if lev1['code'] else ''\n cat1.category_type = 1\n cat1.save()\n for lev2 in lev1['sub_categorys']:\n cat2 = GoodsCategory()\n cat2.name = lev2['name']\n cat2.code = lev2['code'] if lev2['code'] else ''\n cat2.category_type = 2\n cat2.parent_category = cat1\n cat2.save()\n for lev3 in lev2['sub_categorys']:\n cat3 = GoodsCategory()\n cat3.name = lev3['name']\n cat3.code = lev3['code'] if lev3['code'] else ''\n cat3.category_type = 3\n cat3.parent_category = cat2\n cat3.save()\n",
"step-3": "<mask token>\nfile_path = os.path.abspath(__file__)\ndir_path = os.path.dirname(file_path)\nsys.path.append(dir_path)\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')\n<mask token>\ndjango.setup()\n<mask token>\nfor lev1 in row_data:\n cat1 = GoodsCategory()\n cat1.name = lev1['name']\n cat1.code = lev1['code'] if lev1['code'] else ''\n cat1.category_type = 1\n cat1.save()\n for lev2 in lev1['sub_categorys']:\n cat2 = GoodsCategory()\n cat2.name = lev2['name']\n cat2.code = lev2['code'] if lev2['code'] else ''\n cat2.category_type = 2\n cat2.parent_category = cat1\n cat2.save()\n for lev3 in lev2['sub_categorys']:\n cat3 = GoodsCategory()\n cat3.name = lev3['name']\n cat3.code = lev3['code'] if lev3['code'] else ''\n cat3.category_type = 3\n cat3.parent_category = cat2\n cat3.save()\n",
"step-4": "import os, sys\nfile_path = os.path.abspath(__file__)\ndir_path = os.path.dirname(file_path)\nsys.path.append(dir_path)\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')\nimport django\ndjango.setup()\nfrom goods.models import GoodsCategory\nfrom db_tools.data.category_data import row_data\nfor lev1 in row_data:\n cat1 = GoodsCategory()\n cat1.name = lev1['name']\n cat1.code = lev1['code'] if lev1['code'] else ''\n cat1.category_type = 1\n cat1.save()\n for lev2 in lev1['sub_categorys']:\n cat2 = GoodsCategory()\n cat2.name = lev2['name']\n cat2.code = lev2['code'] if lev2['code'] else ''\n cat2.category_type = 2\n cat2.parent_category = cat1\n cat2.save()\n for lev3 in lev2['sub_categorys']:\n cat3 = GoodsCategory()\n cat3.name = lev3['name']\n cat3.code = lev3['code'] if lev3['code'] else ''\n cat3.category_type = 3\n cat3.parent_category = cat2\n cat3.save()\n",
"step-5": "#配置我们文件所在目录的搜寻环境\r\nimport os,sys\r\n#第一步先拿到当前文件的路径\r\nfile_path = os.path.abspath(__file__)\r\n#第二步 根据这个路径去拿到这个文件所在目录的路径\r\ndir_path = os.path.dirname(file_path)\r\n#第三步:讲这个目录的路径添加到我们的搜寻环境当中\r\nsys.path.append(dir_path)\r\n#第四步,动态设置我们的setting文件\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"gulishop.settings\")\r\n#第五步,让设置好的环境初始化生效\r\nimport django\r\ndjango.setup()\r\n\r\n#这一行,不能放在上面\r\nfrom goods.models import GoodsCategory\r\nfrom db_tools.data.category_data import row_data\r\n\r\nfor lev1 in row_data:\r\n cat1 = GoodsCategory()\r\n cat1.name = lev1['name']\r\n cat1.code = lev1['code'] if lev1['code'] else ''\r\n cat1.category_type = 1\r\n cat1.save()\r\n for lev2 in lev1['sub_categorys']:\r\n cat2 = GoodsCategory()\r\n cat2.name = lev2['name']\r\n cat2.code = lev2['code'] if lev2['code'] else ''\r\n cat2.category_type = 2\r\n cat2.parent_category = cat1\r\n cat2.save()\r\n for lev3 in lev2['sub_categorys']:\r\n cat3 = GoodsCategory()\r\n cat3.name = lev3['name']\r\n cat3.code = lev3['code'] if lev3['code'] else ''\r\n cat3.category_type = 3\r\n cat3.parent_category = cat2\r\n cat3.save()\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def main(arguments):
docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (
'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]
docker_username = os.environ.get('DOCKER_USERNAME', None)
if docker_username is None:
docker_username = input('docker hub user (DOCKER_USERNAME) ? ')
docker_password = os.environ.get('DOCKER_PASSWORD', None)
if docker_password is None:
docker_password = getpass.getpass(
'docker hub password (DOCKER_PASSWORD) ? ')
_system('docker login -u {0} -p {1}'.format(docker_username,
docker_password), logged=False)
for docker_file, docker_image in docker:
_system('docker build -f {0} -t {1} {2}'.format(docker_file,
docker_image, ROOT_DIR))
for _, docker_image in docker:
_system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))
_system('docker push {1}/{0}'.format(docker_image, docker_username))
def _system(cmd, logged=True):
if logged:
print('$ {0}'.format(cmd))
if os.system(cmd) > 0:
raise OSError()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(arguments):
docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (
'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]
docker_username = os.environ.get('DOCKER_USERNAME', None)
if docker_username is None:
docker_username = input('docker hub user (DOCKER_USERNAME) ? ')
docker_password = os.environ.get('DOCKER_PASSWORD', None)
if docker_password is None:
docker_password = getpass.getpass(
'docker hub password (DOCKER_PASSWORD) ? ')
_system('docker login -u {0} -p {1}'.format(docker_username,
docker_password), logged=False)
for docker_file, docker_image in docker:
_system('docker build -f {0} -t {1} {2}'.format(docker_file,
docker_image, ROOT_DIR))
for _, docker_image in docker:
_system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))
_system('docker push {1}/{0}'.format(docker_image, docker_username))
def _system(cmd, logged=True):
if logged:
print('$ {0}'.format(cmd))
if os.system(cmd) > 0:
raise OSError()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))
ROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
def main(arguments):
docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (
'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]
docker_username = os.environ.get('DOCKER_USERNAME', None)
if docker_username is None:
docker_username = input('docker hub user (DOCKER_USERNAME) ? ')
docker_password = os.environ.get('DOCKER_PASSWORD', None)
if docker_password is None:
docker_password = getpass.getpass(
'docker hub password (DOCKER_PASSWORD) ? ')
_system('docker login -u {0} -p {1}'.format(docker_username,
docker_password), logged=False)
for docker_file, docker_image in docker:
_system('docker build -f {0} -t {1} {2}'.format(docker_file,
docker_image, ROOT_DIR))
for _, docker_image in docker:
_system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))
_system('docker push {1}/{0}'.format(docker_image, docker_username))
def _system(cmd, logged=True):
if logged:
print('$ {0}'.format(cmd))
if os.system(cmd) > 0:
raise OSError()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
<|reserved_special_token_1|>
import getpass
import os
import subprocess
import sys
from builtins import input
SCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))
ROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
def main(arguments):
docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (
'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]
docker_username = os.environ.get('DOCKER_USERNAME', None)
if docker_username is None:
docker_username = input('docker hub user (DOCKER_USERNAME) ? ')
docker_password = os.environ.get('DOCKER_PASSWORD', None)
if docker_password is None:
docker_password = getpass.getpass(
'docker hub password (DOCKER_PASSWORD) ? ')
_system('docker login -u {0} -p {1}'.format(docker_username,
docker_password), logged=False)
for docker_file, docker_image in docker:
_system('docker build -f {0} -t {1} {2}'.format(docker_file,
docker_image, ROOT_DIR))
for _, docker_image in docker:
_system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))
_system('docker push {1}/{0}'.format(docker_image, docker_username))
def _system(cmd, logged=True):
if logged:
print('$ {0}'.format(cmd))
if os.system(cmd) > 0:
raise OSError()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
<|reserved_special_token_1|>
#!/usr/bin/env python
# On CI, you can pass the logging and the password of dockerhub through
# the environment variables DOCKER_USERNAME and DOCKER_PASSWORD
import getpass
import os
import subprocess
import sys
from builtins import input
SCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))
ROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
def main(arguments):
docker = [
('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'),
('Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra'),
]
docker_username = os.environ.get('DOCKER_USERNAME', None)
if docker_username is None:
docker_username = input('docker hub user (DOCKER_USERNAME) ? ')
docker_password = os.environ.get('DOCKER_PASSWORD', None)
if docker_password is None:
docker_password = getpass.getpass('docker hub password (DOCKER_PASSWORD) ? ')
_system('docker login -u {0} -p {1}'.format(docker_username, docker_password), logged=False)
for docker_file, docker_image in docker:
_system('docker build -f {0} -t {1} {2}'.format(docker_file, docker_image, ROOT_DIR))
for _, docker_image in docker:
_system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))
_system('docker push {1}/{0}'.format(docker_image, docker_username))
def _system(cmd, logged = True):
if logged:
print('$ {0}'.format(cmd))
if os.system(cmd) > 0:
raise OSError()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
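
# Illustrative non-interactive run (script name hypothetical):
#   DOCKER_USERNAME=me DOCKER_PASSWORD=secret python build_and_push.py
# With both variables set no prompts appear; each image is built from its
# Dockerfile, tagged as <username>/<image>, and pushed to Docker Hub.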
|
flexible
|
{
"blob_id": "1ad40ef3aa7c81b6eee4fe0b98bcdd2f1110ef8d",
"index": 5990,
"step-1": "<mask token>\n\n\ndef main(arguments):\n docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (\n 'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass(\n 'docker hub password (DOCKER_PASSWORD) ? ')\n _system('docker login -u {0} -p {1}'.format(docker_username,\n docker_password), logged=False)\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file,\n docker_image, ROOT_DIR))\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\n\ndef _system(cmd, logged=True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(arguments):\n docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (\n 'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass(\n 'docker hub password (DOCKER_PASSWORD) ? ')\n _system('docker login -u {0} -p {1}'.format(docker_username,\n docker_password), logged=False)\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file,\n docker_image, ROOT_DIR))\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\n\ndef _system(cmd, logged=True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n",
"step-3": "<mask token>\nSCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))\nROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))\n\n\ndef main(arguments):\n docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (\n 'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass(\n 'docker hub password (DOCKER_PASSWORD) ? ')\n _system('docker login -u {0} -p {1}'.format(docker_username,\n docker_password), logged=False)\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file,\n docker_image, ROOT_DIR))\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\n\ndef _system(cmd, logged=True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n",
"step-4": "import getpass\nimport os\nimport subprocess\nimport sys\nfrom builtins import input\nSCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))\nROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))\n\n\ndef main(arguments):\n docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (\n 'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass(\n 'docker hub password (DOCKER_PASSWORD) ? ')\n _system('docker login -u {0} -p {1}'.format(docker_username,\n docker_password), logged=False)\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file,\n docker_image, ROOT_DIR))\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\n\ndef _system(cmd, logged=True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n",
"step-5": "#!/usr/bin/env python\n# On CI, you can pass the logging and the password of dockerhub through\n# the environment variables DOCKER_USERNAME and DOCKER_PASSWORD\n\nimport getpass\nimport os\nimport subprocess\nimport sys\n\nfrom builtins import input\n\nSCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))\nROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))\n\ndef main(arguments):\n docker = [\n ('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'),\n ('Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra'),\n ]\n\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass('docker hub password (DOCKER_PASSWORD) ? ')\n\n _system('docker login -u {0} -p {1}'.format(docker_username, docker_password), logged=False)\n\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file, docker_image, ROOT_DIR))\n\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\ndef _system(cmd, logged = True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 15:19:49 2018
@author: haoyu
"""
import numpy as np
def train_test_split(X, y, test_ratio = 0.2, seed = None):
    '''Split the data X and y into X_train, X_test, y_train, y_test according to test_ratio'''
assert X.shape[0] == y.shape[0], \
'the size of X must be equal to the size of y'
assert 0.0 <= test_ratio <=1.0, \
'test_ratio must be valid'
if seed:
np.random.seed(seed)
    shuffle_indexes = np.random.permutation(len(X))  # shuffle the order to obtain permuted indices
test_size = int(len(X) * test_ratio)
test_indexes = shuffle_indexes[:test_size]
train_indexes = shuffle_indexes[test_size:]
X_train = X[train_indexes]
y_train = y[train_indexes]
X_test = X[test_indexes]
y_test = y[test_indexes]
return X_train, X_test, y_train, y_test
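
# Minimal usage sketch (synthetic data; illustrative addition):
if __name__ == '__main__':
    X_demo = np.arange(20).reshape(10, 2)
    y_demo = np.arange(10)
    X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_ratio=0.2, seed=42)
    print(X_tr.shape, X_te.shape)  # (8, 2) (2, 2)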
|
normal
|
{
"blob_id": "beda3d13e3dc12f7527f5c5ba8a0eb05c2734fd9",
"index": 6133,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_test_split(X, y, test_ratio=0.2, seed=None):\n \"\"\"将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test\"\"\"\n assert X.shape[0] == y.shape[0\n ], 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <= 1.0, 'test_ratio must be valid'\n if seed:\n np.random.seed(seed)\n shuffle_indexes = np.random.permutation(len(X))\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n return X_train, X_test, y_train, y_test\n",
"step-3": "<mask token>\nimport numpy as np\n\n\ndef train_test_split(X, y, test_ratio=0.2, seed=None):\n \"\"\"将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test\"\"\"\n assert X.shape[0] == y.shape[0\n ], 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <= 1.0, 'test_ratio must be valid'\n if seed:\n np.random.seed(seed)\n shuffle_indexes = np.random.permutation(len(X))\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n return X_train, X_test, y_train, y_test\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 4 15:19:49 2018\n\n@author: haoyu\n\"\"\"\nimport numpy as np\n\ndef train_test_split(X, y, test_ratio = 0.2, seed = None):\n '''将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test'''\n assert X.shape[0] == y.shape[0], \\\n 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <=1.0, \\\n 'test_ratio must be valid'\n \n if seed:\n np.random.seed(seed)\n \n shuffle_indexes = np.random.permutation(len(X))#打乱顺序获得索引\n\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n \n return X_train, X_test, y_train, y_test",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class recommendationsys:
def __init__(self, nyear):
self.activityyear = 10
self.debug = 0
self.nremd = 3
PROJECT_DIRECTORY = 'output/project/' + project_name
self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'
self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'
self.f_years = PROJECT_DIRECTORY + '/years_target.txt'
self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'
self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'
self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'
self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'
self.npaper = 10
self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())
self.keywordthreshold = 10
self.debugmsg('start init', 0)
self.docluster()
self.initNLTKConditionalFreqDist()
self.filterN = len(self.authors)
self.debugmsg('end init\n', 0)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def resentpublicationsidx(self, authoridx):
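        """Return the author's publications as JSON-ready dicts, newest first;
        returns [] if the newest paper predates self.nyear or the author has
        fewer than self.npaper papers."""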
resentpub = []
idx = self.authortitlesidx[authoridx]
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:
return resentpub
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([('name', author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime(
'%Y-%m-%d %H:%M:%S')
resentpub.append(OrderedDict([('title', self.rawtitles[i]), (
'authors', authorsjson), ('year', date), (
'publicationVenue', self.booktitle[i])]))
return resentpub
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def initNLTKConditionalFreqDist(self):
self.debugmsg('start initNLTK CFD\n', 0)
pairs = []
pairs = nltk.bigrams(self.allcorp)
self.cfd = nltk.ConditionalFreqDist(pairs)
self.debugmsg('end initNLTK CFD\n', 0)
def keyword(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
contentjson = []
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
contentjson.append(OrderedDict([('topic', topic[0])]))
return contentjson
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def findpluralbigram(self, keywordsinfo):
c = []
for i in keywordsinfo:
t = i[0].split()
t1 = ''
for n in t:
if n[-1] == 's':
n = n[:-1]
t1 = t1 + n
c.append(t1)
uniqbigram = list(set(c))
pluralidx = []
for i in uniqbigram:
count = c.count(i)
if count > 1:
cc = []
for n in range(len(c)):
if i == c[n]:
cc.append(n)
pluralidx.append(cc)
return pluralidx
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def mycoauthorsV3(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4byidx(self, idx):
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def mynewcoauthors(self, userIdx, yearPre, yearCur):
coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)
coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)
newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)
return newCoauthors
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def contentsimilarity(self, userX, userY, year):
contentX = self.getcontentbyyear(userX, year)
if not contentX:
return -1
contentX = contentX[0:10]
contentY = self.getcontentbyyear(userY, year)
if not contentY:
return -1
contentY = contentY[0:10]
contents = []
for i in contentX:
contents.extend(i.split(' '))
lenx = len(contents)
for i in contentY:
contents.extend(i.split(' '))
stemmer = nltk.stem.PorterStemmer()
stems = [stemmer.stem(t) for t in contents]
newcontentX = stems[0:lenx]
newcontentY = stems[lenx:]
vectorizer = CountVectorizer()
v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(
newcontentY)])
        # pairwise_distances with metric='cosine' returns cosine *distance*
        # (1 - cosine similarity), despite the variable name
        cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]
        return cosinesimilarity
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def radiusofcluster(self, labels, nth, dismatrix):
idx = np.where(labels == nth)[0]
dis = dismatrix[idx, nth]
self.mindis = min(dis)
self.maxdis = max(dis)
self.radius = self.maxdis
<|reserved_special_token_0|>
def showcontents(self, labels, nth, allcontents):
contents = []
idx = np.where(labels == nth)
idx = np.array(idx)
idx = idx.flatten()
for i in idx:
contents.append(allcontents[i])
return contents
<|reserved_special_token_0|>
def digstring(self, s):
for i in s:
if i.isdigit():
return True
return False
<|reserved_special_token_0|>
def distance(self, a, b):
if scipy.sparse.issparse(a):
a = a.toarray()
a = a[0]
if scipy.sparse.issparse(b):
b = b.toarray()
b = b[0]
a = np.array(a)
b = np.array(b)
return np.sqrt(sum(np.square(a - b)))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def per_org_label(self):
f = codecs.open(self.f_perorglabel, 'r', 'utf-8')
labels = {}
for line in f:
items = line.split()
labels[items[0]] = items[1]
f.close()
self.labels = labels
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def recommendationV4(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' +
str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
else:
name = name.decode('utf-8')
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.
tfidfarray, featuretfidf, 0)
self.debugmsg('end distance computing \n', 0)
self.debugmsg('start otsuifilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
recommendations = []
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
remdinfo = self.getremdinfoV2(i)
                    if remdinfo and i not in remdidx:  # '~count' was always truthy; use a membership test
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
i = i + 1
if i == len(self.closeauthordis) or backwardcount > 1:
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
random.shuffle(recommendations)
self.result = OrderedDict([('name', name), ('recommendations',
recommendations)])
self.debugmsg('end recommendationV4 \n', 0)
return self.result
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getremdinfo(self, clsidx):
remdidx = self.closeauthors[clsidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
if idx.count(self.myidx):
return []
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([('name', name), ('relevancy', self.
closeauthordis[clsidx]), ('coAuthors', coauthors), (
'researchTopics', researchtopic), ('recentPublications',
recentpub)])
else:
return []
<|reserved_special_token_0|>
def getremdinfoV2(self, clsidx):
remdidx = self.closeauthors[clsidx]
username = self.authors[self.myidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
mentionlist = self.mentionnetwork[username]
if name in mentionlist:
return []
remdid = self.id_name[name]
if self.labels[remdid] == 'org':
return []
coauthors = self.mycoauthorsV4bymentionlist(name)
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([('name', name), ('relevancy', self.
closeauthordis[clsidx]), ('coAuthors', coauthors), (
'researchTopics', researchtopic), ('recentPublications',
recentpub)])
else:
return []
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def filteredrecommendations(self, n):
recommendations = []
self.filteridx = []
self.filteredauthors = []
i = 0
for name in self.recommendauthor:
[coauthors, idx, c] = self.mycoauthorsV4(name)
if idx.count(self.myidx):
i = i + 1
continue
recentpub = self.resentpublications(name)
if not recentpub:
i = i + 1
continue
self.filteredauthors.append(name)
researchtopic = []
researchtopic.append(OrderedDict([('topic', 'TBD')]))
recommendations.append(OrderedDict([('name', name), (
'relevancy', self.closeauthordis[i]), ('coAuthors',
coauthors), ('researchTopics', researchtopic), (
'recentPublications', recentpub)]))
self.filteridx.append(i)
i = i + 1
if len(self.filteridx) == n:
break
return recommendations
<|reserved_special_token_0|>
def thresholdrecommendations(self, remds, n):
thredremd = []
self.trd = np.zeros(3)
tdis = self.filteredcloseauthordis()
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis > t1])
self.trd[1] = len(tdis[tdis < t1])
self.trd[2] = len(tdis) - len(tdis[tdis > t2])
for i in range(3):
for j in range(int(n / 3)):
k = int(self.trd[i] + j)
name = remds[k]['name']
researchtopic = self.keyword(name)
remds[k]['researchTopics'] = researchtopic
thredremd.append(remds[k])
return thredremd
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class recommendationsys:
def __init__(self, nyear):
self.activityyear = 10
self.debug = 0
self.nremd = 3
PROJECT_DIRECTORY = 'output/project/' + project_name
self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'
self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'
self.f_years = PROJECT_DIRECTORY + '/years_target.txt'
self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'
self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'
self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'
self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'
self.npaper = 10
self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())
self.keywordthreshold = 10
self.debugmsg('start init', 0)
self.docluster()
self.initNLTKConditionalFreqDist()
self.filterN = len(self.authors)
self.debugmsg('end init\n', 0)
<|reserved_special_token_0|>
def debugmsg(self, msg, lvl):
if self.debug <= lvl:
print(msg)
<|reserved_special_token_0|>
def resentpublicationsidx(self, authoridx):
resentpub = []
idx = self.authortitlesidx[authoridx]
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:
return resentpub
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([('name', author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime(
'%Y-%m-%d %H:%M:%S')
resentpub.append(OrderedDict([('title', self.rawtitles[i]), (
'authors', authorsjson), ('year', date), (
'publicationVenue', self.booktitle[i])]))
return resentpub
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def initNLTKConditionalFreqDist(self):
self.debugmsg('start initNLTK CFD\n', 0)
pairs = []
pairs = nltk.bigrams(self.allcorp)
self.cfd = nltk.ConditionalFreqDist(pairs)
self.debugmsg('end initNLTK CFD\n', 0)
def keyword(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
contentjson = []
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
contentjson.append(OrderedDict([('topic', topic[0])]))
return contentjson
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def findpluralbigram(self, keywordsinfo):
c = []
for i in keywordsinfo:
t = i[0].split()
t1 = ''
for n in t:
if n[-1] == 's':
n = n[:-1]
t1 = t1 + n
c.append(t1)
uniqbigram = list(set(c))
pluralidx = []
for i in uniqbigram:
count = c.count(i)
if count > 1:
cc = []
for n in range(len(c)):
if i == c[n]:
cc.append(n)
pluralidx.append(cc)
return pluralidx
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def mycoauthorsV3(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4byidx(self, idx):
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def mynewcoauthors(self, userIdx, yearPre, yearCur):
coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)
coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)
newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)
return newCoauthors
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def secondhoopties(self, userX, userY, year):
result = []
        # use the year argument (previously hardcoded to 2016)
        coauthors1, count1 = self.mycoauthorsbyyear(userX, year)
        for i in coauthors1:
            coauthors2, count2 = self.mycoauthorsbyyear(i, year)
            for n in coauthors2:
                coauthors3, count3 = self.mycoauthorsbyyear(n, year)
                if userY in coauthors3:
                    result.append([[i, n], [count1[coauthors1.index(i)],
                        count2[coauthors2.index(n)], count3[coauthors3.
                        index(userY)]]])
        return result
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def contentsimilarity(self, userX, userY, year):
contentX = self.getcontentbyyear(userX, year)
if not contentX:
return -1
contentX = contentX[0:10]
contentY = self.getcontentbyyear(userY, year)
if not contentY:
return -1
contentY = contentY[0:10]
contents = []
for i in contentX:
contents.extend(i.split(' '))
lenx = len(contents)
for i in contentY:
contents.extend(i.split(' '))
stemmer = nltk.stem.PorterStemmer()
stems = [stemmer.stem(t) for t in contents]
newcontentX = stems[0:lenx]
newcontentY = stems[lenx:]
vectorizer = CountVectorizer()
v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(
newcontentY)])
        # pairwise_distances with metric='cosine' returns cosine *distance*
        # (1 - cosine similarity), despite the variable name
        cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]
        return cosinesimilarity
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def textnormalizing(self, text):
c = 0
for i in text:
if i[-1] == 's':
ii = i[:-1]
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-2:] == 'es':
ii = i[:-2]
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-3:] == 'ies':
ii = i[:-3] + 'y'
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-3:] == 'ing':
ii = i[:-3]
if ii in text:
text[c] = ii
c = c + 1
continue
ii = i[:-4]
if ii in text:
text[c] = ii
c = c + 1
continue
ii = i[:-3] + 'e'
if ii in text:
                text[c] = ii
                c = c + 1
                continue
c = c + 1
return text
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def radiusofcluster(self, labels, nth, dismatrix):
idx = np.where(labels == nth)[0]
dis = dismatrix[idx, nth]
self.mindis = min(dis)
self.maxdis = max(dis)
self.radius = self.maxdis
<|reserved_special_token_0|>
def showcontents(self, labels, nth, allcontents):
contents = []
idx = np.where(labels == nth)
idx = np.array(idx)
idx = idx.flatten()
for i in idx:
contents.append(allcontents[i])
return contents
<|reserved_special_token_0|>
def digstring(self, s):
for i in s:
if i.isdigit():
return True
return False
<|reserved_special_token_0|>
def distance(self, a, b):
if scipy.sparse.issparse(a):
a = a.toarray()
a = a[0]
if scipy.sparse.issparse(b):
b = b.toarray()
b = b[0]
a = np.array(a)
b = np.array(b)
return np.sqrt(sum(np.square(a - b)))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def per_org_label(self):
f = codecs.open(self.f_perorglabel, 'r', 'utf-8')
labels = {}
for line in f:
items = line.split()
labels[items[0]] = items[1]
f.close()
self.labels = labels
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def recommendationV4(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' +
str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
else:
name = name.decode('utf-8')
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.
tfidfarray, featuretfidf, 0)
self.debugmsg('end distance computing \n', 0)
self.debugmsg('start otsuifilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
recommendations = []
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
remdinfo = self.getremdinfoV2(i)
                    if remdinfo and i not in remdidx:  # '~count' was always truthy; use a membership test
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
i = i + 1
if i == len(self.closeauthordis) or backwardcount > 1:
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
random.shuffle(recommendations)
self.result = OrderedDict([('name', name), ('recommendations',
recommendations)])
self.debugmsg('end recommendationV4 \n', 0)
return self.result
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getremdinfo(self, clsidx):
remdidx = self.closeauthors[clsidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
if idx.count(self.myidx):
return []
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([('name', name), ('relevancy', self.
closeauthordis[clsidx]), ('coAuthors', coauthors), (
'researchTopics', researchtopic), ('recentPublications',
recentpub)])
else:
return []
<|reserved_special_token_0|>
def getremdinfoV2(self, clsidx):
remdidx = self.closeauthors[clsidx]
username = self.authors[self.myidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
mentionlist = self.mentionnetwork[username]
if name in mentionlist:
return []
remdid = self.id_name[name]
if self.labels[remdid] == 'org':
return []
coauthors = self.mycoauthorsV4bymentionlist(name)
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([('name', name), ('relevancy', self.
closeauthordis[clsidx]), ('coAuthors', coauthors), (
'researchTopics', researchtopic), ('recentPublications',
recentpub)])
else:
return []
<|reserved_special_token_0|>
def updatedistance(self):
deg1con = self.coauthornet[self.myidx, self.closeauthors]
deg1conidx = np.where(deg1con > 0)[0]
deg2conidx = np.where(deg1con == 0)[0]
deg2con = np.zeros(deg2conidx.size)
for i in self.closeauthors[deg1conidx]:
deg2con = deg2con + self.coauthornet[i, self.closeauthors[
deg2conidx]]
deg1con = deg1con[deg1con > 0]
deg1con = deg1con / max(deg1con)
return deg1conidx, deg1con, deg2conidx, deg2con
<|reserved_special_token_0|>
def filteredrecommendations(self, n):
recommendations = []
self.filteridx = []
self.filteredauthors = []
i = 0
for name in self.recommendauthor:
[coauthors, idx, c] = self.mycoauthorsV4(name)
if idx.count(self.myidx):
i = i + 1
continue
recentpub = self.resentpublications(name)
if not recentpub:
i = i + 1
continue
self.filteredauthors.append(name)
researchtopic = []
researchtopic.append(OrderedDict([('topic', 'TBD')]))
recommendations.append(OrderedDict([('name', name), (
'relevancy', self.closeauthordis[i]), ('coAuthors',
coauthors), ('researchTopics', researchtopic), (
'recentPublications', recentpub)]))
self.filteridx.append(i)
i = i + 1
if len(self.filteridx) == n:
break
return recommendations
<|reserved_special_token_0|>
def thresholdrecommendations(self, remds, n):
thredremd = []
self.trd = np.zeros(3)
tdis = self.filteredcloseauthordis()
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis > t1])
self.trd[1] = len(tdis[tdis < t1])
self.trd[2] = len(tdis) - len(tdis[tdis > t2])
for i in range(3):
for j in range(int(n / 3)):
k = int(self.trd[i] + j)
name = remds[k]['name']
researchtopic = self.keyword(name)
remds[k]['researchTopics'] = researchtopic
thredremd.append(remds[k])
return thredremd
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class recommendationsys:
def __init__(self, nyear):
self.activityyear = 10
self.debug = 0
self.nremd = 3
PROJECT_DIRECTORY = 'output/project/' + project_name
self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'
self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'
self.f_years = PROJECT_DIRECTORY + '/years_target.txt'
self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'
self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'
self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'
self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'
self.npaper = 10
self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())
self.keywordthreshold = 10
self.debugmsg('start init', 0)
self.docluster()
self.initNLTKConditionalFreqDist()
self.filterN = len(self.authors)
self.debugmsg('end init\n', 0)
<|reserved_special_token_0|>
def debugmsg(self, msg, lvl):
if self.debug <= lvl:
print(msg)
<|reserved_special_token_0|>
def resentpublicationsidx(self, authoridx):
resentpub = []
idx = self.authortitlesidx[authoridx]
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:
return resentpub
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([('name', author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime(
'%Y-%m-%d %H:%M:%S')
resentpub.append(OrderedDict([('title', self.rawtitles[i]), (
'authors', authorsjson), ('year', date), (
'publicationVenue', self.booktitle[i])]))
return resentpub
<|reserved_special_token_0|>
def resentpublications(self, name):
resentpub = []
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
idx = self.authortitlesidx[idx]
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:
return resentpub
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([('name', author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime(
'%Y-%m-%d %H:%M:%S')
resentpub.append(OrderedDict([('title', self.rawtitles[i]), (
'authors', authorsjson), ('year', date), (
'publicationVenue', self.booktitle[i])]))
return resentpub
def initNLTKConditionalFreqDist(self):
self.debugmsg('start initNLTK CFD\n', 0)
pairs = []
pairs = nltk.bigrams(self.allcorp)
self.cfd = nltk.ConditionalFreqDist(pairs)
self.debugmsg('end initNLTK CFD\n', 0)
def keyword(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
contentjson = []
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
contentjson.append(OrderedDict([('topic', topic[0])]))
return contentjson
<|reserved_special_token_0|>
def keywordbyidx(self, idx):
contentjson = []
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
contentjson.append(OrderedDict([('topic', topic[0])]))
return contentjson
<|reserved_special_token_0|>
def bigramkeywords(self, text):
content = text
userpairs = list(nltk.bigrams(content))
keywordsbackup = []
keywords = []
for p in userpairs:
pairsdic = self.cfd[p[0]]
n = pairsdic[p[1]]
if n >= self.keywordthreshold:
keywords.append((p, n))
keywordsbackup.append((p, n))
finalkeywords = []
uniqkeywords = set(keywords)
keywords = sorted(uniqkeywords, key=lambda keywords: keywords[1])
for p in keywords:
if p[1] >= 25 or userpairs.count(p[0]) > 1:
finalkeywords.append([' '.join(p[0]), p[1], userpairs.count
(p[0])])
finalkeywords.reverse()
if not finalkeywords:
uniqkeywords = set(keywordsbackup)
keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup:
keywordsbackup[1])
            finalkeywords.append([' '.join(keywordsbackup[-1][0]),
                keywordsbackup[-1][1], userpairs.count(keywordsbackup[-1][0])])
else:
pluralidx = self.findpluralbigram(finalkeywords)
self.removepluralbigram(finalkeywords, pluralidx)
return finalkeywords
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def findpluralbigram(self, keywordsinfo):
c = []
for i in keywordsinfo:
t = i[0].split()
t1 = ''
for n in t:
if n[-1] == 's':
n = n[:-1]
t1 = t1 + n
c.append(t1)
uniqbigram = list(set(c))
pluralidx = []
for i in uniqbigram:
count = c.count(i)
if count > 1:
cc = []
for n in range(len(c)):
if i == c[n]:
cc.append(n)
pluralidx.append(cc)
return pluralidx
<|reserved_special_token_0|>
def mycoauthorsV2(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthorship = self.coauthornetV2[idx]
uniqcoauthors = np.array(list(set(coauthorship)))
coauthorcount = []
for i in uniqcoauthors:
coauthorcount.append(coauthorship.count(i))
countidx = np.argsort(coauthorcount)
countidx = countidx[::-1]
coauthorcount = np.array(coauthorcount)
result = []
for i in countidx:
result.append(OrderedDict([('name', self.authors[uniqcoauthors[
i]]), ('cooperationCount', coauthorcount[i])]))
return result, list(uniqcoauthors[countidx]), list(coauthorcount[
countidx])
<|reserved_special_token_0|>
def mycoauthorsV3(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4byidx(self, idx):
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4bymentionlist(self, name):
if name in self.mentionnetwork.keys():
mentiondict = self.mentionnetwork[name]
else:
mentiondict = {'None': 0}
result = []
sorted_mentiondict = sorted(mentiondict.items(), key=operator.
itemgetter(1), reverse=True)
for i in sorted_mentiondict:
result.append(OrderedDict([('name', i[0]), ('cooperationCount',
i[1])]))
return result
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def mynewcoauthors(self, userIdx, yearPre, yearCur):
coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)
coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)
newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)
return newCoauthors
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def secondhoopties(self, userX, userY, year):
result = []
        # use the year argument (previously hardcoded to 2016)
        coauthors1, count1 = self.mycoauthorsbyyear(userX, year)
        for i in coauthors1:
            coauthors2, count2 = self.mycoauthorsbyyear(i, year)
            for n in coauthors2:
                coauthors3, count3 = self.mycoauthorsbyyear(n, year)
                if userY in coauthors3:
                    result.append([[i, n], [count1[coauthors1.index(i)],
                        count2[coauthors2.index(n)], count3[coauthors3.
                        index(userY)]]])
        return result
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getVenue(self, userIdx):
        # map publication indices to venue names first; the raw indices are
        # unique per paper, so counting them directly would be meaningless
        venues = [self.booktitle[i] for i in self.authorbooktitleidx[userIdx]]
        c = Counter(venues)
        frqvenues = c.most_common()
        return frqvenues[0][0]
<|reserved_special_token_0|>
def contentsimilarity(self, userX, userY, year):
contentX = self.getcontentbyyear(userX, year)
if not contentX:
return -1
contentX = contentX[0:10]
contentY = self.getcontentbyyear(userY, year)
if not contentY:
return -1
contentY = contentY[0:10]
contents = []
for i in contentX:
contents.extend(i.split(' '))
lenx = len(contents)
for i in contentY:
contents.extend(i.split(' '))
stemmer = nltk.stem.PorterStemmer()
stems = [stemmer.stem(t) for t in contents]
newcontentX = stems[0:lenx]
newcontentY = stems[lenx:]
vectorizer = CountVectorizer()
v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(
newcontentY)])
        # pairwise_distances with metric='cosine' returns cosine *distance*
        # (1 - cosine similarity), despite the variable name
        cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]
        return cosinesimilarity
<|reserved_special_token_0|>
def networksimilarity(self, userX, userY, year):
coauthors, c = self.mycoauthorsbyyear(userX, year)
edgesFG = len(coauthors)
n = 0
for i in coauthors:
subcoauthors, c = self.mycoauthorsbyyear(i, year)
con = list(set(subcoauthors).intersection(coauthors[n:]))
edgesFG = edgesFG + len(con)
n = n + 1
weakties, cx, cy = self.weakties(userX, userY, year)
edgesMFG = 2 * len(weakties)
n = 0
for i in weakties:
subcoauthors, c = self.mycoauthorsbyyear(i, year)
con = list(set(subcoauthors).intersection(weakties[n:]))
edgesMFG = edgesMFG + len(con)
n = n + 1
if edgesFG * edgesMFG:
ns = np.log(edgesMFG) / np.log(2 * edgesFG)
else:
ns = -1
return ns, edgesFG, edgesMFG, cx, cy
<|reserved_special_token_0|>
def textnormalizing(self, text):
c = 0
for i in text:
if i[-1] == 's':
ii = i[:-1]
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-2:] == 'es':
ii = i[:-2]
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-3:] == 'ies':
ii = i[:-3] + 'y'
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-3:] == 'ing':
ii = i[:-3]
if ii in text:
text[c] = ii
c = c + 1
continue
ii = i[:-4]
if ii in text:
text[c] = ii
c = c + 1
continue
ii = i[:-3] + 'e'
if ii in text:
                text[c] = ii
                c = c + 1
                continue
c = c + 1
return text
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def radiusofcluster(self, labels, nth, dismatrix):
idx = np.where(labels == nth)[0]
dis = dismatrix[idx, nth]
self.mindis = min(dis)
self.maxdis = max(dis)
self.radius = self.maxdis
<|reserved_special_token_0|>
def showcontents(self, labels, nth, allcontents):
contents = []
idx = np.where(labels == nth)
idx = np.array(idx)
idx = idx.flatten()
for i in idx:
contents.append(allcontents[i])
return contents
<|reserved_special_token_0|>
def digstring(self, s):
for i in s:
if i.isdigit():
return True
return False
<|reserved_special_token_0|>
def distance(self, a, b):
if scipy.sparse.issparse(a):
a = a.toarray()
a = a[0]
if scipy.sparse.issparse(b):
b = b.toarray()
b = b[0]
a = np.array(a)
b = np.array(b)
return np.sqrt(sum(np.square(a - b)))
<|reserved_special_token_0|>
def updatecoauthornetworkV2(self, net, authors, namelist):
nameidx = []
for name in namelist:
nameidx.append(authors.index(name))
for i in nameidx:
tmpidx = nameidx[:]
tmpidx.remove(i)
if not net:
net.append(tmpidx)
elif i > len(net) - 1:
net.append(tmpidx)
else:
net[i].extend(tmpidx)
<|reserved_special_token_0|>
def per_org_label(self):
f = codecs.open(self.f_perorglabel, 'r', 'utf-8')
labels = {}
for line in f:
items = line.split()
labels[items[0]] = items[1]
f.close()
self.labels = labels
<|reserved_special_token_0|>
def mention_network(self):
f = codecs.open(self.f_mentionnetwork, 'r', 'utf-8')
        source = ''
        network = {}
        target = {}
        for line in f:
            # lines apparently look like: source"target"count (inferred from
            # the split on double quotes)
            items = line.split('"')
            if source == '':
                source = items[0]
            if source == items[0]:
                target[items[1]] = int(items[2])
            else:
                # flush the completed source under its own key; the original
                # stored it under the *new* source and dropped this line
                network[source] = target
                source = items[0]
                target = {items[1]: int(items[2])}
        if source != '':
            network[source] = target
        f.close()
        return network
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def recommendationV4(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' +
str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
else:
name = name.decode('utf-8')
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.
tfidfarray, featuretfidf, 0)
self.debugmsg('end distance computing \n', 0)
self.debugmsg('start otsuifilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
recommendations = []
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
remdinfo = self.getremdinfoV2(i)
                    if remdinfo and i not in remdidx:  # '~count' was always truthy; use a membership test
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
i = i + 1
if i == len(self.closeauthordis) or backwardcount > 1:
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
random.shuffle(recommendations)
self.result = OrderedDict([('name', name), ('recommendations',
recommendations)])
self.debugmsg('end recommendationV4 \n', 0)
return self.result
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getremdinfo(self, clsidx):
remdidx = self.closeauthors[clsidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
if idx.count(self.myidx):
return []
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([('name', name), ('relevancy', self.
closeauthordis[clsidx]), ('coAuthors', coauthors), (
'researchTopics', researchtopic), ('recentPublications',
recentpub)])
else:
return []
<|reserved_special_token_0|>
def getremdinfoV2(self, clsidx):
remdidx = self.closeauthors[clsidx]
username = self.authors[self.myidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
mentionlist = self.mentionnetwork[username]
if name in mentionlist:
return []
remdid = self.id_name[name]
if self.labels[remdid] == 'org':
return []
coauthors = self.mycoauthorsV4bymentionlist(name)
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([('name', name), ('relevancy', self.
closeauthordis[clsidx]), ('coAuthors', coauthors), (
'researchTopics', researchtopic), ('recentPublications',
recentpub)])
else:
return []
<|reserved_special_token_0|>
def updatedistance(self):
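        # split ranked candidates into first-degree co-authors (deg1, direct
        # links in coauthornet) and the rest (deg2); deg2 is weighted by the
        # co-author counts it shares with the deg1 set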
deg1con = self.coauthornet[self.myidx, self.closeauthors]
deg1conidx = np.where(deg1con > 0)[0]
deg2conidx = np.where(deg1con == 0)[0]
deg2con = np.zeros(deg2conidx.size)
for i in self.closeauthors[deg1conidx]:
deg2con = deg2con + self.coauthornet[i, self.closeauthors[
deg2conidx]]
deg1con = deg1con[deg1con > 0]
deg1con = deg1con / max(deg1con)
return deg1conidx, deg1con, deg2conidx, deg2con
<|reserved_special_token_0|>
def filteredrecommendations(self, n):
recommendations = []
self.filteridx = []
self.filteredauthors = []
i = 0
for name in self.recommendauthor:
[coauthors, idx, c] = self.mycoauthorsV4(name)
if idx.count(self.myidx):
i = i + 1
continue
recentpub = self.resentpublications(name)
if not recentpub:
i = i + 1
continue
self.filteredauthors.append(name)
researchtopic = []
researchtopic.append(OrderedDict([('topic', 'TBD')]))
recommendations.append(OrderedDict([('name', name), (
'relevancy', self.closeauthordis[i]), ('coAuthors',
coauthors), ('researchTopics', researchtopic), (
'recentPublications', recentpub)]))
self.filteridx.append(i)
i = i + 1
if len(self.filteridx) == n:
break
return recommendations
<|reserved_special_token_0|>
def thresholdrecommendations(self, remds, n):
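        # re-threshold the filtered distances with two Otsu cuts and take
        # n/3 recommendations from the start of each of the three bands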
thredremd = []
self.trd = np.zeros(3)
tdis = self.filteredcloseauthordis()
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis > t1])
self.trd[1] = len(tdis[tdis < t1])
self.trd[2] = len(tdis) - len(tdis[tdis > t2])
for i in range(3):
for j in range(int(n / 3)):
k = int(self.trd[i] + j)
name = remds[k]['name']
researchtopic = self.keyword(name)
remds[k]['researchTopics'] = researchtopic
thredremd.append(remds[k])
return thredremd
<|reserved_special_token_0|>
def filteredcloseauthordis(self):
return self.closeauthordis[self.filteridx]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class recommendationsys:
def __init__(self, nyear):
self.activityyear = 10
self.debug = 0
self.nremd = 3
PROJECT_DIRECTORY = 'output/project/' + project_name
self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'
self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'
self.f_years = PROJECT_DIRECTORY + '/years_target.txt'
self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'
self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'
self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'
self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'
self.npaper = 10
self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())
self.keywordthreshold = 10
self.debugmsg('start init', 0)
self.docluster()
self.initNLTKConditionalFreqDist()
self.filterN = len(self.authors)
self.debugmsg('end init\n', 0)
<|reserved_special_token_0|>
def debugmsg(self, msg, lvl):
if self.debug <= lvl:
print(msg)
<|reserved_special_token_0|>
def resentpublicationsidx(self, authoridx):
resentpub = []
idx = self.authortitlesidx[authoridx]
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:
return resentpub
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([('name', author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime(
'%Y-%m-%d %H:%M:%S')
resentpub.append(OrderedDict([('title', self.rawtitles[i]), (
'authors', authorsjson), ('year', date), (
'publicationVenue', self.booktitle[i])]))
return resentpub
<|reserved_special_token_0|>
def resentpublications(self, name):
resentpub = []
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
idx = self.authortitlesidx[idx]
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:
return resentpub
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([('name', author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime(
'%Y-%m-%d %H:%M:%S')
resentpub.append(OrderedDict([('title', self.rawtitles[i]), (
'authors', authorsjson), ('year', date), (
'publicationVenue', self.booktitle[i])]))
return resentpub
def initNLTKConditionalFreqDist(self):
self.debugmsg('start initNLTK CFD\n', 0)
pairs = []
pairs = nltk.bigrams(self.allcorp)
self.cfd = nltk.ConditionalFreqDist(pairs)
self.debugmsg('end initNLTK CFD\n', 0)
def keyword(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
contentjson = []
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
contentjson.append(OrderedDict([('topic', topic[0])]))
return contentjson
<|reserved_special_token_0|>
def keywordbyidx(self, idx):
contentjson = []
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
contentjson.append(OrderedDict([('topic', topic[0])]))
return contentjson
<|reserved_special_token_0|>
def bigramkeywords(self, text):
content = text
userpairs = list(nltk.bigrams(content))
keywordsbackup = []
keywords = []
for p in userpairs:
pairsdic = self.cfd[p[0]]
n = pairsdic[p[1]]
if n >= self.keywordthreshold:
keywords.append((p, n))
keywordsbackup.append((p, n))
finalkeywords = []
uniqkeywords = set(keywords)
keywords = sorted(uniqkeywords, key=lambda keywords: keywords[1])
for p in keywords:
if p[1] >= 25 or userpairs.count(p[0]) > 1:
finalkeywords.append([' '.join(p[0]), p[1], userpairs.count
(p[0])])
finalkeywords.reverse()
if not finalkeywords:
uniqkeywords = set(keywordsbackup)
keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup:
keywordsbackup[1])
            finalkeywords.append([' '.join(keywordsbackup[-1][0]),
                keywordsbackup[-1][1], userpairs.count(keywordsbackup[-1][0])])
else:
pluralidx = self.findpluralbigram(finalkeywords)
self.removepluralbigram(finalkeywords, pluralidx)
return finalkeywords
<|reserved_special_token_0|>
def removepluralbigram(self, bigram, pluralidx):
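        # merge counts of bigrams flagged as plural variants of each other and
        # drop the duplicates, shifting indices as entries are removed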
if not pluralidx:
print('empty')
return
delcount = 0
pren = 0
for i in pluralidx:
for n in i[1:]:
if n > pren:
n = n - delcount
bigram[i[0]][1] = bigram[i[0]][1] + bigram[n][1]
                    del bigram[n]  # delete by position; remove() would match the first equal item
delcount = delcount + 1
pren = n
<|reserved_special_token_0|>
def findpluralbigram(self, keywordsinfo):
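        # group indices of bigrams that collapse to the same string once a
        # trailing 's' is stripped from each word (plural variants)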
c = []
for i in keywordsinfo:
t = i[0].split()
t1 = ''
for n in t:
if n[-1] == 's':
n = n[:-1]
t1 = t1 + n
c.append(t1)
uniqbigram = list(set(c))
pluralidx = []
for i in uniqbigram:
count = c.count(i)
if count > 1:
cc = []
for n in range(len(c)):
if i == c[n]:
cc.append(n)
pluralidx.append(cc)
return pluralidx
<|reserved_special_token_0|>
def mycoauthorsV2(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthorship = self.coauthornetV2[idx]
uniqcoauthors = np.array(list(set(coauthorship)))
coauthorcount = []
for i in uniqcoauthors:
coauthorcount.append(coauthorship.count(i))
countidx = np.argsort(coauthorcount)
countidx = countidx[::-1]
coauthorcount = np.array(coauthorcount)
result = []
for i in countidx:
result.append(OrderedDict([('name', self.authors[uniqcoauthors[
i]]), ('cooperationCount', coauthorcount[i])]))
return result, list(uniqcoauthors[countidx]), list(coauthorcount[
countidx])
<|reserved_special_token_0|>
def mycoauthorsV3(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4byidx(self, idx):
coauthors = []
for i in self.coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([('name', self.authors[unicoauthors[-
(i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))
return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mycoauthorsV4bymentionlist(self, name):
if name in self.mentionnetwork.keys():
mentiondict = self.mentionnetwork[name]
else:
mentiondict = {'None': 0}
result = []
sorted_mentiondict = sorted(mentiondict.items(), key=operator.
itemgetter(1), reverse=True)
for i in sorted_mentiondict:
result.append(OrderedDict([('name', i[0]), ('cooperationCount',
i[1])]))
return result
<|reserved_special_token_0|>
def mycoauthorsbyyear(self, idx, year):
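        # co-authors over publications dated at or before `year`; note that
        # self.years holds epoch timestamps, so `year` must use the same units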
years = np.array(self.years)
yearidx = np.where(years <= year)[0]
coauthorsidx = [self.coauthorsidx[i] for i in yearidx]
coauthors = []
for i in coauthorsidx:
if idx in i:
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
return list(unicoauthors[::-1]), list(coauthorcount[::-1])
<|reserved_special_token_0|>
def mynewcoauthors(self, userIdx, yearPre, yearCur):
coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)
coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)
newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)
return newCoauthors
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def secondhoopties(self, userX, userY, year):
result = []
        # use the year argument (previously hardcoded to 2016)
        coauthors1, count1 = self.mycoauthorsbyyear(userX, year)
        for i in coauthors1:
            coauthors2, count2 = self.mycoauthorsbyyear(i, year)
            for n in coauthors2:
                coauthors3, count3 = self.mycoauthorsbyyear(n, year)
                if userY in coauthors3:
                    result.append([[i, n], [count1[coauthors1.index(i)],
                        count2[coauthors2.index(n)], count3[coauthors3.
                        index(userY)]]])
        return result
<|reserved_special_token_0|>
def getcontentbyyear(self, userIdx, year):
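        # titles of userIdx's publications dated at or before `year`
        # (epoch-timestamp units, like self.years), newest first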
titleIdx = self.authortitlesidx[userIdx]
titleIdx = np.array(titleIdx)
years = [self.years[i] for i in titleIdx]
years = np.array(years)
        # sort newest-first while keeping titleIdx aligned with years; the
        # original sorted years alone, which misaligned the title indices
        order = years.argsort()[::-1]
        titleIdx = titleIdx[order]
        years = years[order]
        yearIdx = np.where(years <= year)[0]
        content = [self.titles[i] for i in titleIdx[yearIdx]]
return content
<|reserved_special_token_0|>
def getVenue(self, userIdx):
        # map publication indices to venue names first; the raw indices are
        # unique per paper, so counting them directly would be meaningless
        venues = [self.booktitle[i] for i in self.authorbooktitleidx[userIdx]]
        c = Counter(venues)
        frqvenues = c.most_common()
        return frqvenues[0][0]
<|reserved_special_token_0|>
def contentsimilarity(self, userX, userY, year):
contentX = self.getcontentbyyear(userX, year)
if not contentX:
return -1
contentX = contentX[0:10]
contentY = self.getcontentbyyear(userY, year)
if not contentY:
return -1
contentY = contentY[0:10]
contents = []
for i in contentX:
contents.extend(i.split(' '))
lenx = len(contents)
for i in contentY:
contents.extend(i.split(' '))
stemmer = nltk.stem.PorterStemmer()
stems = [stemmer.stem(t) for t in contents]
newcontentX = stems[0:lenx]
newcontentY = stems[lenx:]
vectorizer = CountVectorizer()
v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(
newcontentY)])
        # pairwise_distances with metric='cosine' returns cosine *distance*
        # (1 - cosine similarity), despite the variable name
        cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]
        return cosinesimilarity
<|reserved_special_token_0|>
def networksimilarity(self, userX, userY, year):
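        # network similarity: ratio of log edge counts between the weak-tie
        # subgraph shared with userY (edgesMFG) and userX's ego network
        # (edgesFG), i.e. ns = log(edgesMFG) / log(2 * edgesFG)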
coauthors, c = self.mycoauthorsbyyear(userX, year)
edgesFG = len(coauthors)
n = 0
for i in coauthors:
subcoauthors, c = self.mycoauthorsbyyear(i, year)
con = list(set(subcoauthors).intersection(coauthors[n:]))
edgesFG = edgesFG + len(con)
n = n + 1
weakties, cx, cy = self.weakties(userX, userY, year)
edgesMFG = 2 * len(weakties)
n = 0
for i in weakties:
subcoauthors, c = self.mycoauthorsbyyear(i, year)
con = list(set(subcoauthors).intersection(weakties[n:]))
edgesMFG = edgesMFG + len(con)
n = n + 1
if edgesFG * edgesMFG:
ns = np.log(edgesMFG) / np.log(2 * edgesFG)
else:
ns = -1
return ns, edgesFG, edgesMFG, cx, cy
<|reserved_special_token_0|>
def textnormalizing(self, text):
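        # crude suffix stripper: rewrite plural/-ing forms to a base form only
        # when that base form already occurs elsewhere in the token list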
c = 0
for i in text:
if i[-1] == 's':
ii = i[:-1]
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-2:] == 'es':
ii = i[:-2]
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-3:] == 'ies':
ii = i[:-3] + 'y'
if ii in text:
text[c] = ii
c = c + 1
continue
if i[-3:] == 'ing':
ii = i[:-3]
if ii in text:
text[c] = ii
c = c + 1
continue
ii = i[:-4]
if ii in text:
text[c] = ii
c = c + 1
continue
ii = i[:-3] + 'e'
if ii in text:
                text[c] = ii
                c = c + 1
                continue
c = c + 1
return text
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def radiusofcluster(self, labels, nth, dismatrix):
idx = np.where(labels == nth)[0]
dis = dismatrix[idx, nth]
self.mindis = min(dis)
self.maxdis = max(dis)
self.radius = self.maxdis
<|reserved_special_token_0|>
def showcontents(self, labels, nth, allcontents):
contents = []
idx = np.where(labels == nth)
idx = np.array(idx)
idx = idx.flatten()
for i in idx:
contents.append(allcontents[i])
return contents
<|reserved_special_token_0|>
def digstring(self, s):
for i in s:
if i.isdigit():
return True
return False
<|reserved_special_token_0|>
def distance(self, a, b):
if scipy.sparse.issparse(a):
a = a.toarray()
a = a[0]
if scipy.sparse.issparse(b):
b = b.toarray()
b = b[0]
a = np.array(a)
b = np.array(b)
return np.sqrt(sum(np.square(a - b)))
<|reserved_special_token_0|>
def updatecoauthornetworkV2(self, net, authors, namelist):
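        # assumes author indices are assigned in increasing order with no
        # gaps, so appending keeps `net` aligned with author indices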
nameidx = []
for name in namelist:
nameidx.append(authors.index(name))
for i in nameidx:
tmpidx = nameidx[:]
tmpidx.remove(i)
if not net:
net.append(tmpidx)
elif i > len(net) - 1:
net.append(tmpidx)
else:
net[i].extend(tmpidx)
<|reserved_special_token_0|>
def per_org_label(self):
f = codecs.open(self.f_perorglabel, 'r', 'utf-8')
labels = {}
for line in f:
items = line.split()
labels[items[0]] = items[1]
f.close()
self.labels = labels
<|reserved_special_token_0|>
def mention_network(self):
f = codecs.open(self.f_mentionnetwork, 'r', 'utf-8')
        source = ''
        network = {}
        target = {}
        for line in f:
            # lines apparently look like: source"target"count (inferred from
            # the split on double quotes)
            items = line.split('"')
            if source == '':
                source = items[0]
            if source == items[0]:
                target[items[1]] = int(items[2])
            else:
                # flush the completed source under its own key; the original
                # stored it under the *new* source and dropped this line
                network[source] = target
                source = items[0]
                target = {items[1]: int(items[2])}
        if source != '':
            network[source] = target
        f.close()
        return network
<|reserved_special_token_0|>
def docluster(self):
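        # build the corpus: tokenize titles, accumulate one merged document
        # and title/venue index lists per author, read years, venues, the
        # mention network and id map, then vectorize authors into TF-IDF space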
tokenizer = RegexpTokenizer('\\w+')
self.rawtitles = []
self.titles = []
self.allcorp = []
sw = set(nltk.corpus.stopwords.words('english'))
self.debugmsg('start titles \n', 0)
f = codecs.open(self.f_titles, 'r', 'utf-8')
for line in f:
if line[-1] == '\n':
line = line[:-1]
self.rawtitles.append(line)
line = line.lower()
tokenlist = tokenizer.tokenize(line)
self.allcorp += tokenlist
            tokenlist = ' '.join([w for w in tokenlist if w.lower() not in
                sw and not self.digstring(w)])
self.titles.append(tokenlist)
f.close()
self.authordict = {}
self.authors = []
self.authorcontents = []
self.authorrawcontents = []
self.authortitlesidx = []
self.authorbooktitleidx = []
self.coathors = []
self.coauthorsidx = []
self.mentionnetwork = {}
self.id_name = {}
self.coauthornetV2 = []
self.mentionnetwork = self.mention_network()
self.debugmsg('start year \n', 0)
self.years = []
f = codecs.open(self.f_years, 'r', 'utf-8')
for line in f:
if line[-1] == '\n':
line = line[:-1]
if line == '':
                line = '1970'  # parser.parse needs a string; default empty years to the epoch
timestamp = time.mktime(parser.parse(line).timetuple())
self.years.append(int(timestamp))
f.close()
self.debugmsg('start booktitle \n', 0)
self.booktitle = []
f = codecs.open(self.f_booktitle, 'r', 'utf-8')
for line in f:
line = line[:-1]
self.booktitle.append(line)
f.close()
self.debugmsg('start authors \n', 0)
i = 0
m = 0
f = codecs.open(self.f_authors, 'r', 'utf-8')
for line in f:
line = line[:-1]
newline = line.split(',')
namelist = newline
self.coathors.append(namelist)
authoridx = []
for name in newline:
idx = self.authordict.get(name)
if idx is not None:
self.authortitlesidx[idx].append(i)
self.authorbooktitleidx[idx].append(i)
                    self.authorcontents[idx] += ' ' + self.titles[i]
                    self.authorrawcontents[idx] += ' ' + self.rawtitles[i]
else:
self.authors.append(name)
self.authordict[name] = m
self.authorcontents.append(self.titles[i])
self.authorrawcontents.append(self.rawtitles[i])
self.authortitlesidx.append([i])
self.authorbooktitleidx.append([i])
idx = m
m = m + 1
authoridx.append(idx)
self.coauthorsidx.append(authoridx)
i = i + 1
f.close()
f = codecs.open(self.f_authors_id, 'r', 'utf-8')
i = 0
preline = ''
for line in f:
if preline != line:
                newline = line[:-1] if line[-1] == '\n' else line
                self.id_name[self.authors[i]] = newline
preline = line
i = i + 1
else:
continue
f.close()
self.per_org_label()
        self.vectorizer = CountVectorizer(max_df=0.95, min_df=1,
            stop_words='english')
X = self.vectorizer.fit_transform(self.authorcontents)
Xarray = X
transformer = TfidfTransformer()
self.tfidf = transformer.fit_transform(Xarray)
self.tfidfarray = self.tfidf
self.featurenames = self.vectorizer.get_feature_names()
<|reserved_special_token_0|>
def recommendationV3(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' +
str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
else:
name = name.decode('utf-8')
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.
tfidfarray, featuretfidf, 0)
self.debugmsg('end distance computing \n', 0)
self.debugmsg('start otsuifilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
recommendations = []
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
remdinfo = self.getremdinfo(i)
if remdinfo and ~remdidx.count(i):
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
i = i + 1
if i == len(self.closeauthordis) or backwardcount > 1:
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
random.shuffle(recommendations)
self.result = OrderedDict([('name', name), ('recommendations',
recommendations)])
self.debugmsg('end recommendationV3 \n', 0)
return self.result
<|reserved_special_token_0|>
def recommendationV4(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' +
str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
else:
name = name.decode('utf-8')
name = ud.normalize('NFC', name)
authorIdx = self.authordict.get(name)
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.
tfidfarray, featuretfidf, 0)
self.debugmsg('end distance computing \n', 0)
self.debugmsg('start otsuifilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
recommendations = []
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
remdinfo = self.getremdinfoV2(i)
if remdinfo and ~remdidx.count(i):
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
i = i + 1
if i == len(self.closeauthordis) or backwardcount > 1:
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
random.shuffle(recommendations)
self.result = OrderedDict([('name', name), ('recommendations',
recommendations)])
self.debugmsg('end recommendationV4 \n', 0)
return self.result
<|reserved_special_token_0|>
def nNNlinesearch(self, space, p, n):
closeauthordis = []
closeauthordis = pairwise_distances(space, p, metric='cosine')
closeauthordis = closeauthordis.flatten()
closeauthors = closeauthordis.argsort()
closeauthordis.sort()
if n > 0:
closeauthors = closeauthors[0:n]
closeauthordis = closeauthordis[0:n]
idx = np.where(closeauthors == self.myidx)[0][0]
closeauthors = np.delete(closeauthors, idx)
closeauthordis = np.delete(closeauthordis, idx)
return closeauthors, closeauthordis
<|reserved_special_token_0|>
def otsufilter(self, tdis):
trd = np.zeros(3, int)
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis > t1])
trd[1] = len(tdis[tdis < t1]) + int((len(tdis[tdis < t2]) - len(
tdis[tdis < t1])) / 2) - 1
trd[2] = len(tdis) - 3
return trd
<|reserved_special_token_0|>
def getremdinfo(self, clsidx):
remdidx = self.closeauthors[clsidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
if idx.count(self.myidx):
return []
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([('name', name), ('relevancy', self.
closeauthordis[clsidx]), ('coAuthors', coauthors), (
'researchTopics', researchtopic), ('recentPublications',
recentpub)])
else:
return []
<|reserved_special_token_0|>
def getremdinfoV2(self, clsidx):
remdidx = self.closeauthors[clsidx]
username = self.authors[self.myidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
mentionlist = self.mentionnetwork[username]
if name in mentionlist:
return []
remdid = self.id_name[name]
if self.labels[remdid] == 'org':
return []
coauthors = self.mycoauthorsV4bymentionlist(name)
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([('name', name), ('relevancy', self.
closeauthordis[clsidx]), ('coAuthors', coauthors), (
'researchTopics', researchtopic), ('recentPublications',
recentpub)])
else:
return []
<|reserved_special_token_0|>
def updatedistance(self):
deg1con = self.coauthornet[self.myidx, self.closeauthors]
deg1conidx = np.where(deg1con > 0)[0]
deg2conidx = np.where(deg1con == 0)[0]
deg2con = np.zeros(deg2conidx.size)
for i in self.closeauthors[deg1conidx]:
deg2con = deg2con + self.coauthornet[i, self.closeauthors[
deg2conidx]]
deg1con = deg1con[deg1con > 0]
deg1con = deg1con / max(deg1con)
return deg1conidx, deg1con, deg2conidx, deg2con
<|reserved_special_token_0|>
def filteredrecommendations(self, n):
recommendations = []
self.filteridx = []
self.filteredauthors = []
i = 0
for name in self.recommendauthor:
[coauthors, idx, c] = self.mycoauthorsV4(name)
if idx.count(self.myidx):
i = i + 1
continue
recentpub = self.resentpublications(name)
if not recentpub:
i = i + 1
continue
self.filteredauthors.append(name)
researchtopic = []
researchtopic.append(OrderedDict([('topic', 'TBD')]))
recommendations.append(OrderedDict([('name', name), (
'relevancy', self.closeauthordis[i]), ('coAuthors',
coauthors), ('researchTopics', researchtopic), (
'recentPublications', recentpub)]))
self.filteridx.append(i)
i = i + 1
if len(self.filteridx) == n:
break
return recommendations
<|reserved_special_token_0|>
def thresholdrecommendations(self, remds, n):
thredremd = []
self.trd = np.zeros(3)
tdis = self.filteredcloseauthordis()
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis > t1])
self.trd[1] = len(tdis[tdis < t1])
self.trd[2] = len(tdis) - len(tdis[tdis > t2])
for i in range(3):
for j in range(int(n / 3)):
k = int(self.trd[i] + j)
name = remds[k]['name']
researchtopic = self.keyword(name)
remds[k]['researchTopics'] = researchtopic
thredremd.append(remds[k])
return thredremd
<|reserved_special_token_0|>
def filteredcloseauthordis(self):
return self.closeauthordis[self.filteridx]
<|reserved_special_token_0|>
def save_json(self, filename):
PROJECT_DIRECTORY = 'output/project/' + project_name + '/'
with io.open(PROJECT_DIRECTORY + filename + '.json', 'w', encoding=
'utf-8') as outfile:
outfile.write(json.dumps(self.result, ensure_ascii=False))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 16:38:22 2017
@author: secoder
"""
import io
import random
import nltk
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from collections import OrderedDict
from collections import Counter
from sklearn.metrics import pairwise_distances
import numpy as np
import scipy
import json
import codecs
from dateutil import parser
import time
import datetime
import operator
from skimage import filters
import unicodedata as ud
from config import project_name
class recommendationsys:
def __init__(self, nyear):
        # by default, filter out authors with no publications in the most recent 10 years
self.activityyear = 10
self.debug = 0
self.nremd = 3
#----------------------
PROJECT_DIRECTORY = 'output/project/' + project_name
self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'
self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'
self.f_years = PROJECT_DIRECTORY + '/years_target.txt'
self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'
self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'
self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'
self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'
self.npaper = 10
self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())
self.keywordthreshold = 10
#----------------------
self.debugmsg('start init', 0)
self.docluster()
self.initNLTKConditionalFreqDist()
self.filterN = len(self.authors)
self.debugmsg('end init\n', 0)
"""
"""
def debugmsg(self, msg, lvl):
if self.debug <= lvl:
print(msg)
"""
"""
def resentpublicationsidx(self,authoridx):
#print 'start recentpublications\n'
resentpub = []
idx = self.authortitlesidx[authoridx]
# sort by years
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
# if the most recent publication is before the 'nyears'
# remove this one from the list
if (int(self.years[idx[0]]) < self.nyear) or (len(idx) < self.npaper):
return resentpub
# ----
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([("name",author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime("%Y-%m-%d %H:%M:%S")
resentpub.append(OrderedDict([("title",self.rawtitles[i]),("authors",authorsjson), ("year",date),("publicationVenue",self.booktitle[i])]))
#print 'end recentpublications\n'
return resentpub
"""
"""
def resentpublications(self,name):
#print 'start recentpublications\n'
resentpub = []
#if isinstance(name, unicode): for python 2.7
if isinstance(name, str):
#idx = self.authors.index(name)
idx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
idx = self.authordict.get(name.decode('utf-8'))
idx = self.authortitlesidx[idx]
# sort by years
years = [self.years[i] for i in idx]
years = np.array(years)
years = years.argsort()
idx = np.array(idx)[years]
idx = idx.tolist()
idx.reverse()
# if the most recent publication is before the 'nyears'
# remove this one from the list
if (int(self.years[idx[0]]) < self.nyear) or (len(idx) < self.npaper):
return resentpub
# ----
for i in idx:
authorsjson = []
for author in self.coathors[i]:
authorsjson.append(OrderedDict([("name",author)]))
date = datetime.datetime.fromtimestamp(self.years[i]).strftime("%Y-%m-%d %H:%M:%S")
resentpub.append(OrderedDict([("title",self.rawtitles[i]),("authors",authorsjson), ("year",date),("publicationVenue",self.booktitle[i])]))
#print 'end recentpublications\n'
return resentpub
def initNLTKConditionalFreqDist(self):
self.debugmsg('start initNLTK CFD\n', 0)
        # build bigram pairs over the entire corpus in one pass
        pairs = nltk.bigrams(self.allcorp)
self.cfd = nltk.ConditionalFreqDist(pairs)
self.debugmsg('end initNLTK CFD\n', 0)
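    # Illustrative sketch (hypothetical tokens): nltk.ConditionalFreqDist over
    # bigrams maps each first word to a frequency distribution of followers, e.g.
    #   >>> cfd = nltk.ConditionalFreqDist(nltk.bigrams(
    #   ...     ['social', 'network', 'social', 'network', 'social', 'media']))
    #   >>> cfd['social']['network']
    #   2
    # bigramkeywords() below relies on exactly this lookup to score a pair.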
def keyword(self,name):
#print 'start keyword\n'
if isinstance(name, str):
#idx = self.authors.index(name)
idx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
idx = self.authordict.get(name.decode('utf-8'))
        contentjson = []
# bigram keywords -------------
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
#print topic[0]
contentjson.append(OrderedDict([("topic", topic[0])]))
#print 'end bigram\n'
#print 'end keyword\n'
return contentjson
"""
"""
def keywordbyidx(self,idx):
contentjson = []
# bigram keywords -------------
content = self.authorcontents[idx].lower().split()
finalkeywords = self.bigramkeywords(content)
for topic in finalkeywords:
#print topic[0]
contentjson.append(OrderedDict([("topic", topic[0])]))
return contentjson
"""
"""
def bigramkeywords(self, text):
#print 'start bigramkeyword\n'
# bigram keywords -------------
#content = text.lower().split()
content = text
#print 'start bigram\n'
userpairs = list(nltk.bigrams(content))
        # in case there are no valid keywords under our threshold,
        # the pair with the highest occurrence is picked from the backup list
keywordsbackup = []
# the valid keywords
keywords=[]
for p in userpairs:
pairsdic=self.cfd[p[0]]
n=pairsdic[p[1]]
if n>=self.keywordthreshold:
keywords.append((p,n))
keywordsbackup.append((p,n))
finalkeywords=[]
uniqkeywords=set(keywords)
keywords=sorted(uniqkeywords, key=lambda keywords: keywords[1])
for p in keywords:
if (p[1]>=25) or (userpairs.count(p[0])>1):
finalkeywords.append([' '.join(p[0]),p[1],userpairs.count(p[0])])
finalkeywords.reverse()
if not finalkeywords:
            # no valid keywords found, fall back to the most frequent pair
            uniqkeywords = set(keywordsbackup)
            keywordsbackup = sorted(uniqkeywords, key=lambda kw: kw[1])
            finalkeywords.append([' '.join(keywordsbackup[-1][0]),
                                  keywordsbackup[-1][1],
                                  userpairs.count(keywordsbackup[-1][0])])
else:
# deal with plural
pluralidx = self.findpluralbigram(finalkeywords)
self.removepluralbigram(finalkeywords,pluralidx)
#print 'end bigramkeyword\n'
return finalkeywords
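    # A minimal sketch of the survivor rule above (counts hypothetical): a
    # bigram needs a corpus-wide count >= self.keywordthreshold to enter
    # `keywords`, and reaches `finalkeywords` if that count is >= 25 or the
    # author used the pair more than once:
    #   >>> userpairs = [('neural', 'networks'), ('neural', 'networks')]
    #   >>> userpairs.count(('neural', 'networks'))
    #   2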
"""
"""
def removepluralbigram(self, bigram, pluralidx):
        # if pluralidx is empty, there is nothing to merge
        if not pluralidx:
            return
delcount = 0
pren = 0
for i in pluralidx:
#delcount = 0
for n in i[1:]:
if n > pren:
n = n - delcount
bigram[i[0]][1] = bigram[i[0]][1] + bigram[n][1]
bigram.remove(bigram[n])
delcount = delcount + 1
pren = n
"""
"""
def findpluralbigram(self, keywordsinfo):
c = []
for i in keywordsinfo:
t = i[0].split()
t1 = ''
for n in t:
if n[-1] == 's':
n = n[:-1]
t1 = t1 + n
c.append(t1)
uniqbigram = list(set(c))
pluralidx = []
for i in uniqbigram:
count = c.count(i)
if count > 1:
cc = []
for n in range(len(c)):
if i == c[n]:
cc.append(n)
pluralidx.append(cc)
return pluralidx
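    # Sketch of the merge key built above (hypothetical bigrams): both
    # 'social networks' and 'social network' normalize to 'socialnetwork',
    # so their indices land in the same pluralidx group and
    # removepluralbigram() sums their counts into one entry.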
"""
"""
def mycoauthorsV2(self, name):
if isinstance(name, str):
#idx = self.authors.index(name)
idx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
idx = self.authordict.get(name.decode('utf-8'))
coauthorship = self.coauthornetV2[idx]
uniqcoauthors = np.array(list(set(coauthorship)))
coauthorcount = []
for i in uniqcoauthors:
coauthorcount.append(coauthorship.count(i))
countidx = np.argsort(coauthorcount)
        # reverse it to descending order
countidx = countidx[::-1]
coauthorcount = np.array(coauthorcount)
result = []
for i in countidx:
result.append(OrderedDict([("name",self.authors[uniqcoauthors[i]]),("cooperationCount",coauthorcount[i])]))
return (result,list(uniqcoauthors[countidx]),list(coauthorcount[countidx]))
"""
"""
def mycoauthorsV3(self, name):
if isinstance(name, str):
#idx = self.authors.index(name)
idx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
# remove itself
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([("name",self.authors[unicoauthors[-(i+1)]]),("cooperationCount",coauthorcount[-(i+1)])]))
return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))
"""
"""
def mycoauthorsV4(self, name):
if isinstance(name, str):
idx = self.authordict.get(name)
else:
idx = self.authordict.get(name.decode('utf-8'))
coauthors = []
for i in self.coauthorsidx:
if idx in i:
# remove itself
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([("name",self.authors[unicoauthors[-(i+1)]]),("cooperationCount",coauthorcount[-(i+1)])]))
return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))
"""
"""
def mycoauthorsV4byidx(self, idx):
coauthors = []
for i in self.coauthorsidx:
if idx in i:
# remove itself
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
result = []
for i in range(len(coauthorcount)):
result.append(OrderedDict([("name",self.authors[unicoauthors[-(i+1)]]),("cooperationCount",coauthorcount[-(i+1)])]))
return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))
"""
"""
def mycoauthorsV4bymentionlist(self, name):
if name in self.mentionnetwork.keys():
mentiondict = self.mentionnetwork[name]
else:
mentiondict ={'None':0}
result = []
# sort by mention counts
sorted_mentiondict = sorted(mentiondict.items(), key=operator.itemgetter(1), reverse=True)
for i in sorted_mentiondict:
result.append(OrderedDict([("name",i[0]),("cooperationCount",i[1])]))
return result
"""
"""
def mycoauthorsbyyear(self, idx, year):
years = np.array(self.years)
yearidx = np.where(years <= year)[0]
coauthorsidx = [ self.coauthorsidx[i] for i in yearidx]
coauthors = []
for i in coauthorsidx:
if idx in i:
# remove itself
t = i[:]
t.remove(idx)
coauthors.extend(t)
coauthors = np.array(coauthors)
unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)
unicoauthors = unicoauthors[coauthorcount.argsort()]
coauthorcount.sort()
return (list(unicoauthors[::-1]),list(coauthorcount[::-1]))
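    # The counting idiom used above, on toy data:
    #   >>> np.unique(np.array([5, 3, 5, 5, 3, 7]), return_counts=True)
    #   (array([3, 5, 7]), array([2, 3, 1]))
    # argsort on the counts orders coauthor ids from least to most frequent,
    # and the [::-1] reversal puts the strongest ties first.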
"""
find the new coauthors for a user in current year against previous year
    example: mynewcoauthors(23, 2014, 2015) will return the new coauthors
in 2015 regarding the year 2014 for user 23. 23 is the index of a user
"""
def mynewcoauthors(self, userIdx, yearPre, yearCur):
coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)
coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)
newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)
return newCoauthors
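    # np.setdiff1d keeps ids present in the current year only, e.g.
    #   >>> np.setdiff1d([2, 9, 4], [4, 2])
    #   array([9])
    # so the result is exactly the coauthors gained between the two years.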
"""
    Call weakties() after mynewcoauthors() to find the common nodes
    between a user and an incoming new coauthor in the year before
    their coauthorship
"""
def weakties(self, userX, userY, year):
coauthornetX, cx = self.mycoauthorsbyyear(userX, year)
# if userX and userY already have a strong ties, just return []
if userY in coauthornetX:
return ([], [], [])
coauthornetY, cy = self.mycoauthorsbyyear(userY, year)
# find the common nodes
weaktienodes = list(set(coauthornetX).intersection(coauthornetY))
nodescountX = []
nodescountY = []
if weaktienodes:
for i in weaktienodes:
nodescountX.append(cx[coauthornetX.index(i)])
nodescountY.append(cy[coauthornetY.index(i)])
return (weaktienodes, nodescountX, nodescountY)
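    # Sketch with hypothetical ids: if X's coauthors before `year` are
    # {1, 2, 3} and Y's are {3, 4}, the weak-tie nodes are {3};
    # nodescountX/nodescountY then carry how often X and Y each
    # collaborated with node 3 before that year.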
"""
    2nd hop connection
    """
    def secondhoopties(self, userX, userY, year):
        result = []
        coauthors1, count1 = self.mycoauthorsbyyear(userX, year)
        for i in coauthors1:
            coauthors2, count2 = self.mycoauthorsbyyear(i, year)
            for n in coauthors2:
                coauthors3, count3 = self.mycoauthorsbyyear(n, year)
                if userY in coauthors3:
                    result.append([[i, n],
                                   [count1[coauthors1.index(i)],
                                    count2[coauthors2.index(n)],
                                    count3[coauthors3.index(userY)]]])
        return result
"""
    Get all the content (paper titles) of the userIdx before
    the 'year' (inclusive)
"""
def getcontentbyyear(self, userIdx, year):
titleIdx = self.authortitlesidx[userIdx]
titleIdx = np.array(titleIdx)
years = [self.years[i] for i in titleIdx]
years = np.array(years)
# sort the years and put the latest year first
# then the content will also be sorted by recent paper first
years.sort()
years = years[::-1]
yearIdx = np.where(years<=year)[0]
content = [self.titles[i] for i in titleIdx[yearIdx]]
return content
"""
    return the venue a user participated in most frequently
"""
def getVenue(self, userIdx):
venues = self.authorbooktitleidx[userIdx]
c = Counter(venues)
frqvenues = c.most_common()
return frqvenues[0][0]
"""
only consider the recent 10 papers
"""
def contentsimilarity(self, userX, userY, year):
contentX = self.getcontentbyyear(userX, year)
if not contentX:
return -1
contentX = contentX[0:10]
contentY = self.getcontentbyyear(userY, year)
if not contentY:
return -1
contentY = contentY[0:10]
# build the corpus of all the content
contents = []
for i in contentX:
contents.extend(i.split(' '))
lenx = len(contents)
for i in contentY:
contents.extend(i.split(' '))
# normalize the different forms of words
stemmer = nltk.stem.PorterStemmer()
stems = [stemmer.stem(t) for t in contents]
# reconstruct content for userX and userY use the normalized words
newcontentX = stems[0:lenx]
newcontentY = stems[lenx:]
vectorizer = CountVectorizer()
v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(newcontentY)])
cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]
return cosinesimilarity
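    # The same stem-then-vectorize pipeline on two toy titles, as a sketch:
    #   >>> stemmer = nltk.stem.PorterStemmer()
    #   >>> a = ' '.join(stemmer.stem(t) for t in 'mining social networks'.split())
    #   >>> b = ' '.join(stemmer.stem(t) for t in 'social network mining'.split())
    #   >>> v = CountVectorizer().fit_transform([a, b])
    #   >>> float(pairwise_distances(v[0], v[1], metric='cosine')[0][0])
    #   0.0
    # 'networks' and 'network' share the stem 'network', so both titles
    # collapse to the same bag of words.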
"""
network similarity
"""
def networksimilarity(self, userX, userY, year):
# first calculate FG(userX) according to paper
# User similarities on social networks
coauthors, c = self.mycoauthorsbyyear(userX, year)
edgesFG = len(coauthors)
n = 0
for i in coauthors:
subcoauthors, c = self.mycoauthorsbyyear(i, year)
con = list(set(subcoauthors).intersection(coauthors[n:]))
edgesFG = edgesFG + len(con)
n = n + 1
# second, calculate MFG(userX, userY)
weakties, cx, cy = self.weakties(userX, userY, year)
edgesMFG = 2 * len(weakties)
n = 0
for i in weakties:
subcoauthors, c = self.mycoauthorsbyyear(i, year)
con = list(set(subcoauthors).intersection(weakties[n:]))
edgesMFG = edgesMFG + len(con)
n = n + 1
# last calculate the network similarity
if edgesFG * edgesMFG:
ns = np.log(edgesMFG)/np.log(2 * edgesFG)
else:
ns = -1
return (ns, edgesFG, edgesMFG, cx, cy)
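    # Worked sketch of the ratio: with 8 edges in X's full friendship graph
    # (edgesFG) and 4 edges in the mutual subgraph (edgesMFG),
    # ns = log(4) / log(2 * 8) = 0.5, i.e. half of the maximum similarity.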
"""
    text processing: normalize words to their base form, e.g. plural
    and progressive forms
"""
def textnormalizing(self, text):
#l = len(text)
c = 0
for i in text:
# network - networks
if i[-1] == 's':
ii = i[:-1]
if ii in text:
text[c] = ii
c = c + 1
continue
# bus - buses
if i[-2:] == 'es':
ii = i[:-2]
if ii in text:
text[c] = ii
c = c + 1
continue
# study - studies
if i[-3:] == 'ies':
ii = i[:-3] + 'y'
if ii in text:
text[c] = ii
c = c + 1
continue
# network - networking
# get - getting
# explore - exploring
if i[-3:] == 'ing':
ii = i[:-3]
if ii in text:
text[c] = ii
c = c + 1
continue
                # getting - get: drop 'ting' (doubled consonant + ing)
                ii = i[:-4]
                if ii in text:
                    text[c] = ii
                    c = c + 1
                    continue
                # exploring - explore: drop 'ing' and restore trailing 'e'
                ii = i[:-3] + 'e'
                if ii in text:
                    text[c] = ii
                    c = c + 1
                    continue
c = c + 1
return text
"""
"""
"""
radius of the cluster
"""
def radiusofcluster(self, labels, nth, dismatrix):
idx = np.where(labels == nth)[0]
dis = dismatrix[idx,nth]
self.mindis = min(dis)
self.maxdis = max(dis)
self.radius = self.maxdis
# return [mindis, maxdis, radius]
"""
show contents in the same cluster
"""
def showcontents(self,labels, nth, allcontents):
contents = []
idx = np.where(labels == nth)
idx = np.array(idx)
idx = idx.flatten()
for i in idx:
contents.append(allcontents[i])
return contents
"""
    check if there is a digit in the string
"""
def digstring(self,s):
for i in s:
if i.isdigit():
return True
return False
"""
compute the distance between two points a and b
"""
def distance(self,a,b):
if scipy.sparse.issparse(a):
a = a.toarray()
a = a[0]
if scipy.sparse.issparse(b):
b = b.toarray()
b = b[0]
        a = np.array(a)
        b = np.array(b)
return np.sqrt(sum(np.square(a - b)))
"""
"""
def updatecoauthornetworkV2(self,net,authors,namelist):
nameidx = []
for name in namelist:
nameidx.append(authors.index(name))
for i in nameidx:
tmpidx = nameidx[:]
tmpidx.remove(i)
# if net is empty
if not net:
net.append(tmpidx)
else:
if i>len(net)-1:
net.append(tmpidx)
else:
net[i].extend(tmpidx)
"""
load the person or organization label
"""
def per_org_label(self):
f = codecs.open(self.f_perorglabel,'r','utf-8')
labels = {}
for line in f:
items = line.split()
labels[items[0]] = items[1]
f.close()
self.labels = labels
"""
"""
def mention_network(self):
f = codecs.open(self.f_mentionnetwork,'r','utf-8')
source=''
network = {}
        for line in f:
            items = line.split('"')
            if source == '':
                source = items[0]
                target = {}
            if source == items[0]:
                target[items[1]] = int(items[2])
            else:
                # store the finished source before starting the new one
                network[source] = target
                source = items[0]
                target = {items[1]: int(items[2])}
        # store the last source, which the loop above never flushes
        if source != '':
            network[source] = target
        f.close()
return network
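    # Expected input, one edge per line, '"'-delimited (hypothetical names):
    #   alice"bob"3
    #   alice"carol"1
    #   bob"alice"2
    # which parses into {'alice': {'bob': 3, 'carol': 1}, 'bob': {'alice': 2}}.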
"""
"""
def docluster(self):
tokenizer = RegexpTokenizer(r'\w+')
self.rawtitles = []
self.titles = []
self.allcorp = []
sw = set(nltk.corpus.stopwords.words('english'))
self.debugmsg('start titles \n', 0)
f = codecs.open(self.f_titles,'r','utf-8')
for line in f:
# remove the '\n' at the end
if line[-1] == '\n':
line = line[:-1]
self.rawtitles.append(line)
line = line.lower()
tokenlist = tokenizer.tokenize(line)
self.allcorp += tokenlist
            # keep all the words except digits and stopwords
            tokenlist = ' '.join([w for w in tokenlist
                                  if w.lower() not in sw and not self.digstring(w)])
self.titles.append(tokenlist)
f.close()
self.authordict = {}
self.authors = []
self.authorcontents = []
self.authorrawcontents = []
self.authortitlesidx = []
self.authorbooktitleidx = []
self.coathors = []
        self.coauthorsidx = []   # undirected links, e.g. a DBLP coauthorship network
        self.mentionnetwork = {} # directed links, e.g. a tweet mention network
self.id_name = {}
self.coauthornetV2 = []
        # read in the mention network
self.mentionnetwork = self.mention_network()
# read years
self.debugmsg('start year \n', 0)
self.years = []
f = codecs.open(self.f_years,'r','utf-8')
for line in f:
            # remove the trailing '\n'
if line[-1] == '\n':
line = line[:-1]
if line == '':
line = 0
#line = line.split()
#year = line[-1]
timestamp = time.mktime(parser.parse(line).timetuple())
self.years.append(int(timestamp))
f.close()
# read conference
self.debugmsg('start booktitle \n', 0)
self.booktitle = []
f = codecs.open(self.f_booktitle,'r','utf-8')
for line in f:
# remove the \n at the end
line = line[:-1]
self.booktitle.append(line)
f.close()
# read authors
self.debugmsg('start authors \n', 0)
i = 0
m = 0
f = codecs.open(self.f_authors,'r','utf-8')
for line in f:
# remove the last '\n'
line = line[:-1]
# split the authors by ','
newline = line.split(",")
namelist = newline
self.coathors.append(namelist)
authoridx = []
for name in newline:
                # dictionary version
idx = self.authordict.get(name)
if idx is not None:
self.authortitlesidx[idx].append(i)
self.authorbooktitleidx[idx].append(i)
self.authorcontents[idx] = self.authorcontents[idx] + ' ' + self.titles[i]
self.authorrawcontents[idx] = self.authorrawcontents[idx] + ' ' + self.rawtitles[i]
else:
self.authors.append(name)
self.authordict[name] = m
self.authorcontents.append(self.titles[i])
self.authorrawcontents.append(self.rawtitles[i])
self.authortitlesidx.append([i])
self.authorbooktitleidx.append([i])
idx = m
m = m + 1
authoridx.append(idx)
# end dict version
self.coauthorsidx.append(authoridx)
i = i + 1
f.close()
f = codecs.open(self.f_authors_id,'r','utf-8')
i = 0
preline = ''
for line in f:
if preline != line:
#print(i)
#print('preline: {}, line: {}'.format(preline, line))
if line[-1] == '\n':
newline = line[:-1]
self.id_name[self.authors[i]] = newline
preline = line
i = i + 1
else:
continue
#print(i)
f.close()
# load the per and org classification result
self.per_org_label()
self.vectorizer = CountVectorizer(max_df=0.95, min_df=1,stop_words='english')
X = self.vectorizer.fit_transform(self.authorcontents)
#Xarray = X.toarray()
Xarray = X
transformer = TfidfTransformer()
self.tfidf = transformer.fit_transform(Xarray)
#self.tfidfarray = self.tfidf.toarray()
self.tfidfarray = self.tfidf
        # get_feature_names() was removed in recent scikit-learn releases,
        # so prefer get_feature_names_out() when it exists
        try:
            self.featurenames = self.vectorizer.get_feature_names_out()
        except AttributeError:
            self.featurenames = self.vectorizer.get_feature_names()
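    # The tf-idf step above on two toy author profiles, as a sketch:
    #   >>> cv = CountVectorizer(stop_words='english')
    #   >>> X = cv.fit_transform(['deep learning', 'deep networks networks'])
    #   >>> tfidf = TfidfTransformer().fit_transform(X)
    # 'deep' appears in both profiles and is down-weighted, while 'learning'
    # and 'networks' dominate their respective rows.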
"""
"""
def recommendationV3(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
#idx = self.authors.index(name)
name = ud.normalize('NFC',name)
authorIdx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
name = name.decode('utf-8')
name = ud.normalize('NFC',name)
authorIdx = self.authordict.get(name)
#content=[]
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
(self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray,featuretfidf,0)
self.debugmsg('end distance computing \n', 0)
# here we can define the range to apply the otsu for recommendations
# for example self.closeauthordis[0:1000] or all them
self.debugmsg('start otsuifilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
# splitidx contains the first index of three groups, close, medium, far
# now generate three recommendations in each group
recommendations = []
# save the valid remdidx
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
# skip myself go to next one
remdinfo = self.getremdinfo(i)
                    if remdinfo and i not in remdidx:
#print remdinfo
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
#self.debugmsg(str(n) + ' ' + str(i), 0)
i = i + 1
                # didn't find the required number of valid recommendations
                # by the end, so start searching backwards
if (i == len(self.closeauthordis)) or (backwardcount > 1):
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
#self.debugmsg('search backward ' + str(i), 0)
        # randomize the order of the recommendations
random.shuffle(recommendations)
self.result=OrderedDict([("name",name),("recommendations",recommendations)])
self.debugmsg('end recommendationV3 \n', 0)
return self.result
"""
"""
def recommendationV4(self, name, n):
self.nremd = n
self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)
self.debugmsg('find the idx', 0)
if isinstance(name, str):
#idx = self.authors.index(name)
name = ud.normalize('NFC',name)
authorIdx = self.authordict.get(name)
else:
#idx = self.authors.index(name.decode('utf-8'))
name = name.decode('utf-8')
name = ud.normalize('NFC',name)
authorIdx = self.authordict.get(name)
#content=[]
self.myidx = authorIdx
self.debugmsg('get the feature vector', 0)
featuretfidf = self.tfidfarray[authorIdx]
self.debugmsg('start distance computing \n', 0)
(self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray,featuretfidf,0)
self.debugmsg('end distance computing \n', 0)
# here we can define the range to apply the otsu for recommendations
# for example self.closeauthordis[0:1000] or all them
self.debugmsg('start otsuifilter\n', 0)
splitidx = self.otsufilter(self.closeauthordis)
self.debugmsg('end otsufilter\n', 0)
# splitidx contains the first index of three groups, close, medium, far
# now generate three recommendations in each group
recommendations = []
# save the valid remdidx
remdidx = []
for i in splitidx:
n = 0
backwardcount = 1
while n != self.nremd:
if self.closeauthors[i] != self.myidx:
# skip myself go to next one
remdinfo = self.getremdinfoV2(i)
                    if remdinfo and i not in remdidx:
#print remdinfo
recommendations.append(remdinfo)
n = n + 1
remdidx.append(i)
#self.debugmsg(str(n) + ' ' + str(i), 0)
i = i + 1
                # didn't find the required number of valid recommendations
                # by the end, so start searching backwards
if (i == len(self.closeauthordis)) or (backwardcount > 1):
if backwardcount == 1:
backwardstart = i - self.nremd
i = backwardstart - backwardcount
backwardcount = backwardcount + 1
#self.debugmsg('search backward ' + str(i), 0)
        # randomize the order of the recommendations
random.shuffle(recommendations)
self.result=OrderedDict([("name",name),("recommendations",recommendations)])
self.debugmsg('end recommendationV4 \n', 0)
return self.result
"""
find n nearset neighbors of point p in given space using linear search
if n == 0, sort all the points in space
"""
def nNNlinesearch(self, space, p, n):
closeauthordis = []
closeauthordis = pairwise_distances(space, p, metric='cosine')
closeauthordis = closeauthordis.flatten()
closeauthors = closeauthordis.argsort()
closeauthordis.sort()
if n > 0 :
closeauthors = closeauthors[0:n]
closeauthordis = closeauthordis[0:n]
        # delete myself, because the distance to myself is always 0
        idx = np.where(closeauthors == self.myidx)[0]
        if idx.size:
            closeauthors = np.delete(closeauthors, idx[0])
            closeauthordis = np.delete(closeauthordis, idx[0])
return (closeauthors, closeauthordis)
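    # The distance call above, on a toy space (rows are author vectors):
    #   >>> space = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    #   >>> d = pairwise_distances(space, space[0:1], metric='cosine').flatten()
    # d is roughly [0.0, 1.0, 0.293]; argsort then gives the order [0, 2, 1].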
"""
split the distance in to 3 groups using otsu filtering
return the first index of each group
"""
def otsufilter(self, tdis):
trd = np.zeros(3, int)
#tdis = self.filteredcloseauthordis()
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis>t1])
# the first index of each group
# trd[1] = len(tdis[tdis<t1])
# trd[2] = len(tdis) - len(tdis[tdis>t2])
# get the medium 3 in the medium group
# get the last 3 in the far group
trd[1] = len(tdis[tdis<t1]) + int((len(tdis[tdis<t2]) - len(tdis[tdis<t1]))/2)-1
trd[2] = len(tdis) - 3
return trd
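    # skimage's Otsu threshold picks the cut that best separates two modes,
    # e.g. on toy data:
    #   >>> d = np.array([0.1, 0.12, 0.5, 0.52, 0.9, 0.92])
    #   >>> filters.threshold_otsu(d)   # falls between the low and high modes
    # Applying it twice, as above, carves the sorted distances into three bands.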
"""
    extract the detailed information of the recommendation by its index in
    the closeauthors
    ignore unqualified ones that have few papers or are not active
    recently, and also remove my co-authors
"""
def getremdinfo(self, clsidx):
# get the author index from closeauthors
remdidx = self.closeauthors[clsidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
if idx.count(self.myidx):
# remove the coauthor
return []
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([("name",name), ("relevancy",self.closeauthordis[clsidx]),("coAuthors",coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)])
else:
return []
"""
    extract the detailed information of the recommendation by its index in
    the closeauthors
    ignore unqualified ones that have few papers or are not active
    recently, and also remove known people in the mention network
"""
def getremdinfoV2(self, clsidx):
# get the author index from closeauthors
remdidx = self.closeauthors[clsidx]
username = self.authors[self.myidx]
recentpub = self.resentpublicationsidx(remdidx)
if recentpub:
name = self.authors[remdidx]
#[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)
mentionlist = self.mentionnetwork[username]
if name in mentionlist:
# skip the coauthor
return []
#
remdid = self.id_name[name]
if self.labels[remdid] == 'org':
return []
# get the recommendation's mention list
coauthors = self.mycoauthorsV4bymentionlist(name)
researchtopic = self.keywordbyidx(remdidx)
return OrderedDict([("name",name), ("relevancy",self.closeauthordis[clsidx]),("coAuthors", coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)])
else:
return []
"""
"""
def updatedistance(self):
        # 1st degree connection in coauthorship
        # (self.coauthornet is assumed to be a precomputed coauthor-count
        # matrix; it is not built anywhere in this file)
        deg1con = self.coauthornet[self.myidx, self.closeauthors]
deg1conidx = np.where(deg1con>0)[0]
#deg1con = deg1con[deg1con>0]
# 2nd degree connection in coauthorship
deg2conidx = np.where(deg1con==0)[0]
deg2con = np.zeros(deg2conidx.size)
for i in self.closeauthors[deg1conidx]:
deg2con = deg2con + self.coauthornet[i,self.closeauthors[deg2conidx]]
deg1con = deg1con[deg1con>0]
deg1con = deg1con/max(deg1con)
return (deg1conidx, deg1con,deg2conidx,deg2con)
"""
return the top N recommendations:
    recommendations, coauthors, researchtopics, recentpub (at least 3 and
    no more than 5 years)
"""
def filteredrecommendations(self, n):
recommendations = []
self.filteridx = []
self.filteredauthors = []
i = 0
for name in self.recommendauthor:
[coauthors, idx, c] = self.mycoauthorsV4(name)
# remove the coauthors
if idx.count(self.myidx):
i = i+1
continue
recentpub = self.resentpublications(name)
# check if the recentpub is empty which is not active anymore
if not recentpub:
i = i+1
continue
# --
self.filteredauthors.append(name)
            # computing keywords here takes too much time, so skip it in testing
            # researchtopic = self.keyword(name)
researchtopic = []
researchtopic.append(OrderedDict([("topic", "TBD")]))
#recommendations.append({'name':name, 'coAuthors':coauthors, 'researchTopcs':researchtopic, 'recentPublications':recentpub} )
recommendations.append(OrderedDict([("name",name), ("relevancy",self.closeauthordis[i]),("coAuthors",coauthors),("researchTopics",researchtopic), ("recentPublications",recentpub)]))
#result={'name':user, 'recommendations':recommendations};
# save the picked idx
self.filteridx.append(i)
i = i+1
# only need top n recommendations
if len(self.filteridx) == n:
break
return recommendations
"""
"""
def thresholdrecommendations(self, remds,n):
thredremd = []
self.trd = np.zeros(3)
tdis = self.filteredcloseauthordis()
t1 = filters.threshold_otsu(tdis)
t2 = filters.threshold_otsu(tdis[tdis>t1])
# get the top 3 in each group
self.trd[1] = len(tdis[tdis<t1])
self.trd[2] = len(tdis) - len(tdis[tdis>t2])
# get the top 3 in first group, median 3 in second group,
# last 3 in third group
# self.trd[1] = int((len(tdis[tdis<t2]) - len(tdis[tdis<t1]))/2)-1
# self.trd[2] = len(tdis) - 3
for i in range(3):
for j in range(int(n/3)):
k = int(self.trd[i]+j)
name = remds[k]['name']
researchtopic = self.keyword(name)
remds[k]['researchTopics'] = researchtopic
thredremd.append(remds[k])
return thredremd
"""
"""
def filteredcloseauthordis(self):
return self.closeauthordis[self.filteridx]
"""
"""
def save_json(self,filename):
PROJECT_DIRECTORY = 'output/project/' + project_name + '/'
        with io.open(PROJECT_DIRECTORY + filename + '.json', 'w',
                     encoding='utf-8') as outfile:
            outfile.write(json.dumps(self.result, ensure_ascii=False))
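

if __name__ == '__main__':
    # Minimal usage sketch: assumes the input files listed in __init__ exist
    # under output/project/<project_name>/ and that 'Jane Doe' is a
    # hypothetical author name present in authors_target.txt.
    rsys = recommendationsys(2015)
    rsys.recommendationV4('Jane Doe', 3)   # fills rsys.result
    rsys.save_json('recommendations_Jane_Doe')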
= self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n mentionlist = self.mentionnetwork[username]\n if name in mentionlist:\n return []\n remdid = self.id_name[name]\n if self.labels[remdid] == 'org':\n return []\n coauthors = self.mycoauthorsV4bymentionlist(name)\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def updatedistance(self):\n deg1con = self.coauthornet[self.myidx, self.closeauthors]\n deg1conidx = np.where(deg1con > 0)[0]\n deg2conidx = np.where(deg1con == 0)[0]\n deg2con = np.zeros(deg2conidx.size)\n for i in self.closeauthors[deg1conidx]:\n deg2con = deg2con + self.coauthornet[i, self.closeauthors[\n deg2conidx]]\n deg1con = deg1con[deg1con > 0]\n deg1con = deg1con / max(deg1con)\n return deg1conidx, deg1con, deg2conidx, deg2con\n <mask token>\n\n def filteredrecommendations(self, n):\n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n i = 0\n for name in self.recommendauthor:\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n if idx.count(self.myidx):\n i = i + 1\n continue\n recentpub = self.resentpublications(name)\n if not recentpub:\n i = i + 1\n continue\n self.filteredauthors.append(name)\n researchtopic = []\n researchtopic.append(OrderedDict([('topic', 'TBD')]))\n recommendations.append(OrderedDict([('name', name), (\n 'relevancy', self.closeauthordis[i]), ('coAuthors',\n coauthors), ('researchTopics', researchtopic), (\n 'recentPublications', recentpub)]))\n self.filteridx.append(i)\n i = i + 1\n if len(self.filteridx) == n:\n break\n return recommendations\n <mask token>\n\n def thresholdrecommendations(self, remds, n):\n thredremd = []\n self.trd = np.zeros(3)\n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n self.trd[1] = len(tdis[tdis < t1])\n self.trd[2] = len(tdis) - len(tdis[tdis > t2])\n for i in range(3):\n for j in range(int(n / 3)):\n k = int(self.trd[i] + j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n return thredremd\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass recommendationsys:\n\n def __init__(self, nyear):\n self.activityyear = 10\n self.debug = 0\n self.nremd = 3\n PROJECT_DIRECTORY = 'output/project/' + project_name\n self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'\n self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'\n self.f_years = PROJECT_DIRECTORY + '/years_target.txt'\n self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'\n self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'\n self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'\n self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'\n self.npaper = 10\n self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())\n self.keywordthreshold = 10\n self.debugmsg('start init', 0)\n self.docluster()\n self.initNLTKConditionalFreqDist()\n self.filterN = len(self.authors)\n self.debugmsg('end init\\n', 0)\n <mask token>\n\n def debugmsg(self, msg, lvl):\n if self.debug <= lvl:\n print(msg)\n <mask token>\n\n def resentpublicationsidx(self, authoridx):\n resentpub = []\n idx = self.authortitlesidx[authoridx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n <mask token>\n\n def resentpublications(self, name):\n resentpub = []\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n idx = self.authortitlesidx[idx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n\n def initNLTKConditionalFreqDist(self):\n self.debugmsg('start initNLTK CFD\\n', 0)\n pairs = []\n pairs = nltk.bigrams(self.allcorp)\n self.cfd = nltk.ConditionalFreqDist(pairs)\n self.debugmsg('end initNLTK CFD\\n', 0)\n\n def keyword(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n\n def keywordbyidx(self, idx):\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n\n def bigramkeywords(self, text):\n content = text\n userpairs = 
list(nltk.bigrams(content))\n keywordsbackup = []\n keywords = []\n for p in userpairs:\n pairsdic = self.cfd[p[0]]\n n = pairsdic[p[1]]\n if n >= self.keywordthreshold:\n keywords.append((p, n))\n keywordsbackup.append((p, n))\n finalkeywords = []\n uniqkeywords = set(keywords)\n keywords = sorted(uniqkeywords, key=lambda keywords: keywords[1])\n for p in keywords:\n if p[1] >= 25 or userpairs.count(p[0]) > 1:\n finalkeywords.append([' '.join(p[0]), p[1], userpairs.count\n (p[0])])\n finalkeywords.reverse()\n if not finalkeywords:\n uniqkeywords = set(keywordsbackup)\n keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup:\n keywordsbackup[1])\n finalkeywords.append([' '.join(keywordsbackup[-1][0]),\n keywordsbackup[-1][1], userpairs.count(keywordsbackup[0])])\n else:\n pluralidx = self.findpluralbigram(finalkeywords)\n self.removepluralbigram(finalkeywords, pluralidx)\n return finalkeywords\n <mask token>\n <mask token>\n <mask token>\n\n def findpluralbigram(self, keywordsinfo):\n c = []\n for i in keywordsinfo:\n t = i[0].split()\n t1 = ''\n for n in t:\n if n[-1] == 's':\n n = n[:-1]\n t1 = t1 + n\n c.append(t1)\n uniqbigram = list(set(c))\n pluralidx = []\n for i in uniqbigram:\n count = c.count(i)\n if count > 1:\n cc = []\n for n in range(len(c)):\n if i == c[n]:\n cc.append(n)\n pluralidx.append(cc)\n return pluralidx\n <mask token>\n\n def mycoauthorsV2(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthorship = self.coauthornetV2[idx]\n uniqcoauthors = np.array(list(set(coauthorship)))\n coauthorcount = []\n for i in uniqcoauthors:\n coauthorcount.append(coauthorship.count(i))\n countidx = np.argsort(coauthorcount)\n countidx = countidx[::-1]\n coauthorcount = np.array(coauthorcount)\n result = []\n for i in countidx:\n result.append(OrderedDict([('name', self.authors[uniqcoauthors[\n i]]), ('cooperationCount', coauthorcount[i])]))\n return result, list(uniqcoauthors[countidx]), list(coauthorcount[\n countidx])\n <mask token>\n\n def mycoauthorsV3(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4byidx(self, idx):\n coauthors 
= []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4bymentionlist(self, name):\n if name in self.mentionnetwork.keys():\n mentiondict = self.mentionnetwork[name]\n else:\n mentiondict = {'None': 0}\n result = []\n sorted_mentiondict = sorted(mentiondict.items(), key=operator.\n itemgetter(1), reverse=True)\n for i in sorted_mentiondict:\n result.append(OrderedDict([('name', i[0]), ('cooperationCount',\n i[1])]))\n return result\n <mask token>\n <mask token>\n <mask token>\n\n def mynewcoauthors(self, userIdx, yearPre, yearCur):\n coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)\n coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)\n newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)\n return newCoauthors\n <mask token>\n <mask token>\n <mask token>\n\n def secondhoopties(self, userX, userY, year):\n result = []\n coauthors1, count1 = self.mycoauthorsbyyear(userX, 2016)\n for i in coauthors1:\n coauthors2, count2 = self.mycoauthorsbyyear(i, 2016)\n for n in coauthors2:\n coauthors3, count3 = self.mycoauthorsbyyear(n, 2016)\n if userY in coauthors3:\n result.append([[i, n], [count1[coauthors1.index(i)],\n count2[coauthors2.index(n)], count3[coauthors3.\n index(userY)]]])\n <mask token>\n <mask token>\n <mask token>\n\n def getVenue(self, userIdx):\n venues = self.authorbooktitleidx[userIdx]\n c = Counter(venues)\n frqvenues = c.most_common()\n return frqvenues[0][0]\n <mask token>\n\n def contentsimilarity(self, userX, userY, year):\n contentX = self.getcontentbyyear(userX, year)\n if not contentX:\n return -1\n contentX = contentX[0:10]\n contentY = self.getcontentbyyear(userY, year)\n if not contentY:\n return -1\n contentY = contentY[0:10]\n contents = []\n for i in contentX:\n contents.extend(i.split(' '))\n lenx = len(contents)\n for i in contentY:\n contents.extend(i.split(' '))\n stemmer = nltk.stem.PorterStemmer()\n stems = [stemmer.stem(t) for t in contents]\n newcontentX = stems[0:lenx]\n newcontentY = stems[lenx:]\n vectorizer = CountVectorizer()\n v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(\n newcontentY)])\n cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0\n ]\n return cosinesimilarity\n <mask token>\n\n def networksimilarity(self, userX, userY, year):\n coauthors, c = self.mycoauthorsbyyear(userX, year)\n edgesFG = len(coauthors)\n n = 0\n for i in coauthors:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(coauthors[n:]))\n edgesFG = edgesFG + len(con)\n n = n + 1\n weakties, cx, cy = self.weakties(userX, userY, year)\n edgesMFG = 2 * len(weakties)\n n = 0\n for i in weakties:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(weakties[n:]))\n edgesMFG = edgesMFG + len(con)\n n = n + 1\n if edgesFG * edgesMFG:\n ns = np.log(edgesMFG) / np.log(2 * edgesFG)\n else:\n ns = -1\n return ns, edgesFG, edgesMFG, cx, cy\n <mask token>\n\n def textnormalizing(self, text):\n c = 0\n for i in text:\n if i[-1] == 
's':\n ii = i[:-1]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-2:] == 'es':\n ii = i[:-2]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ies':\n ii = i[:-3] + 'y'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ing':\n ii = i[:-3]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-4]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-3] + 'e'\n if ii in text:\n text[c] = c + 1\n continue\n c = c + 1\n return text\n <mask token>\n <mask token>\n\n def radiusofcluster(self, labels, nth, dismatrix):\n idx = np.where(labels == nth)[0]\n dis = dismatrix[idx, nth]\n self.mindis = min(dis)\n self.maxdis = max(dis)\n self.radius = self.maxdis\n <mask token>\n\n def showcontents(self, labels, nth, allcontents):\n contents = []\n idx = np.where(labels == nth)\n idx = np.array(idx)\n idx = idx.flatten()\n for i in idx:\n contents.append(allcontents[i])\n return contents\n <mask token>\n\n def digstring(self, s):\n for i in s:\n if i.isdigit():\n return True\n return False\n <mask token>\n\n def distance(self, a, b):\n if scipy.sparse.issparse(a):\n a = a.toarray()\n a = a[0]\n if scipy.sparse.issparse(b):\n b = b.toarray()\n b = b[0]\n a = np.array(a)\n b = np.array(b)\n return np.sqrt(sum(np.square(a - b)))\n <mask token>\n\n def updatecoauthornetworkV2(self, net, authors, namelist):\n nameidx = []\n for name in namelist:\n nameidx.append(authors.index(name))\n for i in nameidx:\n tmpidx = nameidx[:]\n tmpidx.remove(i)\n if not net:\n net.append(tmpidx)\n elif i > len(net) - 1:\n net.append(tmpidx)\n else:\n net[i].extend(tmpidx)\n <mask token>\n\n def per_org_label(self):\n f = codecs.open(self.f_perorglabel, 'r', 'utf-8')\n labels = {}\n for line in f:\n items = line.split()\n labels[items[0]] = items[1]\n f.close()\n self.labels = labels\n <mask token>\n\n def mention_network(self):\n f = codecs.open(self.f_mentionnetwork, 'r', 'utf-8')\n source = ''\n network = {}\n for line in f:\n items = line.split('\"')\n if source == '':\n source = items[0]\n target = {}\n if source == items[0]:\n target[items[1]] = int(items[2])\n else:\n network[items[0]] = target\n source = items[0]\n target = {}\n f.close()\n return network\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def recommendationV4(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' +\n str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n else:\n name = name.decode('utf-8')\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n self.myidx = authorIdx\n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n self.debugmsg('start distance computing \\n', 0)\n self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.\n tfidfarray, featuretfidf, 0)\n self.debugmsg('end distance computing \\n', 0)\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis)\n self.debugmsg('end otsufilter\\n', 0)\n recommendations = []\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n remdinfo = self.getremdinfoV2(i)\n if remdinfo and ~remdidx.count(i):\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n i = i + 1\n if i == len(self.closeauthordis) or backwardcount > 1:\n if backwardcount 
== 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n random.shuffle(recommendations)\n self.result = OrderedDict([('name', name), ('recommendations',\n recommendations)])\n self.debugmsg('end recommendationV4 \\n', 0)\n return self.result\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getremdinfo(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n [coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n if idx.count(self.myidx):\n return []\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def getremdinfoV2(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n username = self.authors[self.myidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n mentionlist = self.mentionnetwork[username]\n if name in mentionlist:\n return []\n remdid = self.id_name[name]\n if self.labels[remdid] == 'org':\n return []\n coauthors = self.mycoauthorsV4bymentionlist(name)\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def updatedistance(self):\n deg1con = self.coauthornet[self.myidx, self.closeauthors]\n deg1conidx = np.where(deg1con > 0)[0]\n deg2conidx = np.where(deg1con == 0)[0]\n deg2con = np.zeros(deg2conidx.size)\n for i in self.closeauthors[deg1conidx]:\n deg2con = deg2con + self.coauthornet[i, self.closeauthors[\n deg2conidx]]\n deg1con = deg1con[deg1con > 0]\n deg1con = deg1con / max(deg1con)\n return deg1conidx, deg1con, deg2conidx, deg2con\n <mask token>\n\n def filteredrecommendations(self, n):\n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n i = 0\n for name in self.recommendauthor:\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n if idx.count(self.myidx):\n i = i + 1\n continue\n recentpub = self.resentpublications(name)\n if not recentpub:\n i = i + 1\n continue\n self.filteredauthors.append(name)\n researchtopic = []\n researchtopic.append(OrderedDict([('topic', 'TBD')]))\n recommendations.append(OrderedDict([('name', name), (\n 'relevancy', self.closeauthordis[i]), ('coAuthors',\n coauthors), ('researchTopics', researchtopic), (\n 'recentPublications', recentpub)]))\n self.filteridx.append(i)\n i = i + 1\n if len(self.filteridx) == n:\n break\n return recommendations\n <mask token>\n\n def thresholdrecommendations(self, remds, n):\n thredremd = []\n self.trd = np.zeros(3)\n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n self.trd[1] = len(tdis[tdis < t1])\n self.trd[2] = len(tdis) - len(tdis[tdis > t2])\n for i in range(3):\n for j in range(int(n / 3)):\n k = int(self.trd[i] + j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n return thredremd\n <mask token>\n\n def filteredcloseauthordis(self):\n return self.closeauthordis[self.filteridx]\n <mask token>\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass recommendationsys:\n\n def __init__(self, nyear):\n self.activityyear = 10\n self.debug = 0\n self.nremd = 3\n PROJECT_DIRECTORY = 'output/project/' + project_name\n self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'\n self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'\n self.f_years = PROJECT_DIRECTORY + '/years_target.txt'\n self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'\n self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'\n self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'\n self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'\n self.npaper = 10\n self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())\n self.keywordthreshold = 10\n self.debugmsg('start init', 0)\n self.docluster()\n self.initNLTKConditionalFreqDist()\n self.filterN = len(self.authors)\n self.debugmsg('end init\\n', 0)\n <mask token>\n\n def debugmsg(self, msg, lvl):\n if self.debug <= lvl:\n print(msg)\n <mask token>\n\n def resentpublicationsidx(self, authoridx):\n resentpub = []\n idx = self.authortitlesidx[authoridx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n <mask token>\n\n def resentpublications(self, name):\n resentpub = []\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n idx = self.authortitlesidx[idx]\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n if int(self.years[idx[0]]) < self.nyear or len(idx) < self.npaper:\n return resentpub\n for i in idx:\n authorsjson = []\n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([('name', author)]))\n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\n '%Y-%m-%d %H:%M:%S')\n resentpub.append(OrderedDict([('title', self.rawtitles[i]), (\n 'authors', authorsjson), ('year', date), (\n 'publicationVenue', self.booktitle[i])]))\n return resentpub\n\n def initNLTKConditionalFreqDist(self):\n self.debugmsg('start initNLTK CFD\\n', 0)\n pairs = []\n pairs = nltk.bigrams(self.allcorp)\n self.cfd = nltk.ConditionalFreqDist(pairs)\n self.debugmsg('end initNLTK CFD\\n', 0)\n\n def keyword(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n\n def keywordbyidx(self, idx):\n contentjson = []\n content = self.authorcontents[idx].lower().split()\n finalkeywords = self.bigramkeywords(content)\n for topic in finalkeywords:\n contentjson.append(OrderedDict([('topic', topic[0])]))\n return contentjson\n <mask token>\n\n def bigramkeywords(self, text):\n content = text\n userpairs = 
list(nltk.bigrams(content))\n keywordsbackup = []\n keywords = []\n for p in userpairs:\n pairsdic = self.cfd[p[0]]\n n = pairsdic[p[1]]\n if n >= self.keywordthreshold:\n keywords.append((p, n))\n keywordsbackup.append((p, n))\n finalkeywords = []\n uniqkeywords = set(keywords)\n keywords = sorted(uniqkeywords, key=lambda keywords: keywords[1])\n for p in keywords:\n if p[1] >= 25 or userpairs.count(p[0]) > 1:\n finalkeywords.append([' '.join(p[0]), p[1], userpairs.count\n (p[0])])\n finalkeywords.reverse()\n if not finalkeywords:\n uniqkeywords = set(keywordsbackup)\n keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup:\n keywordsbackup[1])\n finalkeywords.append([' '.join(keywordsbackup[-1][0]),\n keywordsbackup[-1][1], userpairs.count(keywordsbackup[0])])\n else:\n pluralidx = self.findpluralbigram(finalkeywords)\n self.removepluralbigram(finalkeywords, pluralidx)\n return finalkeywords\n <mask token>\n\n def removepluralbigram(self, bigram, pluralidx):\n if not pluralidx:\n print('empty')\n return\n delcount = 0\n pren = 0\n for i in pluralidx:\n for n in i[1:]:\n if n > pren:\n n = n - delcount\n bigram[i[0]][1] = bigram[i[0]][1] + bigram[n][1]\n bigram.remove(bigram[n])\n delcount = delcount + 1\n pren = n\n <mask token>\n\n def findpluralbigram(self, keywordsinfo):\n c = []\n for i in keywordsinfo:\n t = i[0].split()\n t1 = ''\n for n in t:\n if n[-1] == 's':\n n = n[:-1]\n t1 = t1 + n\n c.append(t1)\n uniqbigram = list(set(c))\n pluralidx = []\n for i in uniqbigram:\n count = c.count(i)\n if count > 1:\n cc = []\n for n in range(len(c)):\n if i == c[n]:\n cc.append(n)\n pluralidx.append(cc)\n return pluralidx\n <mask token>\n\n def mycoauthorsV2(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthorship = self.coauthornetV2[idx]\n uniqcoauthors = np.array(list(set(coauthorship)))\n coauthorcount = []\n for i in uniqcoauthors:\n coauthorcount.append(coauthorship.count(i))\n countidx = np.argsort(coauthorcount)\n countidx = countidx[::-1]\n coauthorcount = np.array(coauthorcount)\n result = []\n for i in countidx:\n result.append(OrderedDict([('name', self.authors[uniqcoauthors[\n i]]), ('cooperationCount', coauthorcount[i])]))\n return result, list(uniqcoauthors[countidx]), list(coauthorcount[\n countidx])\n <mask token>\n\n def mycoauthorsV3(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4(self, name):\n if isinstance(name, str):\n idx = self.authordict.get(name)\n else:\n idx = self.authordict.get(name.decode('utf-8'))\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n 
for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4byidx(self, idx):\n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([('name', self.authors[unicoauthors[-\n (i + 1)]]), ('cooperationCount', coauthorcount[-(i + 1)])]))\n return result, list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mycoauthorsV4bymentionlist(self, name):\n if name in self.mentionnetwork.keys():\n mentiondict = self.mentionnetwork[name]\n else:\n mentiondict = {'None': 0}\n result = []\n sorted_mentiondict = sorted(mentiondict.items(), key=operator.\n itemgetter(1), reverse=True)\n for i in sorted_mentiondict:\n result.append(OrderedDict([('name', i[0]), ('cooperationCount',\n i[1])]))\n return result\n <mask token>\n\n def mycoauthorsbyyear(self, idx, year):\n years = np.array(self.years)\n yearidx = np.where(years <= year)[0]\n coauthorsidx = [self.coauthorsidx[i] for i in yearidx]\n coauthors = []\n for i in coauthorsidx:\n if idx in i:\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n return list(unicoauthors[::-1]), list(coauthorcount[::-1])\n <mask token>\n\n def mynewcoauthors(self, userIdx, yearPre, yearCur):\n coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)\n coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)\n newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)\n return newCoauthors\n <mask token>\n <mask token>\n <mask token>\n\n def secondhoopties(self, userX, userY, year):\n result = []\n coauthors1, count1 = self.mycoauthorsbyyear(userX, 2016)\n for i in coauthors1:\n coauthors2, count2 = self.mycoauthorsbyyear(i, 2016)\n for n in coauthors2:\n coauthors3, count3 = self.mycoauthorsbyyear(n, 2016)\n if userY in coauthors3:\n result.append([[i, n], [count1[coauthors1.index(i)],\n count2[coauthors2.index(n)], count3[coauthors3.\n index(userY)]]])\n <mask token>\n\n def getcontentbyyear(self, userIdx, year):\n titleIdx = self.authortitlesidx[userIdx]\n titleIdx = np.array(titleIdx)\n years = [self.years[i] for i in titleIdx]\n years = np.array(years)\n years.sort()\n years = years[::-1]\n yearIdx = np.where(years <= year)[0]\n content = [self.titles[i] for i in titleIdx[yearIdx]]\n return content\n <mask token>\n\n def getVenue(self, userIdx):\n venues = self.authorbooktitleidx[userIdx]\n c = Counter(venues)\n frqvenues = c.most_common()\n return frqvenues[0][0]\n <mask token>\n\n def contentsimilarity(self, userX, userY, year):\n contentX = self.getcontentbyyear(userX, year)\n if not contentX:\n return -1\n contentX = contentX[0:10]\n contentY = self.getcontentbyyear(userY, year)\n if not contentY:\n return -1\n contentY = contentY[0:10]\n contents = []\n for i in contentX:\n contents.extend(i.split(' '))\n lenx = len(contents)\n for i in contentY:\n contents.extend(i.split(' '))\n stemmer = nltk.stem.PorterStemmer()\n stems = 
[stemmer.stem(t) for t in contents]\n newcontentX = stems[0:lenx]\n newcontentY = stems[lenx:]\n vectorizer = CountVectorizer()\n v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(\n newcontentY)])\n cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0\n ]\n return cosinesimilarity\n <mask token>\n\n def networksimilarity(self, userX, userY, year):\n coauthors, c = self.mycoauthorsbyyear(userX, year)\n edgesFG = len(coauthors)\n n = 0\n for i in coauthors:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(coauthors[n:]))\n edgesFG = edgesFG + len(con)\n n = n + 1\n weakties, cx, cy = self.weakties(userX, userY, year)\n edgesMFG = 2 * len(weakties)\n n = 0\n for i in weakties:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(weakties[n:]))\n edgesMFG = edgesMFG + len(con)\n n = n + 1\n if edgesFG * edgesMFG:\n ns = np.log(edgesMFG) / np.log(2 * edgesFG)\n else:\n ns = -1\n return ns, edgesFG, edgesMFG, cx, cy\n <mask token>\n\n def textnormalizing(self, text):\n c = 0\n for i in text:\n if i[-1] == 's':\n ii = i[:-1]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-2:] == 'es':\n ii = i[:-2]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ies':\n ii = i[:-3] + 'y'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n if i[-3:] == 'ing':\n ii = i[:-3]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-4]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n ii = i[:-3] + 'e'\n if ii in text:\n text[c] = c + 1\n continue\n c = c + 1\n return text\n <mask token>\n <mask token>\n\n def radiusofcluster(self, labels, nth, dismatrix):\n idx = np.where(labels == nth)[0]\n dis = dismatrix[idx, nth]\n self.mindis = min(dis)\n self.maxdis = max(dis)\n self.radius = self.maxdis\n <mask token>\n\n def showcontents(self, labels, nth, allcontents):\n contents = []\n idx = np.where(labels == nth)\n idx = np.array(idx)\n idx = idx.flatten()\n for i in idx:\n contents.append(allcontents[i])\n return contents\n <mask token>\n\n def digstring(self, s):\n for i in s:\n if i.isdigit():\n return True\n return False\n <mask token>\n\n def distance(self, a, b):\n if scipy.sparse.issparse(a):\n a = a.toarray()\n a = a[0]\n if scipy.sparse.issparse(b):\n b = b.toarray()\n b = b[0]\n a = np.array(a)\n b = np.array(b)\n return np.sqrt(sum(np.square(a - b)))\n <mask token>\n\n def updatecoauthornetworkV2(self, net, authors, namelist):\n nameidx = []\n for name in namelist:\n nameidx.append(authors.index(name))\n for i in nameidx:\n tmpidx = nameidx[:]\n tmpidx.remove(i)\n if not net:\n net.append(tmpidx)\n elif i > len(net) - 1:\n net.append(tmpidx)\n else:\n net[i].extend(tmpidx)\n <mask token>\n\n def per_org_label(self):\n f = codecs.open(self.f_perorglabel, 'r', 'utf-8')\n labels = {}\n for line in f:\n items = line.split()\n labels[items[0]] = items[1]\n f.close()\n self.labels = labels\n <mask token>\n\n def mention_network(self):\n f = codecs.open(self.f_mentionnetwork, 'r', 'utf-8')\n source = ''\n network = {}\n for line in f:\n items = line.split('\"')\n if source == '':\n source = items[0]\n target = {}\n if source == items[0]:\n target[items[1]] = int(items[2])\n else:\n network[items[0]] = target\n source = items[0]\n target = {}\n f.close()\n return network\n <mask token>\n\n def docluster(self):\n tokenizer = RegexpTokenizer('\\\\w+')\n self.rawtitles = []\n self.titles = []\n self.allcorp = []\n sw = 
set(nltk.corpus.stopwords.words('english'))\n self.debugmsg('start titles \\n', 0)\n f = codecs.open(self.f_titles, 'r', 'utf-8')\n for line in f:\n if line[-1] == '\\n':\n line = line[:-1]\n self.rawtitles.append(line)\n line = line.lower()\n tokenlist = tokenizer.tokenize(line)\n self.allcorp += tokenlist\n tokenlist = ' '.join([w for w in tokenlist if (w.lower() not in\n sw) & ~self.digstring(w)])\n self.titles.append(tokenlist)\n f.close()\n self.authordict = {}\n self.authors = []\n self.authorcontents = []\n self.authorrawcontents = []\n self.authortitlesidx = []\n self.authorbooktitleidx = []\n self.coathors = []\n self.coauthorsidx = []\n self.mentionnetwork = {}\n self.id_name = {}\n self.coauthornetV2 = []\n self.mentionnetwork = self.mention_network()\n self.debugmsg('start year \\n', 0)\n self.years = []\n f = codecs.open(self.f_years, 'r', 'utf-8')\n for line in f:\n if line[-1] == '\\n':\n line = line[:-1]\n if line == '':\n line = 0\n timestamp = time.mktime(parser.parse(line).timetuple())\n self.years.append(int(timestamp))\n f.close()\n self.debugmsg('start booktitle \\n', 0)\n self.booktitle = []\n f = codecs.open(self.f_booktitle, 'r', 'utf-8')\n for line in f:\n line = line[:-1]\n self.booktitle.append(line)\n f.close()\n self.debugmsg('start authors \\n', 0)\n i = 0\n m = 0\n f = codecs.open(self.f_authors, 'r', 'utf-8')\n for line in f:\n line = line[:-1]\n newline = line.split(',')\n namelist = newline\n self.coathors.append(namelist)\n authoridx = []\n for name in newline:\n idx = self.authordict.get(name)\n if idx is not None:\n self.authortitlesidx[idx].append(i)\n self.authorbooktitleidx[idx].append(i)\n self.authorcontents[idx] = self.authorcontents[idx\n ] + ' ' + self.titles[i]\n self.authorrawcontents[idx] = self.authorrawcontents[idx\n ] + ' ' + self.rawtitles[i]\n else:\n self.authors.append(name)\n self.authordict[name] = m\n self.authorcontents.append(self.titles[i])\n self.authorrawcontents.append(self.rawtitles[i])\n self.authortitlesidx.append([i])\n self.authorbooktitleidx.append([i])\n idx = m\n m = m + 1\n authoridx.append(idx)\n self.coauthorsidx.append(authoridx)\n i = i + 1\n f.close()\n f = codecs.open(self.f_authors_id, 'r', 'utf-8')\n i = 0\n preline = ''\n for line in f:\n if preline != line:\n if line[-1] == '\\n':\n newline = line[:-1]\n self.id_name[self.authors[i]] = newline\n preline = line\n i = i + 1\n else:\n continue\n f.close()\n self.per_org_label()\n self.vectorizer = CountVectorizer(max_df=0.95, min_df=1, stop_words\n ='english')\n X = self.vectorizer.fit_transform(self.authorcontents)\n Xarray = X\n transformer = TfidfTransformer()\n self.tfidf = transformer.fit_transform(Xarray)\n self.tfidfarray = self.tfidf\n self.featurenames = self.vectorizer.get_feature_names()\n <mask token>\n\n def recommendationV3(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' +\n str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n else:\n name = name.decode('utf-8')\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n self.myidx = authorIdx\n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n self.debugmsg('start distance computing \\n', 0)\n self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.\n tfidfarray, featuretfidf, 0)\n self.debugmsg('end distance computing \\n', 0)\n self.debugmsg('start otsuifilter\\n', 
0)\n splitidx = self.otsufilter(self.closeauthordis)\n self.debugmsg('end otsufilter\\n', 0)\n recommendations = []\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n remdinfo = self.getremdinfo(i)\n if remdinfo and ~remdidx.count(i):\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n i = i + 1\n if i == len(self.closeauthordis) or backwardcount > 1:\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n random.shuffle(recommendations)\n self.result = OrderedDict([('name', name), ('recommendations',\n recommendations)])\n self.debugmsg('end recommendationV3 \\n', 0)\n return self.result\n <mask token>\n\n def recommendationV4(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' +\n str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n else:\n name = name.decode('utf-8')\n name = ud.normalize('NFC', name)\n authorIdx = self.authordict.get(name)\n self.myidx = authorIdx\n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n self.debugmsg('start distance computing \\n', 0)\n self.closeauthors, self.closeauthordis = self.nNNlinesearch(self.\n tfidfarray, featuretfidf, 0)\n self.debugmsg('end distance computing \\n', 0)\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis)\n self.debugmsg('end otsufilter\\n', 0)\n recommendations = []\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n remdinfo = self.getremdinfoV2(i)\n if remdinfo and ~remdidx.count(i):\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n i = i + 1\n if i == len(self.closeauthordis) or backwardcount > 1:\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n random.shuffle(recommendations)\n self.result = OrderedDict([('name', name), ('recommendations',\n recommendations)])\n self.debugmsg('end recommendationV4 \\n', 0)\n return self.result\n <mask token>\n\n def nNNlinesearch(self, space, p, n):\n closeauthordis = []\n closeauthordis = pairwise_distances(space, p, metric='cosine')\n closeauthordis = closeauthordis.flatten()\n closeauthors = closeauthordis.argsort()\n closeauthordis.sort()\n if n > 0:\n closeauthors = closeauthors[0:n]\n closeauthordis = closeauthordis[0:n]\n idx = np.where(closeauthors == self.myidx)[0][0]\n closeauthors = np.delete(closeauthors, idx)\n closeauthordis = np.delete(closeauthordis, idx)\n return closeauthors, closeauthordis\n <mask token>\n\n def otsufilter(self, tdis):\n trd = np.zeros(3, int)\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n trd[1] = len(tdis[tdis < t1]) + int((len(tdis[tdis < t2]) - len(\n tdis[tdis < t1])) / 2) - 1\n trd[2] = len(tdis) - 3\n return trd\n <mask token>\n\n def getremdinfo(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n [coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n if idx.count(self.myidx):\n return []\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), 
('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def getremdinfoV2(self, clsidx):\n remdidx = self.closeauthors[clsidx]\n username = self.authors[self.myidx]\n recentpub = self.resentpublicationsidx(remdidx)\n if recentpub:\n name = self.authors[remdidx]\n mentionlist = self.mentionnetwork[username]\n if name in mentionlist:\n return []\n remdid = self.id_name[name]\n if self.labels[remdid] == 'org':\n return []\n coauthors = self.mycoauthorsV4bymentionlist(name)\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([('name', name), ('relevancy', self.\n closeauthordis[clsidx]), ('coAuthors', coauthors), (\n 'researchTopics', researchtopic), ('recentPublications',\n recentpub)])\n else:\n return []\n <mask token>\n\n def updatedistance(self):\n deg1con = self.coauthornet[self.myidx, self.closeauthors]\n deg1conidx = np.where(deg1con > 0)[0]\n deg2conidx = np.where(deg1con == 0)[0]\n deg2con = np.zeros(deg2conidx.size)\n for i in self.closeauthors[deg1conidx]:\n deg2con = deg2con + self.coauthornet[i, self.closeauthors[\n deg2conidx]]\n deg1con = deg1con[deg1con > 0]\n deg1con = deg1con / max(deg1con)\n return deg1conidx, deg1con, deg2conidx, deg2con\n <mask token>\n\n def filteredrecommendations(self, n):\n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n i = 0\n for name in self.recommendauthor:\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n if idx.count(self.myidx):\n i = i + 1\n continue\n recentpub = self.resentpublications(name)\n if not recentpub:\n i = i + 1\n continue\n self.filteredauthors.append(name)\n researchtopic = []\n researchtopic.append(OrderedDict([('topic', 'TBD')]))\n recommendations.append(OrderedDict([('name', name), (\n 'relevancy', self.closeauthordis[i]), ('coAuthors',\n coauthors), ('researchTopics', researchtopic), (\n 'recentPublications', recentpub)]))\n self.filteridx.append(i)\n i = i + 1\n if len(self.filteridx) == n:\n break\n return recommendations\n <mask token>\n\n def thresholdrecommendations(self, remds, n):\n thredremd = []\n self.trd = np.zeros(3)\n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis > t1])\n self.trd[1] = len(tdis[tdis < t1])\n self.trd[2] = len(tdis) - len(tdis[tdis > t2])\n for i in range(3):\n for j in range(int(n / 3)):\n k = int(self.trd[i] + j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n return thredremd\n <mask token>\n\n def filteredcloseauthordis(self):\n return self.closeauthordis[self.filteridx]\n <mask token>\n\n def save_json(self, filename):\n PROJECT_DIRECTORY = 'output/project/' + project_name + '/'\n with io.open(PROJECT_DIRECTORY + filename + '.json', 'w', encoding=\n 'utf-8') as outfile:\n outfile.write(json.dumps(self.result, ensure_ascii=False))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 12 16:38:22 2017\n\n@author: secoder\n\"\"\"\nimport io\nimport random\nimport nltk\nfrom nltk.tokenize import RegexpTokenizer\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\n\nfrom collections import OrderedDict\nfrom collections import Counter\n\nfrom sklearn.metrics import pairwise_distances\n\nimport numpy as np\nimport scipy\n\nimport json\nimport codecs\n\nfrom dateutil import parser\nimport time\nimport datetime\n\nimport operator\n\n#import cPickle as pickle\n#\n#import traceback\n\nfrom skimage import filters\n\nimport unicodedata as ud\n\nfrom config import project_name\n\n\nclass recommendationsys:\n \n def __init__(self, nyear):\n \n # by default we will filter out those don't have publications in recent 10 years\n self.activityyear = 10\n\n self.debug = 0 \n self.nremd = 3\n \n #----------------------\n PROJECT_DIRECTORY = 'output/project/' + project_name\n\n self.f_titles = PROJECT_DIRECTORY + '/cleantitles_target.txt'\n self.f_authors = PROJECT_DIRECTORY + '/authors_target.txt'\n self.f_years = PROJECT_DIRECTORY + '/years_target.txt'\n self.f_booktitle = PROJECT_DIRECTORY + '/venues_target.txt'\n self.f_mentionnetwork = PROJECT_DIRECTORY + '/mention_network.txt'\n self.f_perorglabel = PROJECT_DIRECTORY + '/per_org.txt'\n self.f_authors_id = PROJECT_DIRECTORY + '/authors_id_target.txt'\n\n \n self.npaper = 10\n self.nyear = time.mktime(parser.parse(str(nyear)).timetuple())\n self.keywordthreshold = 10\n #---------------------- \n \n self.debugmsg('start init', 0)\n self.docluster()\n self.initNLTKConditionalFreqDist()\n \n self.filterN = len(self.authors)\n self.debugmsg('end init\\n', 0)\n \n \n \"\"\"\n \"\"\"\n def debugmsg(self, msg, lvl):\n if self.debug <= lvl:\n print(msg)\n \n \"\"\"\n \"\"\" \n def resentpublicationsidx(self,authoridx):\n #print 'start recentpublications\\n'\n resentpub = []\n \n idx = self.authortitlesidx[authoridx]\n \n # sort by years\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n \n # if the most recent publication is before the 'nyears' \n # remove this one from the list\n if (int(self.years[idx[0]]) < self.nyear) or (len(idx) < self.npaper):\n return resentpub\n # ---- \n \n for i in idx:\n authorsjson = [] \n \n for author in self.coathors[i]:\n authorsjson.append(OrderedDict([(\"name\",author)]))\n \n date = datetime.datetime.fromtimestamp(self.years[i]).strftime(\"%Y-%m-%d %H:%M:%S\")\n resentpub.append(OrderedDict([(\"title\",self.rawtitles[i]),(\"authors\",authorsjson), (\"year\",date),(\"publicationVenue\",self.booktitle[i])]))\n\n #print 'end recentpublications\\n'\n return resentpub\n \n \n \"\"\"\n \"\"\"\n def resentpublications(self,name):\n #print 'start recentpublications\\n'\n resentpub = []\n\n #if isinstance(name, unicode): for python 2.7\n if isinstance(name, str):\n #idx = self.authors.index(name)\n idx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n idx = self.authordict.get(name.decode('utf-8'))\n \n idx = self.authortitlesidx[idx]\n \n # sort by years\n years = [self.years[i] for i in idx]\n years = np.array(years)\n years = years.argsort()\n idx = np.array(idx)[years]\n idx = idx.tolist()\n idx.reverse()\n \n # if the most recent publication is before the 'nyears' \n # remove this one from the list\n if (int(self.years[idx[0]]) < 
    def resentpublications(self, name):
        """Recent publications for an author looked up by name."""
        resentpub = []

        # (On Python 2.7 this needed a unicode check; str covers it on Python 3.)
        if isinstance(name, str):
            idx = self.authordict.get(name)
        else:
            idx = self.authordict.get(name.decode('utf-8'))

        idx = self.authortitlesidx[idx]

        # Sort the paper indices by year, newest first.
        years = [self.years[i] for i in idx]
        years = np.array(years)
        years = years.argsort()
        idx = np.array(idx)[years]
        idx = idx.tolist()
        idx.reverse()

        # Same activity filter as resentpublicationsidx().
        if (int(self.years[idx[0]]) < self.nyear) or (len(idx) < self.npaper):
            return resentpub

        for i in idx:
            authorsjson = []

            for author in self.coathors[i]:
                authorsjson.append(OrderedDict([("name", author)]))

            date = datetime.datetime.fromtimestamp(self.years[i]).strftime("%Y-%m-%d %H:%M:%S")
            resentpub.append(OrderedDict([("title", self.rawtitles[i]),
                                          ("authors", authorsjson),
                                          ("year", date),
                                          ("publicationVenue", self.booktitle[i])]))

        return resentpub

    def initNLTKConditionalFreqDist(self):
        """Build a conditional frequency distribution over title bigrams."""
        self.debugmsg('start initNLTK CFD\n', 0)
        pairs = nltk.bigrams(self.allcorp)
        self.cfd = nltk.ConditionalFreqDist(pairs)
        self.debugmsg('end initNLTK CFD\n', 0)

    def keyword(self, name):
        """Research topics for an author looked up by name."""
        if isinstance(name, str):
            idx = self.authordict.get(name)
        else:
            idx = self.authordict.get(name.decode('utf-8'))

        contentjson = []

        # (An earlier unigram ranking over self.vectorizer counts was
        # dropped in favour of the bigram keywords below.)
        content = self.authorcontents[idx].lower().split()
        finalkeywords = self.bigramkeywords(content)

        for topic in finalkeywords:
            contentjson.append(OrderedDict([("topic", topic[0])]))

        return contentjson

    def keywordbyidx(self, idx):
        """Research topics for the author at index idx."""
        contentjson = []

        content = self.authorcontents[idx].lower().split()
        finalkeywords = self.bigramkeywords(content)

        for topic in finalkeywords:
            contentjson.append(OrderedDict([("topic", topic[0])]))

        return contentjson
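    # What self.cfd holds, on a tiny hand-made corpus (illustrative only;
    # the real distribution is built over all title tokens in allcorp):
    #
    #   pairs = nltk.bigrams('deep learning for deep networks'.split())
    #   cfd = nltk.ConditionalFreqDist(pairs)
    #   cfd['deep']['learning']   # -> 1
    #   cfd['deep']['networks']   # -> 1
    #   cfd['for']['deep']        # -> 1
    #
    # bigramkeywords() below keeps one of an author's bigrams only when its
    # corpus-wide count cfd[w1][w2] reaches self.keywordthreshold.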
    def bigramkeywords(self, text):
        """Extract keyword bigrams from an author's tokenized titles."""
        content = text

        userpairs = list(nltk.bigrams(content))

        # keywordsbackup keeps every bigram with its corpus count, so the
        # highest-scoring one can be used when no bigram passes the filter.
        keywordsbackup = []
        keywords = []
        for p in userpairs:
            pairsdic = self.cfd[p[0]]
            n = pairsdic[p[1]]
            if n >= self.keywordthreshold:
                keywords.append((p, n))
            keywordsbackup.append((p, n))

        finalkeywords = []

        uniqkeywords = set(keywords)
        keywords = sorted(uniqkeywords, key=lambda keywords: keywords[1])
        for p in keywords:
            if (p[1] >= 25) or (userpairs.count(p[0]) > 1):
                finalkeywords.append([' '.join(p[0]), p[1], userpairs.count(p[0])])

        finalkeywords.reverse()

        if not finalkeywords:
            # No bigram passed the filter: fall back to the bigram with the
            # highest corpus count, together with its occurrence count in
            # this author's own titles.
            uniqkeywords = set(keywordsbackup)
            keywordsbackup = sorted(uniqkeywords, key=lambda keywordsbackup: keywordsbackup[1])
            finalkeywords.append([' '.join(keywordsbackup[-1][0]),
                                  keywordsbackup[-1][1],
                                  userpairs.count(keywordsbackup[-1][0])])
        else:
            # Merge singular/plural variants of the same bigram.
            pluralidx = self.findpluralbigram(finalkeywords)
            self.removepluralbigram(finalkeywords, pluralidx)

        return finalkeywords

    def removepluralbigram(self, bigram, pluralidx):
        """Collapse plural variants in bigram, accumulating their counts."""
        if not pluralidx:
            # Nothing to merge.
            return

        delcount = 0
        pren = 0

        for i in pluralidx:
            for n in i[1:]:
                if n > pren:
                    n = n - delcount

                bigram[i[0]][1] = bigram[i[0]][1] + bigram[n][1]
                bigram.remove(bigram[n])
                delcount = delcount + 1
                pren = n

    def findpluralbigram(self, keywordsinfo):
        """Group indices of bigrams that only differ by a trailing 's'."""
        c = []
        for i in keywordsinfo:
            t = i[0].split()
            t1 = ''
            for n in t:
                if n[-1] == 's':
                    n = n[:-1]
                t1 = t1 + n

            c.append(t1)

        uniqbigram = list(set(c))
        pluralidx = []

        for i in uniqbigram:
            count = c.count(i)
            if count > 1:
                cc = []
                for n in range(len(c)):
                    if i == c[n]:
                        cc.append(n)
                pluralidx.append(cc)

        return pluralidx

    def mycoauthorsV2(self, name):
        """Co-author list based on the flat co-authorship lists in coauthornetV2."""
        if isinstance(name, str):
            idx = self.authordict.get(name)
        else:
            idx = self.authordict.get(name.decode('utf-8'))

        coauthorship = self.coauthornetV2[idx]
        uniqcoauthors = np.array(list(set(coauthorship)))
        coauthorcount = []
        for i in uniqcoauthors:
            coauthorcount.append(coauthorship.count(i))

        countidx = np.argsort(coauthorcount)
        # Reverse into descending order.
        countidx = countidx[::-1]

        coauthorcount = np.array(coauthorcount)

        result = []
        for i in countidx:
            result.append(OrderedDict([("name", self.authors[uniqcoauthors[i]]),
                                       ("cooperationCount", coauthorcount[i])]))
        return (result, list(uniqcoauthors[countidx]), list(coauthorcount[countidx]))

    def mycoauthorsV3(self, name):
        """Co-author list derived from the per-paper author index lists."""
        if isinstance(name, str):
            idx = self.authordict.get(name)
        else:
            idx = self.authordict.get(name.decode('utf-8'))

        coauthors = []
        for i in self.coauthorsidx:
            if idx in i:
                # Remove the author themselves.
                t = i[:]
                t.remove(idx)
                coauthors.extend(t)

        coauthors = np.array(coauthors)
        unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)

        unicoauthors = unicoauthors[coauthorcount.argsort()]
        coauthorcount.sort()

        result = []
        for i in range(len(coauthorcount)):
            result.append(OrderedDict([("name", self.authors[unicoauthors[-(i + 1)]]),
                                       ("cooperationCount", coauthorcount[-(i + 1)])]))
        return (result, list(unicoauthors[::-1]), list(coauthorcount[::-1]))
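    # The unique/sort idiom shared by the co-author methods above, on toy
    # index data (made-up values):
    #
    #   coauthors = np.array([3, 5, 3, 7, 3, 5])
    #   uni, cnt = np.unique(coauthors, return_counts=True)
    #   # uni -> array([3, 5, 7]), cnt -> array([3, 2, 1])
    #   uni = uni[cnt.argsort()]   # ascending by count -> array([7, 5, 3])
    #   cnt.sort()                 # -> array([1, 2, 3])
    #
    # reversing both with [::-1] then yields the most frequent co-author
    # first: author 3 with count 3, then author 5, then author 7.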
unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n \n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([(\"name\",self.authors[unicoauthors[-(i+1)]]),(\"cooperationCount\",coauthorcount[-(i+1)])]))\n \n \n return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))\n \n \"\"\"\n \"\"\"\n def mycoauthorsV4byidx(self, idx):\n \n coauthors = []\n for i in self.coauthorsidx:\n if idx in i:\n # remove itself\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n \n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n \n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n \n result = []\n for i in range(len(coauthorcount)):\n result.append(OrderedDict([(\"name\",self.authors[unicoauthors[-(i+1)]]),(\"cooperationCount\",coauthorcount[-(i+1)])]))\n \n \n return (result,list(unicoauthors[::-1]),list(coauthorcount[::-1]))\n \n \"\"\"\n \"\"\"\n def mycoauthorsV4bymentionlist(self, name):\n \n if name in self.mentionnetwork.keys():\n mentiondict = self.mentionnetwork[name]\n else:\n mentiondict ={'None':0}\n \n \n result = []\n # sort by mention counts\n sorted_mentiondict = sorted(mentiondict.items(), key=operator.itemgetter(1), reverse=True)\n \n for i in sorted_mentiondict:\n result.append(OrderedDict([(\"name\",i[0]),(\"cooperationCount\",i[1])]))\n \n return result\n \"\"\"\n \"\"\"\n def mycoauthorsbyyear(self, idx, year):\n \n years = np.array(self.years)\n\n yearidx = np.where(years <= year)[0]\n coauthorsidx = [ self.coauthorsidx[i] for i in yearidx]\n \n coauthors = []\n for i in coauthorsidx:\n if idx in i:\n # remove itself\n t = i[:]\n t.remove(idx)\n coauthors.extend(t)\n \n coauthors = np.array(coauthors)\n unicoauthors, coauthorcount = np.unique(coauthors, return_counts=True)\n \n unicoauthors = unicoauthors[coauthorcount.argsort()]\n coauthorcount.sort()\n \n return (list(unicoauthors[::-1]),list(coauthorcount[::-1]))\n \n \"\"\"\n find the new coauthors for a user in current year against previous year\n example: mynewcoauthors(23, 2014, 2015) will returen the new coauthors\n in 2015 regarding the year 2014 for user 23. 
23 is the index of a user\n \"\"\"\n def mynewcoauthors(self, userIdx, yearPre, yearCur):\n coauthornetPre, cp = self.mycoauthorsbyyear(userIdx, yearPre)\n\n coauthornetCur, cc = self.mycoauthorsbyyear(userIdx, yearCur)\n\n newCoauthors = np.setdiff1d(coauthornetCur, coauthornetPre)\n \n return newCoauthors\n\n \"\"\"\n Call the weakties after mynewcoauthors() to find the common nodes \n between a user and his/her coming new coauthors in the year before\n their coauthorship\n \"\"\"\n def weakties(self, userX, userY, year):\n \n coauthornetX, cx = self.mycoauthorsbyyear(userX, year)\n \n # if userX and userY already have a strong ties, just return []\n if userY in coauthornetX:\n return ([], [], [])\n \n coauthornetY, cy = self.mycoauthorsbyyear(userY, year)\n \n # find the common nodes \n weaktienodes = list(set(coauthornetX).intersection(coauthornetY))\n \n nodescountX = []\n nodescountY = []\n \n if weaktienodes:\n for i in weaktienodes:\n nodescountX.append(cx[coauthornetX.index(i)])\n nodescountY.append(cy[coauthornetY.index(i)])\n \n \n return (weaktienodes, nodescountX, nodescountY)\n \n \"\"\"\n 2nd hoop connection\n \"\"\"\n def secondhoopties(self, userX, userY, year):\n result = []\n coauthors1, count1 = self.mycoauthorsbyyear(userX, 2016)\n\n for i in coauthors1:\n coauthors2, count2 = self.mycoauthorsbyyear(i, 2016)\n for n in coauthors2:\n coauthors3, count3 = self.mycoauthorsbyyear(n, 2016)\n if userY in coauthors3:\n result.append([[i,n],[count1[coauthors1.index(i)],count2[coauthors2.index(n)], count3[coauthors3.index(userY)]]])\n\n\n \"\"\"\n Get all the content(paper titles) of the userIdx before \n the 'year'(include the year) \n \"\"\"\n def getcontentbyyear(self, userIdx, year):\n titleIdx = self.authortitlesidx[userIdx]\n\n titleIdx = np.array(titleIdx)\n\n years = [self.years[i] for i in titleIdx]\n\n years = np.array(years)\n \n # sort the years and put the latest year first\n # then the content will also be sorted by recent paper first\n years.sort()\n years = years[::-1]\n\n yearIdx = np.where(years<=year)[0]\n \n content = [self.titles[i] for i in titleIdx[yearIdx]]\n \n return content\n\n \"\"\"\n return the most frequent participated venue of a user\n \"\"\"\n def getVenue(self, userIdx):\n venues = self.authorbooktitleidx[userIdx]\n c = Counter(venues)\n frqvenues = c.most_common()\n \n return frqvenues[0][0]\n\n \"\"\"\n only consider the recent 10 papers\n \"\"\"\n def contentsimilarity(self, userX, userY, year):\n contentX = self.getcontentbyyear(userX, year)\n if not contentX:\n return -1\n contentX = contentX[0:10]\n \n contentY = self.getcontentbyyear(userY, year)\n if not contentY:\n return -1\n contentY = contentY[0:10]\n \n # build the corpus of all the content\n contents = []\n \n \n for i in contentX:\n contents.extend(i.split(' '))\n \n lenx = len(contents)\n \n for i in contentY:\n contents.extend(i.split(' '))\n \n # normalize the different forms of words \n stemmer = nltk.stem.PorterStemmer()\n stems = [stemmer.stem(t) for t in contents] \n \n # reconstruct content for userX and userY use the normalized words\n newcontentX = stems[0:lenx]\n newcontentY = stems[lenx:]\n\n\n \n vectorizer = CountVectorizer()\n v = vectorizer.fit_transform([' '.join(newcontentX), ' '.join(newcontentY)])\n \n cosinesimilarity = pairwise_distances(v[0], v[1], metric='cosine')[0][0]\n \n return cosinesimilarity\n\n \"\"\"\n network similarity\n \"\"\"\n def networksimilarity(self, userX, userY, year):\n \n # first calculate FG(userX) according to paper\n # User 
similarities on social networks\n coauthors, c = self.mycoauthorsbyyear(userX, year)\n \n edgesFG = len(coauthors)\n \n n = 0\n for i in coauthors:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(coauthors[n:]))\n edgesFG = edgesFG + len(con)\n n = n + 1\n \n # second, calculate MFG(userX, userY)\n weakties, cx, cy = self.weakties(userX, userY, year)\n \n edgesMFG = 2 * len(weakties)\n \n n = 0\n for i in weakties:\n subcoauthors, c = self.mycoauthorsbyyear(i, year)\n con = list(set(subcoauthors).intersection(weakties[n:]))\n edgesMFG = edgesMFG + len(con)\n n = n + 1\n \n # last calculate the network similarity\n \n if edgesFG * edgesMFG:\n ns = np.log(edgesMFG)/np.log(2 * edgesFG)\n else:\n ns = -1\n \n return (ns, edgesFG, edgesMFG, cx, cy)\n\n \"\"\"\n text processing, normalize the words to their prototype, such as \n plural form, progressive, etc\n \"\"\"\n def textnormalizing(self, text):\n #l = len(text)\n c = 0\n for i in text:\n # network - networks\n if i[-1] == 's':\n ii = i[:-1]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n # bus - buses\n if i[-2:] == 'es':\n ii = i[:-2]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n # study - studies \n if i[-3:] == 'ies':\n ii = i[:-3] + 'y'\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n # network - networking\n # get - getting\n # explore - exploring \n if i[-3:] == 'ing':\n ii = i[:-3]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n ii = i[:-4]\n if ii in text:\n text[c] = ii\n c = c + 1\n continue\n \n ii = i[:-3] + 'e'\n if ii in text:\n text[c] = c + 1\n continue\n \n c = c + 1\n \n return text\n \n \"\"\"\n \"\"\"\n\n \n \"\"\"\n radius of the cluster\n \"\"\"\n def radiusofcluster(self, labels, nth, dismatrix):\n idx = np.where(labels == nth)[0]\n \n dis = dismatrix[idx,nth]\n \n self.mindis = min(dis)\n self.maxdis = max(dis)\n \n self.radius = self.maxdis\n \n \n \n # return [mindis, maxdis, radius]\n \n \n \"\"\"\n show contents in the same cluster\n \"\"\"\n def showcontents(self,labels, nth, allcontents):\n contents = []\n idx = np.where(labels == nth)\n idx = np.array(idx)\n idx = idx.flatten()\n for i in idx:\n contents.append(allcontents[i])\n \n return contents\n \n \"\"\"\n check if there is digtial in the string\n \"\"\"\n def digstring(self,s):\n for i in s:\n if i.isdigit():\n return True\n return False\n \n \"\"\"\n compute the distance between two points a and b\n \"\"\"\n def distance(self,a,b):\n\n if scipy.sparse.issparse(a):\n a = a.toarray()\n a = a[0]\n \n if scipy.sparse.issparse(b):\n b = b.toarray()\n b = b[0]\n \n a = np.array(a);\n b = np.array(b);\n return np.sqrt(sum(np.square(a - b)))\n \n \"\"\"\n \"\"\"\n def updatecoauthornetworkV2(self,net,authors,namelist):\n nameidx = []\n for name in namelist:\n nameidx.append(authors.index(name))\n \n for i in nameidx:\n tmpidx = nameidx[:]\n tmpidx.remove(i)\n # if net is empty\n if not net:\n net.append(tmpidx)\n else:\n if i>len(net)-1:\n net.append(tmpidx)\n else:\n net[i].extend(tmpidx)\n\n \"\"\"\n load the person or organization label\n \"\"\"\n def per_org_label(self):\n f = codecs.open(self.f_perorglabel,'r','utf-8')\n labels = {}\n for line in f:\n items = line.split()\n labels[items[0]] = items[1] \n f.close()\n self.labels = labels\n\n \"\"\"\n \"\"\"\n def mention_network(self):\n f = codecs.open(self.f_mentionnetwork,'r','utf-8')\n source=''\n network = {}\n for line in f:\n items = line.split('\"')\n if source == '':\n source = items[0]\n target = 
{}\n\t\t\n if source == items[0]:\n target[items[1]] = int(items[2])\n else:\n network[items[0]] = target\n source = items[0]\n target = {}\n \n f.close()\n return network\n \n \n \"\"\"\n \"\"\"\n def docluster(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n\n\n self.rawtitles = []\n self.titles = []\n self.allcorp = []\n\n sw = set(nltk.corpus.stopwords.words('english'))\n \n \n self.debugmsg('start titles \\n', 0)\n f = codecs.open(self.f_titles,'r','utf-8')\n for line in f: \n # remove the '\\n' at the end\n if line[-1] == '\\n':\n line = line[:-1]\n self.rawtitles.append(line)\n line = line.lower()\n tokenlist = tokenizer.tokenize(line)\n \n self.allcorp += tokenlist\n #for corp in newline:\n # self.allcorp.append(corp)\n \n # collect all the words except digtals and stopwords\n tokenlist = ' '.join([w for w in tokenlist if (w.lower() not in sw) & ~(self.digstring(w))])\n self.titles.append(tokenlist)\n f.close()\n # end use codecs\n \n # filename = './CHI/CHI_authors.txt'\n self.authordict = {}\n self.authors = []\n self.authorcontents = []\n self.authorrawcontents = []\n self.authortitlesidx = []\n self.authorbooktitleidx = []\n self.coathors = []\n self.coauthorsidx = [] # undirect link, etc, dblp coauthorship network\n self.mentionnetwork = {} # direct link, etc,tweet mention network\n self.id_name = {}\n \n\n self.coauthornetV2 = []\n \n # readin the mention network\n self.mentionnetwork = self.mention_network()\n \n # read years\n self.debugmsg('start year \\n', 0)\n self.years = []\n \n f = codecs.open(self.f_years,'r','utf-8')\n for line in f:\n # remive \\n\n if line[-1] == '\\n':\n line = line[:-1]\n if line == '':\n line = 0\n #line = line.split()\n #year = line[-1]\n timestamp = time.mktime(parser.parse(line).timetuple())\n self.years.append(int(timestamp))\n f.close()\n \n # read conference \n self.debugmsg('start booktitle \\n', 0)\n self.booktitle = []\n \n f = codecs.open(self.f_booktitle,'r','utf-8')\n for line in f:\n # remove the \\n at the end\n line = line[:-1]\n self.booktitle.append(line)\n f.close()\n \n # read authors\n self.debugmsg('start authors \\n', 0)\n i = 0\n m = 0\n f = codecs.open(self.f_authors,'r','utf-8')\n\n for line in f:\n # remove the last '\\n' \n line = line[:-1]\n # split the authors by ','\n newline = line.split(\",\")\n namelist = newline\n self.coathors.append(namelist) \n \n \n authoridx = []\n \n for name in newline: \n \n # dictonary version \n idx = self.authordict.get(name)\n if idx is not None:\n self.authortitlesidx[idx].append(i)\n self.authorbooktitleidx[idx].append(i)\n\n self.authorcontents[idx] = self.authorcontents[idx] + ' ' + self.titles[i]\n self.authorrawcontents[idx] = self.authorrawcontents[idx] + ' ' + self.rawtitles[i]\n else:\n self.authors.append(name)\n\n self.authordict[name] = m\n\n self.authorcontents.append(self.titles[i])\n self.authorrawcontents.append(self.rawtitles[i])\n \n self.authortitlesidx.append([i])\n self.authorbooktitleidx.append([i])\n\n idx = m\n m = m + 1\n authoridx.append(idx)\n # end dict version\n \n self.coauthorsidx.append(authoridx)\n i = i + 1\n\n f.close()\n\n \n f = codecs.open(self.f_authors_id,'r','utf-8')\n i = 0\n preline = ''\n for line in f:\n if preline != line:\n #print(i)\n #print('preline: {}, line: {}'.format(preline, line))\n if line[-1] == '\\n':\n newline = line[:-1]\n self.id_name[self.authors[i]] = newline\n preline = line\n i = i + 1\n \n else:\n continue\n \n #print(i)\n f.close()\n \n \n # load the per and org classification result\n self.per_org_label()\n \n 
self.vectorizer = CountVectorizer(max_df=0.95, min_df=1,stop_words='english')\n \n X = self.vectorizer.fit_transform(self.authorcontents)\n \n #Xarray = X.toarray()\n Xarray = X\n \n #plt.plot(hist)\n \n transformer = TfidfTransformer()\n \n self.tfidf = transformer.fit_transform(Xarray)\n #self.tfidfarray = self.tfidf.toarray()\n self.tfidfarray = self.tfidf\n \n self.featurenames = self.vectorizer.get_feature_names()\n\n \n \n \n \n \n \"\"\" \n \"\"\"\n def recommendationV3(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n #idx = self.authors.index(name)\n name = ud.normalize('NFC',name)\n authorIdx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n name = name.decode('utf-8')\n name = ud.normalize('NFC',name)\n authorIdx = self.authordict.get(name)\n \n #content=[]\n \n \n \n self.myidx = authorIdx \n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n \n self.debugmsg('start distance computing \\n', 0)\n (self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray,featuretfidf,0)\n self.debugmsg('end distance computing \\n', 0)\n\n # here we can define the range to apply the otsu for recommendations\n # for example self.closeauthordis[0:1000] or all them\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis) \n self.debugmsg('end otsufilter\\n', 0) \n \n # splitidx contains the first index of three groups, close, medium, far\n # now generate three recommendations in each group\n recommendations = []\n \n # save the valid remdidx\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n # skip myself go to next one\n remdinfo = self.getremdinfo(i)\n if remdinfo and ~remdidx.count(i):\n #print remdinfo\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n #self.debugmsg(str(n) + ' ' + str(i), 0)\n \n i = i + 1\n \n # didn't find required number of valid remd untill the end\n # start backwards search\n if (i == len(self.closeauthordis)) or (backwardcount > 1):\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n #self.debugmsg('search backward ' + str(i), 0)\n \n\n # randomlize the order of the recommendations\n random.shuffle(recommendations)\n \n self.result=OrderedDict([(\"name\",name),(\"recommendations\",recommendations)]) \n self.debugmsg('end recommendationV3 \\n', 0)\n return self.result \n \n \"\"\" \n \"\"\"\n def recommendationV4(self, name, n):\n self.nremd = n\n self.debugmsg('Will generate recommendations in 3 groups and ' + str(n) + ' for each group', 1)\n self.debugmsg('find the idx', 0)\n if isinstance(name, str):\n #idx = self.authors.index(name)\n name = ud.normalize('NFC',name)\n authorIdx = self.authordict.get(name)\n else:\n #idx = self.authors.index(name.decode('utf-8')) \n name = name.decode('utf-8')\n name = ud.normalize('NFC',name)\n authorIdx = self.authordict.get(name)\n \n #content=[]\n \n \n \n self.myidx = authorIdx \n self.debugmsg('get the feature vector', 0)\n featuretfidf = self.tfidfarray[authorIdx]\n \n self.debugmsg('start distance computing \\n', 0)\n (self.closeauthors, self.closeauthordis) = self.nNNlinesearch(self.tfidfarray,featuretfidf,0)\n self.debugmsg('end distance computing \\n', 0)\n\n # here we can 
define the range to apply the otsu for recommendations\n # for example self.closeauthordis[0:1000] or all them\n self.debugmsg('start otsuifilter\\n', 0)\n splitidx = self.otsufilter(self.closeauthordis) \n self.debugmsg('end otsufilter\\n', 0) \n \n # splitidx contains the first index of three groups, close, medium, far\n # now generate three recommendations in each group\n recommendations = []\n \n # save the valid remdidx\n remdidx = []\n for i in splitidx:\n n = 0\n backwardcount = 1\n while n != self.nremd:\n if self.closeauthors[i] != self.myidx:\n # skip myself go to next one\n remdinfo = self.getremdinfoV2(i)\n if remdinfo and ~remdidx.count(i):\n #print remdinfo\n recommendations.append(remdinfo)\n n = n + 1\n remdidx.append(i)\n #self.debugmsg(str(n) + ' ' + str(i), 0)\n \n i = i + 1\n \n # didn't find required number of valid remd untill the end\n # start backwards search\n if (i == len(self.closeauthordis)) or (backwardcount > 1):\n if backwardcount == 1:\n backwardstart = i - self.nremd\n i = backwardstart - backwardcount\n backwardcount = backwardcount + 1\n #self.debugmsg('search backward ' + str(i), 0)\n \n\n # randomlize the order of the recommendations\n random.shuffle(recommendations)\n \n self.result=OrderedDict([(\"name\",name),(\"recommendations\",recommendations)]) \n self.debugmsg('end recommendationV4 \\n', 0)\n return self.result \n \n \"\"\"\n find n nearset neighbors of point p in given space using linear search\n if n == 0, sort all the points in space\n \"\"\"\n def nNNlinesearch(self, space, p, n):\n closeauthordis = []\n \n\n closeauthordis = pairwise_distances(space, p, metric='cosine')\n closeauthordis = closeauthordis.flatten()\n \n closeauthors = closeauthordis.argsort()\n closeauthordis.sort()\n \n if n > 0 :\n closeauthors = closeauthors[0:n]\n closeauthordis = closeauthordis[0:n]\n \n # delete myself, cuz the distance is always 0\n idx = np.where(closeauthors == self.myidx)[0][0]\n \n closeauthors = np.delete(closeauthors, idx)\n closeauthordis = np.delete(closeauthordis, idx)\n \n return (closeauthors, closeauthordis)\n \n\n\n \"\"\"\n split the distance in to 3 groups using otsu filtering\n return the first index of each group\n \"\"\"\n def otsufilter(self, tdis):\n trd = np.zeros(3, int)\n \n #tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis>t1])\n \n # the first index of each group\n# trd[1] = len(tdis[tdis<t1])\n# trd[2] = len(tdis) - len(tdis[tdis>t2])\n \n # get the medium 3 in the medium group\n # get the last 3 in the far group\n trd[1] = len(tdis[tdis<t1]) + int((len(tdis[tdis<t2]) - len(tdis[tdis<t1]))/2)-1\n trd[2] = len(tdis) - 3 \n \n return trd\n\n \"\"\"\n extract the detail inforamtion of the recommendation by its indx in\n the closeauthors\n ignor those unqualified ones which has few papers or not active \n recently, and also remove my co-authors\n \"\"\"\n def getremdinfo(self, clsidx):\n # get the author index from closeauthors\n remdidx = self.closeauthors[clsidx]\n \n recentpub = self.resentpublicationsidx(remdidx)\n \n if recentpub:\n name = self.authors[remdidx]\n [coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n \n if idx.count(self.myidx):\n # remove the coauthor\n return []\n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([(\"name\",name), (\"relevancy\",self.closeauthordis[clsidx]),(\"coAuthors\",coauthors),(\"researchTopics\",researchtopic), (\"recentPublications\",recentpub)])\n else:\n return []\n\n \"\"\"\n extract the detail 
inforamtion of the recommendation by its indx in\n the closeauthors\n ignor those unqualified ones which has few papers or not active \n recently, and also remove known people in the mention network\n \"\"\"\n def getremdinfoV2(self, clsidx):\n # get the author index from closeauthors\n remdidx = self.closeauthors[clsidx]\n \n username = self.authors[self.myidx]\n \n recentpub = self.resentpublicationsidx(remdidx)\n \n if recentpub:\n name = self.authors[remdidx]\n #[coauthors, idx, c] = self.mycoauthorsV4byidx(remdidx)\n mentionlist = self.mentionnetwork[username]\n \n if name in mentionlist:\n # skip the coauthor\n return []\n \n #\n remdid = self.id_name[name]\n \n if self.labels[remdid] == 'org':\n return []\n \n # get the recommendation's mention list\n coauthors = self.mycoauthorsV4bymentionlist(name)\n \n researchtopic = self.keywordbyidx(remdidx)\n return OrderedDict([(\"name\",name), (\"relevancy\",self.closeauthordis[clsidx]),(\"coAuthors\", coauthors),(\"researchTopics\",researchtopic), (\"recentPublications\",recentpub)])\n else:\n return []\n\n \"\"\"\n \"\"\"\n def updatedistance(self):\n # 1st degree connection in coauthorship\n deg1con=self.coauthornet[self.myidx,self.closeauthors]\n deg1conidx = np.where(deg1con>0)[0]\n #deg1con = deg1con[deg1con>0]\n \n # 2nd degree connection in coauthorship\n deg2conidx = np.where(deg1con==0)[0]\n deg2con = np.zeros(deg2conidx.size)\n \n for i in self.closeauthors[deg1conidx]:\n deg2con = deg2con + self.coauthornet[i,self.closeauthors[deg2conidx]]\n \n deg1con = deg1con[deg1con>0]\n \n deg1con = deg1con/max(deg1con)\n return (deg1conidx, deg1con,deg2conidx,deg2con)\n \n \"\"\"\n return the top N recommendations:\n recommendations, coauthors, researchtopics, recentpub(at least 3 and no \n morethan 5 years) \n \"\"\"\n def filteredrecommendations(self, n):\n \n recommendations = []\n self.filteridx = []\n self.filteredauthors = []\n \n i = 0\n for name in self.recommendauthor:\n #coauthors = []\n #researchtopic = []\n #recentpub = []\n #coauthorsjson = []\n #[coauthors, idx, c] = self.mycoauthors(name)\n #[coauthors, idx, c] = self.mycoauthorsV2(name)\n #[coauthors, idx, c] = self.mycoauthorsV3(name)\n [coauthors, idx, c] = self.mycoauthorsV4(name)\n\n # remove the coauthors \n if idx.count(self.myidx):\n i = i+1\n continue\n \n recentpub = self.resentpublications(name)\n\n # check if the recentpub is empty which is not active anymore\n if not recentpub:\n i = i+1\n continue\n # -- \n\n self.filteredauthors.append(name) \n \n # take too much time skip in test\n # researchtopic = self.keyword(name)\n researchtopic = []\n researchtopic.append(OrderedDict([(\"topic\", \"TBD\")]))\n \n \n #recommendations.append({'name':name, 'coAuthors':coauthors, 'researchTopcs':researchtopic, 'recentPublications':recentpub} )\n recommendations.append(OrderedDict([(\"name\",name), (\"relevancy\",self.closeauthordis[i]),(\"coAuthors\",coauthors),(\"researchTopics\",researchtopic), (\"recentPublications\",recentpub)])) \n #result={'name':user, 'recommendations':recommendations};\n \n # save the picked idx\n self.filteridx.append(i) \n i = i+1\n \n # only need top n recommendations\n \n if len(self.filteridx) == n:\n break\n \n return recommendations\n \n \n \"\"\"\n \"\"\"\n def thresholdrecommendations(self, remds,n):\n \n thredremd = []\n self.trd = np.zeros(3)\n \n tdis = self.filteredcloseauthordis()\n t1 = filters.threshold_otsu(tdis)\n t2 = filters.threshold_otsu(tdis[tdis>t1])\n \n # get the top 3 in each group\n self.trd[1] = len(tdis[tdis<t1])\n 
self.trd[2] = len(tdis) - len(tdis[tdis>t2])\n \n # get the top 3 in first group, median 3 in second group, \n # last 3 in third group\n# self.trd[1] = int((len(tdis[tdis<t2]) - len(tdis[tdis<t1]))/2)-1\n# self.trd[2] = len(tdis) - 3\n \n \n for i in range(3):\n for j in range(int(n/3)):\n k = int(self.trd[i]+j)\n name = remds[k]['name']\n researchtopic = self.keyword(name)\n remds[k]['researchTopics'] = researchtopic\n thredremd.append(remds[k])\n \n return thredremd\n \n\n \n \"\"\"\n \"\"\"\n def filteredcloseauthordis(self):\n return self.closeauthordis[self.filteridx]\n \n \n \"\"\"\n \"\"\"\n def save_json(self,filename): \n PROJECT_DIRECTORY = 'output/project/' + project_name + '/'\n with io.open(PROJECT_DIRECTORY + filename +'.json','w',encoding=\"utf-8\") as outfile:\n outfile.write((json.dumps((self.result), ensure_ascii=False)))\n \n \n",
"step-ids": [
21,
25,
35,
43,
47
]
}
|
[
21,
25,
35,
43,
47
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Array of random numbers:\n', array)
<|reserved_special_token_0|>
for el in range(SIZE):
if array[el] > max_el:
max_el = array[el]
max_el_inx = el
if array[el] < min_el:
min_el = array[el]
min_el_inx = el
print('Minimum and maximum elements of the array:\n', min_el, 'and', max_el)
array.pop(min_el_inx)
array.insert(min_el_inx, max_el)
array.pop(max_el_inx)
array.insert(max_el_inx, min_el)
print(
    'Array with the minimum and maximum elements swapped:\n',
array)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SIZE = 10
MIN_ITEM = -100
MAX_ITEM = 100
array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
print('Array of random numbers:\n', array)
min_el = array[0]
max_el = array[0]
max_el_inx = 0
min_el_inx = 0
for el in range(SIZE):
if array[el] > max_el:
max_el = array[el]
max_el_inx = el
if array[el] < min_el:
min_el = array[el]
min_el_inx = el
print('Minimum and maximum elements of the array:\n', min_el, 'and', max_el)
array.pop(min_el_inx)
array.insert(min_el_inx, max_el)
array.pop(max_el_inx)
array.insert(max_el_inx, min_el)
print(
    'Array with the minimum and maximum elements swapped:\n',
array)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import random
SIZE = 10
MIN_ITEM = -100
MAX_ITEM = 100
array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
print('Array of random numbers:\n', array)
min_el = array[0]
max_el = array[0]
max_el_inx = 0
min_el_inx = 0
for el in range(SIZE):
if array[el] > max_el:
max_el = array[el]
max_el_inx = el
if array[el] < min_el:
min_el = array[el]
min_el_inx = el
print('Minimum and maximum elements of the array:\n', min_el, 'and', max_el)
array.pop(min_el_inx)
array.insert(min_el_inx, max_el)
array.pop(max_el_inx)
array.insert(max_el_inx, min_el)
print(
    'Array with the minimum and maximum elements swapped:\n',
array)
<|reserved_special_token_1|>
"""
    In an array of random integers, swap the minimum and maximum elements.
"""
import random
SIZE = 10
MIN_ITEM = -100
MAX_ITEM = 100
array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
print('Array of random numbers:\n', array)
min_el = array[0]
max_el = array[0]
max_el_inx = 0
min_el_inx = 0
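# one pass over the array, tracking both extreme values and their indices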
for el in range(SIZE):
if array[el] > max_el:
max_el = array[el]
max_el_inx = el
if array[el] < min_el:
min_el = array[el]
min_el_inx = el
print('Minimum and maximum elements of the array:\n', min_el, 'and', max_el)
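# swap in place: pop each extreme and insert the other at the recorded index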
array.pop(min_el_inx)
array.insert(min_el_inx, max_el)
array.pop(max_el_inx)
array.insert(max_el_inx, min_el)
print('Array with the minimum and maximum elements swapped:\n', array)
|
flexible
|
{
"blob_id": "6027836b1b5d3cb8b842b1a1b77f5c9777269896",
"index": 7177,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Массив случайных чисел:\\n', array)\n<mask token>\nfor el in range(SIZE):\n if array[el] > max_el:\n max_el = array[el]\n max_el_inx = el\n if array[el] < min_el:\n min_el = array[el]\n min_el_inx = el\nprint('Минимальный и максимальный элементы массива:\\n', min_el, 'и', max_el)\narray.pop(min_el_inx)\narray.insert(min_el_inx, max_el)\narray.pop(max_el_inx)\narray.insert(max_el_inx, min_el)\nprint(\n 'Массив, в котром поменяны местами минимальный и максимальный элементы:\\n',\n array)\n",
"step-3": "<mask token>\nSIZE = 10\nMIN_ITEM = -100\nMAX_ITEM = 100\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]\nprint('Массив случайных чисел:\\n', array)\nmin_el = array[0]\nmax_el = array[0]\nmax_el_inx = 0\nmin_el_inx = 0\nfor el in range(SIZE):\n if array[el] > max_el:\n max_el = array[el]\n max_el_inx = el\n if array[el] < min_el:\n min_el = array[el]\n min_el_inx = el\nprint('Минимальный и максимальный элементы массива:\\n', min_el, 'и', max_el)\narray.pop(min_el_inx)\narray.insert(min_el_inx, max_el)\narray.pop(max_el_inx)\narray.insert(max_el_inx, min_el)\nprint(\n 'Массив, в котром поменяны местами минимальный и максимальный элементы:\\n',\n array)\n",
"step-4": "<mask token>\nimport random\nSIZE = 10\nMIN_ITEM = -100\nMAX_ITEM = 100\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]\nprint('Массив случайных чисел:\\n', array)\nmin_el = array[0]\nmax_el = array[0]\nmax_el_inx = 0\nmin_el_inx = 0\nfor el in range(SIZE):\n if array[el] > max_el:\n max_el = array[el]\n max_el_inx = el\n if array[el] < min_el:\n min_el = array[el]\n min_el_inx = el\nprint('Минимальный и максимальный элементы массива:\\n', min_el, 'и', max_el)\narray.pop(min_el_inx)\narray.insert(min_el_inx, max_el)\narray.pop(max_el_inx)\narray.insert(max_el_inx, min_el)\nprint(\n 'Массив, в котром поменяны местами минимальный и максимальный элементы:\\n',\n array)\n",
"step-5": "\"\"\"\n В массиве случайных целых чисел поменять местами минимальный и максимальный элементы.\n\"\"\"\n\n\nimport random\n\nSIZE = 10\nMIN_ITEM = -100\nMAX_ITEM = 100\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]\nprint('Массив случайных чисел:\\n', array)\n\nmin_el = array[0]\nmax_el = array[0]\nmax_el_inx = 0\nmin_el_inx = 0\nfor el in range(SIZE):\n if array[el] > max_el:\n max_el = array[el]\n max_el_inx = el\n if array[el] < min_el:\n min_el = array[el]\n min_el_inx = el\nprint('Минимальный и максимальный элементы массива:\\n', min_el, 'и', max_el)\narray.pop(min_el_inx)\narray.insert(min_el_inx, max_el)\narray.pop(max_el_inx)\narray.insert(max_el_inx, min_el)\nprint('Массив, в котром поменяны местами минимальный и максимальный элементы:\\n', array)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day66.settings')
import django
django.setup()
from applistions.models import MyClass, Student, Teacher, Employee
from django.db.models import Avg, Sum, Max, Min, Count
ret = Employee.objects.all().aggregate(Max('salary'))
print(ret)
ret = Employee.objects.all().aggregate(max_salary=Max('salary'))
print(ret)
ret = Employee.objects.all().aggregate(Avg('salary'))
print(ret)
ret = Employee.objects.values('dept').aggregate(Avg('salary'))
print(ret)
ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list(
'dept', 'salary__avg')
print(ret)
ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')
).values_list('dept', 'avg_age')
print(ret)
ret = Student.objects.values('myclass').annotate(s_count=Count('id'))
print(ret)
<|reserved_special_token_1|>
import os
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day66.settings')
import django
django.setup()
from applistions.models import MyClass, Student, Teacher, Employee
from django.db.models import Avg, Sum, Max, Min, Count
ret = Employee.objects.all().aggregate(Max('salary'))
print(ret)
ret = Employee.objects.all().aggregate(max_salary=Max('salary'))
print(ret)
ret = Employee.objects.all().aggregate(Avg('salary'))
print(ret)
ret = Employee.objects.values('dept').aggregate(Avg('salary'))
print(ret)
ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list(
'dept', 'salary__avg')
print(ret)
ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')
).values_list('dept', 'avg_age')
print(ret)
ret = Student.objects.values('myclass').annotate(s_count=Count('id'))
print(ret)
<|reserved_special_token_1|>
import os
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "day66.settings")
import django
django.setup()
from applistions.models import MyClass,Student,Teacher,Employee
from django.db.models import Avg, Sum, Max, Min, Count
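    # aggregate() returns a single dict over the whole queryset; annotate()
    # computes one value per group, grouped by the preceding values() call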
    # 1. find the highest salary among all employees
ret = Employee.objects.all().aggregate(Max('salary'))
print(ret) # {'salary__max': 80909}
    # # give the key of the returned dict a custom name
ret = Employee.objects.all().aggregate(max_salary=Max('salary'))
print(ret) # {'max_salary': 80909}
    # # average salary of all employees
ret = Employee.objects.all().aggregate(Avg('salary'))
print(ret) # {'salary__avg': 20855.1667}
    # use the ORM to get the average salary of each department
    ret = Employee.objects.values('dept').aggregate(Avg('salary'))
    print(ret) # this still averages over all employees, so this query is wrong
    # annotate() holds what to compute after grouping
    # grouping is done by whatever values() precedes annotate()
ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list('dept','salary__avg')
print(ret) # <QuerySet [('财务部', 2111.0), ('技术部', 17000.0), ('人事部', 6000.0), ('管理部', 80909.0)]>
    # # grouping in the ORM uses annotate
    # # 1. annotate() holds what to compute after grouping
    # # 2. grouping is done by whatever values() precedes annotate()
# ret = Employee.objects.values('dept').annotate(avg_price=Avg('salary')).values('dept', 'avg_price')
# print(ret)
#
    # # average age of each department
ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')).values_list('dept','avg_age')
print(ret) # <QuerySet [('财务部', 27.5), ('技术部', 300.0), ('人事部', 45.0), ('管理部', 45.0)]>
    # # count the number of students in each class
ret = Student.objects.values('myclass').annotate(s_count=Count('id'))
print(ret) # <QuerySet [{'myclass': 1, 's_count': 1}, {'myclass': 2, 's_count': 3}, {'myclass': 3, 's_count': 2}, {'myclass': 4, 's_count': 1}, {'myclass': 5, 's_count': 1}, {'myclass': 6, 's_count': 1}, {'myclass': 7, 's_count': 1}]>
|
flexible
|
{
"blob_id": "ee72262fb29b46784fb357269dd5160192968c1b",
"index": 1713,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day66.settings')\n import django\n django.setup()\n from applistions.models import MyClass, Student, Teacher, Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list(\n 'dept', 'salary__avg')\n print(ret)\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')\n ).values_list('dept', 'avg_age')\n print(ret)\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret)\n",
"step-3": "import os\nif __name__ == '__main__':\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day66.settings')\n import django\n django.setup()\n from applistions.models import MyClass, Student, Teacher, Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list(\n 'dept', 'salary__avg')\n print(ret)\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')\n ).values_list('dept', 'avg_age')\n print(ret)\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret)\n",
"step-4": "import os\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"day66.settings\")\n\n import django\n django.setup()\n\n from applistions.models import MyClass,Student,Teacher,Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n\n # 1.求所有人里面工资最高的\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret) # {'salary__max': 80909}\n\n # # 指定返回字典中key的值\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret) # {'max_salary': 80909}\n\n # # 求所有人的平均价格\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret) # {'salary__avg': 20855.1667}\n\n # 使用ORM查询每个部门的平均工资\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret) # 查询的是每个人的平均工资,此条查询错误\n # annotate中要写上分住之后要做的事情\n # anntate前面查询的是什么就按什么分组\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list('dept','salary__avg')\n print(ret) # <QuerySet [('财务部', 2111.0), ('技术部', 17000.0), ('人事部', 6000.0), ('管理部', 80909.0)]>\n\n # # ORM中分组使用annotate\n # # 1. annotate中要写上分组之后要做的事情\n # # 2. annotate前面查询的是什么就按什么分组\n # ret = Employee.objects.values('dept').annotate(avg_price=Avg('salary')).values('dept', 'avg_price')\n # print(ret)\n #\n # # 每个部门的平均年龄\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')).values_list('dept','avg_age')\n print(ret) # <QuerySet [('财务部', 27.5), ('技术部', 300.0), ('人事部', 45.0), ('管理部', 45.0)]>\n\n # # 求每个班级的学生的数量\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret) # <QuerySet [{'myclass': 1, 's_count': 1}, {'myclass': 2, 's_count': 3}, {'myclass': 3, 's_count': 2}, {'myclass': 4, 's_count': 1}, {'myclass': 5, 's_count': 1}, {'myclass': 6, 's_count': 1}, {'myclass': 7, 's_count': 1}]>\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from collections import defaultdict
from django.shortcuts import render
from django.views.decorators.cache import cache_control
from peterbecom.plog.models import BlogItem, Category
from peterbecom.plog.utils import utc_now
from peterbecom.plog.views import json_view
ONE_MONTH = 60 * 60 * 24 * 30
@cache_control(public=True, max_age=ONE_MONTH)
def index(request):
return render(request, "ajaxornot/index.html")
def get_data(max_length=1000, pub_date_format=None, offset=0):
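    # one query for category names plus one pass over the through table
    # buckets category names per blog item, avoiding a query per item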
items = []
category_names = dict((x.id, x.name) for x in Category.objects.all())
categories = defaultdict(list)
for e in BlogItem.categories.through.objects.all():
categories[e.blogitem_id].append(category_names[e.category_id])
qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by("-pub_date")
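    # note: max_length is used as the slice stop index here, not as a count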
for item in qs[offset:max_length]:
pub_date = item.pub_date
if pub_date_format:
pub_date = pub_date_format(pub_date)
items.append(
{
"title": item.title,
"slug": item.oid,
"pub_date": pub_date,
"keywords": [x for x in item.proper_keywords if x][:3],
"categories": categories[item.id][:3],
}
)
return items
@cache_control(public=True, max_age=ONE_MONTH)
def view1(request):
context = {"items": get_data()}
return render(request, "ajaxornot/view1.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view2(request):
return render(request, "ajaxornot/view2.html")
@cache_control(public=True, max_age=ONE_MONTH)
def view2_table(request):
context = {"items": get_data()}
return render(request, "ajaxornot/view2_table.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view3(request):
return render(request, "ajaxornot/view3.html")
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view3_data(request):
return {"items": get_data(pub_date_format=lambda x: x.strftime("%B %Y"))}
@cache_control(public=True, max_age=ONE_MONTH)
def view4(request):
data = get_data(pub_date_format=lambda x: x.strftime("%B %Y"))
context = {"items": data}
return render(request, "ajaxornot/view4.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5(request):
context = {"items": get_data(max_length=25)}
return render(request, "ajaxornot/view5.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5_table(request):
context = {"items": get_data(offset=25)}
return render(request, "ajaxornot/view5_trs.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view6(request):
return render(request, "ajaxornot/view6.html")
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view6_data(request):
return {"items": get_data(pub_date_format=lambda x: x.strftime("%B %Y"))}
@cache_control(public=True, max_age=ONE_MONTH)
def view7a(request):
return render(request, "ajaxornot/view7a.html")
@cache_control(public=True, max_age=ONE_MONTH)
def view7b(request):
return render(request, "ajaxornot/view7b.html")
|
normal
|
{
"blob_id": "e90fb3b6009dd4fb780649c04398b361fa1ae195",
"index": 8489,
"step-1": "<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, 'ajaxornot/index.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view1.html', context)\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view2_table.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, 'ajaxornot/view3.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))\n context = {'items': data}\n return render(request, 'ajaxornot/view4.html', context)\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, 'ajaxornot/view6.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, 'ajaxornot/view7b.html')\n",
"step-2": "<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, 'ajaxornot/index.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view1.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2(request):\n return render(request, 'ajaxornot/view2.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view2_table.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, 'ajaxornot/view3.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))\n context = {'items': data}\n return render(request, 'ajaxornot/view4.html', context)\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, 'ajaxornot/view6.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view6_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, 'ajaxornot/view7b.html')\n",
"step-3": "<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, 'ajaxornot/index.html')\n\n\ndef get_data(max_length=1000, pub_date_format=None, offset=0):\n items = []\n category_names = dict((x.id, x.name) for x in Category.objects.all())\n categories = defaultdict(list)\n for e in BlogItem.categories.through.objects.all():\n categories[e.blogitem_id].append(category_names[e.category_id])\n qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by('-pub_date')\n for item in qs[offset:max_length]:\n pub_date = item.pub_date\n if pub_date_format:\n pub_date = pub_date_format(pub_date)\n items.append({'title': item.title, 'slug': item.oid, 'pub_date':\n pub_date, 'keywords': [x for x in item.proper_keywords if x][:3\n ], 'categories': categories[item.id][:3]})\n return items\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view1.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2(request):\n return render(request, 'ajaxornot/view2.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view2_table.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, 'ajaxornot/view3.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view3_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))\n context = {'items': data}\n return render(request, 'ajaxornot/view4.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5(request):\n context = {'items': get_data(max_length=25)}\n return render(request, 'ajaxornot/view5.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5_table(request):\n context = {'items': get_data(offset=25)}\n return render(request, 'ajaxornot/view5_trs.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, 'ajaxornot/view6.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view6_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7a(request):\n return render(request, 'ajaxornot/view7a.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, 'ajaxornot/view7b.html')\n",
"step-4": "<mask token>\nONE_MONTH = 60 * 60 * 24 * 30\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, 'ajaxornot/index.html')\n\n\ndef get_data(max_length=1000, pub_date_format=None, offset=0):\n items = []\n category_names = dict((x.id, x.name) for x in Category.objects.all())\n categories = defaultdict(list)\n for e in BlogItem.categories.through.objects.all():\n categories[e.blogitem_id].append(category_names[e.category_id])\n qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by('-pub_date')\n for item in qs[offset:max_length]:\n pub_date = item.pub_date\n if pub_date_format:\n pub_date = pub_date_format(pub_date)\n items.append({'title': item.title, 'slug': item.oid, 'pub_date':\n pub_date, 'keywords': [x for x in item.proper_keywords if x][:3\n ], 'categories': categories[item.id][:3]})\n return items\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view1.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2(request):\n return render(request, 'ajaxornot/view2.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view2_table.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, 'ajaxornot/view3.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view3_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))\n context = {'items': data}\n return render(request, 'ajaxornot/view4.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5(request):\n context = {'items': get_data(max_length=25)}\n return render(request, 'ajaxornot/view5.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5_table(request):\n context = {'items': get_data(offset=25)}\n return render(request, 'ajaxornot/view5_trs.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, 'ajaxornot/view6.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view6_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7a(request):\n return render(request, 'ajaxornot/view7a.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, 'ajaxornot/view7b.html')\n",
"step-5": "from collections import defaultdict\n\nfrom django.shortcuts import render\nfrom django.views.decorators.cache import cache_control\n\nfrom peterbecom.plog.models import BlogItem, Category\nfrom peterbecom.plog.utils import utc_now\nfrom peterbecom.plog.views import json_view\n\nONE_MONTH = 60 * 60 * 24 * 30\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, \"ajaxornot/index.html\")\n\n\ndef get_data(max_length=1000, pub_date_format=None, offset=0):\n items = []\n category_names = dict((x.id, x.name) for x in Category.objects.all())\n categories = defaultdict(list)\n for e in BlogItem.categories.through.objects.all():\n categories[e.blogitem_id].append(category_names[e.category_id])\n qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by(\"-pub_date\")\n for item in qs[offset:max_length]:\n pub_date = item.pub_date\n if pub_date_format:\n pub_date = pub_date_format(pub_date)\n items.append(\n {\n \"title\": item.title,\n \"slug\": item.oid,\n \"pub_date\": pub_date,\n \"keywords\": [x for x in item.proper_keywords if x][:3],\n \"categories\": categories[item.id][:3],\n }\n )\n return items\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {\"items\": get_data()}\n return render(request, \"ajaxornot/view1.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2(request):\n return render(request, \"ajaxornot/view2.html\")\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {\"items\": get_data()}\n return render(request, \"ajaxornot/view2_table.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, \"ajaxornot/view3.html\")\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view3_data(request):\n return {\"items\": get_data(pub_date_format=lambda x: x.strftime(\"%B %Y\"))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime(\"%B %Y\"))\n context = {\"items\": data}\n return render(request, \"ajaxornot/view4.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5(request):\n context = {\"items\": get_data(max_length=25)}\n return render(request, \"ajaxornot/view5.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5_table(request):\n context = {\"items\": get_data(offset=25)}\n return render(request, \"ajaxornot/view5_trs.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, \"ajaxornot/view6.html\")\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view6_data(request):\n return {\"items\": get_data(pub_date_format=lambda x: x.strftime(\"%B %Y\"))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7a(request):\n return render(request, \"ajaxornot/view7a.html\")\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, \"ajaxornot/view7b.html\")\n",
"step-ids": [
7,
9,
14,
15,
17
]
}
|
[
7,
9,
14,
15,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Course(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Course(models.Model):
cid = models.CharField(max_length=100)
title = models.CharField(max_length=500)
link = models.CharField(max_length=300)
<|reserved_special_token_1|>
from django.db import models
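# one catalog row per course: an external id, a display title, and a link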
class Course(models.Model):
cid = models.CharField(max_length=100)
title = models.CharField(max_length=500)
link = models.CharField(max_length=300)
|
flexible
|
{
"blob_id": "226fc85dc8b6d549fddef0ca43ad629875ac0717",
"index": 3080,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Course(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Course(models.Model):\n cid = models.CharField(max_length=100)\n title = models.CharField(max_length=500)\n link = models.CharField(max_length=300)\n",
"step-4": "from django.db import models\n\n\nclass Course(models.Model):\n cid = models.CharField(max_length=100)\n title = models.CharField(max_length=500)\n link = models.CharField(max_length=300)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def check(t, d, c):
if t == 1:
if m1[2] != m2[-2] and not c:
check(t + 1, d * -1, 1)
if d == 1:
m1.appendleft(m1.pop())
else:
m1.append(m1.popleft())
elif t == 4:
if m4[-2] != m3[2] and not c:
check(t - 1, d * -1, 4)
if d == 1:
m4.appendleft(m4.pop())
else:
m4.append(m4.popleft())
elif t == 2:
if m2[2] != m3[-2] and (not c or c == 1):
check(t + 1, d * -1, 2)
if m2[-2] != m1[2] and (not c or c == 3):
check(t - 1, d * -1, 2)
if d == 1:
m2.appendleft(m2.pop())
else:
m2.append(m2.popleft())
else:
if m3[2] != m4[-2] and (not c or c == 2):
check(t + 1, d * -1, 3)
if m3[-2] != m2[2] and (not c or c == 4):
check(t - 1, d * -1, 3)
if d == 1:
m3.appendleft(m3.pop())
else:
m3.append(m3.popleft())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check(t, d, c):
if t == 1:
if m1[2] != m2[-2] and not c:
check(t + 1, d * -1, 1)
if d == 1:
m1.appendleft(m1.pop())
else:
m1.append(m1.popleft())
elif t == 4:
if m4[-2] != m3[2] and not c:
check(t - 1, d * -1, 4)
if d == 1:
m4.appendleft(m4.pop())
else:
m4.append(m4.popleft())
elif t == 2:
if m2[2] != m3[-2] and (not c or c == 1):
check(t + 1, d * -1, 2)
if m2[-2] != m1[2] and (not c or c == 3):
check(t - 1, d * -1, 2)
if d == 1:
m2.appendleft(m2.pop())
else:
m2.append(m2.popleft())
else:
if m3[2] != m4[-2] and (not c or c == 2):
check(t + 1, d * -1, 3)
if m3[-2] != m2[2] and (not c or c == 4):
check(t - 1, d * -1, 3)
if d == 1:
m3.appendleft(m3.pop())
else:
m3.append(m3.popleft())
for test_case in range(1, int(input()) + 1):
m1, m2, m3, m4 = deque(), deque(), deque(), deque()
K = int(input())
for _ in range(4):
if m1:
if m2:
if m3:
m4 += list(map(int, input().split()))
else:
m3 += list(map(int, input().split()))
else:
m2 += list(map(int, input().split()))
else:
m1 += list(map(int, input().split()))
for _ in range(K):
touch, direction = map(int, input().split())
check(touch, direction, 0)
result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]
print('#{} {}'.format(test_case, result))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.stdin = open('magnet.txt', 'r')
<|reserved_special_token_0|>
def check(t, d, c):
if t == 1:
if m1[2] != m2[-2] and not c:
check(t + 1, d * -1, 1)
if d == 1:
m1.appendleft(m1.pop())
else:
m1.append(m1.popleft())
elif t == 4:
if m4[-2] != m3[2] and not c:
check(t - 1, d * -1, 4)
if d == 1:
m4.appendleft(m4.pop())
else:
m4.append(m4.popleft())
elif t == 2:
if m2[2] != m3[-2] and (not c or c == 1):
check(t + 1, d * -1, 2)
if m2[-2] != m1[2] and (not c or c == 3):
check(t - 1, d * -1, 2)
if d == 1:
m2.appendleft(m2.pop())
else:
m2.append(m2.popleft())
else:
if m3[2] != m4[-2] and (not c or c == 2):
check(t + 1, d * -1, 3)
if m3[-2] != m2[2] and (not c or c == 4):
check(t - 1, d * -1, 3)
if d == 1:
m3.appendleft(m3.pop())
else:
m3.append(m3.popleft())
for test_case in range(1, int(input()) + 1):
m1, m2, m3, m4 = deque(), deque(), deque(), deque()
K = int(input())
for _ in range(4):
if m1:
if m2:
if m3:
m4 += list(map(int, input().split()))
else:
m3 += list(map(int, input().split()))
else:
m2 += list(map(int, input().split()))
else:
m1 += list(map(int, input().split()))
for _ in range(K):
touch, direction = map(int, input().split())
check(touch, direction, 0)
result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]
print('#{} {}'.format(test_case, result))
<|reserved_special_token_1|>
import sys
sys.stdin = open('magnet.txt', 'r')
from collections import deque
def check(t, d, c):
if t == 1:
if m1[2] != m2[-2] and not c:
check(t + 1, d * -1, 1)
if d == 1:
m1.appendleft(m1.pop())
else:
m1.append(m1.popleft())
elif t == 4:
if m4[-2] != m3[2] and not c:
check(t - 1, d * -1, 4)
if d == 1:
m4.appendleft(m4.pop())
else:
m4.append(m4.popleft())
elif t == 2:
if m2[2] != m3[-2] and (not c or c == 1):
check(t + 1, d * -1, 2)
if m2[-2] != m1[2] and (not c or c == 3):
check(t - 1, d * -1, 2)
if d == 1:
m2.appendleft(m2.pop())
else:
m2.append(m2.popleft())
else:
if m3[2] != m4[-2] and (not c or c == 2):
check(t + 1, d * -1, 3)
if m3[-2] != m2[2] and (not c or c == 4):
check(t - 1, d * -1, 3)
if d == 1:
m3.appendleft(m3.pop())
else:
m3.append(m3.popleft())
for test_case in range(1, int(input()) + 1):
m1, m2, m3, m4 = deque(), deque(), deque(), deque()
K = int(input())
for _ in range(4):
if m1:
if m2:
if m3:
m4 += list(map(int, input().split()))
else:
m3 += list(map(int, input().split()))
else:
m2 += list(map(int, input().split()))
else:
m1 += list(map(int, input().split()))
for _ in range(K):
touch, direction = map(int, input().split())
check(touch, direction, 0)
result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]
print('#{} {}'.format(test_case, result))
<|reserved_special_token_1|>
import sys
sys.stdin = open('magnet.txt', 'r')
from collections import deque
def check(t, d, c):
if t == 1:
if m1[2] != m2[-2] and not c:
check(t + 1, d * (-1), 1)
if d == 1:
m1.appendleft(m1.pop())
else:
m1.append(m1.popleft())
elif t == 4:
if m4[-2] != m3[2] and not c:
check(t - 1, d * (-1), 4)
if d == 1:
m4.appendleft(m4.pop())
else:
m4.append(m4.popleft())
elif t == 2:
if m2[2] != m3[-2] and (not c or c == 1):
check(t + 1, d * (-1), 2)
if m2[-2] != m1[2] and (not c or c == 3):
check(t - 1, d * (-1), 2)
if d == 1:
m2.appendleft(m2.pop())
else:
m2.append(m2.popleft())
else:
if m3[2] != m4[-2] and (not c or c == 2):
check(t + 1, d * (-1), 3)
if m3[-2] != m2[2] and (not c or c == 4):
check(t - 1, d * (-1), 3)
if d == 1:
m3.appendleft(m3.pop())
else:
m3.append(m3.popleft())
for test_case in range(1, int(input()) + 1):
m1, m2, m3, m4 = deque(), deque(), deque(), deque()
K = int(input())
for _ in range(4):
if m1:
if m2:
if m3:
m4 += list(map(int, input().split()))
else:
m3 += list(map(int, input().split()))
else:
m2 += list(map(int, input().split()))
else:
m1 += list(map(int, input().split()))
for _ in range(K):
touch, direction = map(int, input().split())
check(touch, direction, 0)
result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]
print('#{} {}'.format(test_case, result))
|
flexible
|
{
"blob_id": "7e3a5e1f19683b1716f3c988dcc1e65fee1cae13",
"index": 8956,
"step-1": "<mask token>\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * -1, 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * -1, 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * -1, 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * -1, 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * -1, 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * -1, 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * -1, 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * -1, 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * -1, 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * -1, 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * -1, 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * -1, 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\nfor test_case in range(1, int(input()) + 1):\n m1, m2, m3, m4 = deque(), deque(), deque(), deque()\n K = int(input())\n for _ in range(4):\n if m1:\n if m2:\n if m3:\n m4 += list(map(int, input().split()))\n else:\n m3 += list(map(int, input().split()))\n else:\n m2 += list(map(int, input().split()))\n else:\n m1 += list(map(int, input().split()))\n for _ in range(K):\n touch, direction = map(int, input().split())\n check(touch, direction, 0)\n result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]\n print('#{} {}'.format(test_case, result))\n",
"step-3": "<mask token>\nsys.stdin = open('magnet.txt', 'r')\n<mask token>\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * -1, 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * -1, 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * -1, 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * -1, 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * -1, 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * -1, 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\nfor test_case in range(1, int(input()) + 1):\n m1, m2, m3, m4 = deque(), deque(), deque(), deque()\n K = int(input())\n for _ in range(4):\n if m1:\n if m2:\n if m3:\n m4 += list(map(int, input().split()))\n else:\n m3 += list(map(int, input().split()))\n else:\n m2 += list(map(int, input().split()))\n else:\n m1 += list(map(int, input().split()))\n for _ in range(K):\n touch, direction = map(int, input().split())\n check(touch, direction, 0)\n result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]\n print('#{} {}'.format(test_case, result))\n",
"step-4": "import sys\nsys.stdin = open('magnet.txt', 'r')\nfrom collections import deque\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * -1, 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * -1, 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * -1, 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * -1, 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * -1, 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * -1, 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\nfor test_case in range(1, int(input()) + 1):\n m1, m2, m3, m4 = deque(), deque(), deque(), deque()\n K = int(input())\n for _ in range(4):\n if m1:\n if m2:\n if m3:\n m4 += list(map(int, input().split()))\n else:\n m3 += list(map(int, input().split()))\n else:\n m2 += list(map(int, input().split()))\n else:\n m1 += list(map(int, input().split()))\n for _ in range(K):\n touch, direction = map(int, input().split())\n check(touch, direction, 0)\n result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]\n print('#{} {}'.format(test_case, result))\n",
"step-5": "import sys\nsys.stdin = open('magnet.txt', 'r')\nfrom collections import deque\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * (-1), 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * (-1), 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * (-1), 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * (-1), 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * (-1), 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * (-1), 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\nfor test_case in range(1, int(input()) + 1):\n m1, m2, m3, m4 = deque(), deque(), deque(), deque()\n K = int(input())\n for _ in range(4):\n if m1:\n if m2:\n if m3:\n m4 += list(map(int, input().split()))\n else:\n m3 += list(map(int, input().split()))\n else:\n m2 += list(map(int, input().split()))\n else:\n m1 += list(map(int, input().split()))\n for _ in range(K):\n touch, direction = map(int, input().split())\n check(touch, direction, 0)\n result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]\n print('#{} {}'.format(test_case, result))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
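Across all the step variants above, the rotation itself is the same two-line deque idiom, and the contact checks compare fixed offsets: with eight teeth per deque and index 0 taken as 12 o'clock, index 2 is the 3 o'clock tooth and index -2 the 9 o'clock tooth, which is why m1[2] is tested against m2[-2] before either gear turns. A standalone check of that rotation convention (the deque contents below are illustrative, not from the record):

from collections import deque

m = deque([0, 0, 1, 1, 0, 1, 1, 1])       # 8 teeth, index 0 at 12 o'clock
cw = m.copy()
cw.appendleft(cw.pop())                   # d == 1: clockwise, last tooth wraps to the front
ccw = m.copy()
ccw.append(ccw.popleft())                 # d == -1: counter-clockwise, first tooth wraps to the back
assert cw[1] == m[0] and ccw[-1] == m[0]  # every tooth shifted by exactly one position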
<|reserved_special_token_0|>
def load_data():
"""
Helper function for loading in the data
------
# of training samples: 419
# of testing samples: 150
------
"""
df = pd.read_csv('../../Data/breast_cancer_data/data.csv')
cols = df.columns
X = df[cols[2:-1]].to_numpy()
y = df[cols[1]].to_numpy()
    y = (y == 'M').astype(int) * 2 - 1
train_X = X[:-150]
train_y = y[:-150]
test_X = X[-150:]
test_y = y[-150:]
return train_X, train_y, test_X, test_y
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_data():
"""
Helper function for loading in the data
------
# of training samples: 419
# of testing samples: 150
------
"""
df = pd.read_csv('../../Data/breast_cancer_data/data.csv')
cols = df.columns
X = df[cols[2:-1]].to_numpy()
y = df[cols[1]].to_numpy()
    y = (y == 'M').astype(int) * 2 - 1
train_X = X[:-150]
train_y = y[:-150]
test_X = X[-150:]
test_y = y[-150:]
return train_X, train_y, test_X, test_y
def main():
np.random.seed(0)
train_X, train_y, test_X, test_y = load_data()
base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (
'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',
DecisionTreeClassifier())]
sc = StackingClassifier(estimators=base_models)
sc.fit(train_X, train_y)
y_pred = sc.predict(test_X)
print(f"f1 score = {f1_score(y_pred, test_y, average='weighted')}")
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_data():
"""
Helper function for loading in the data
------
# of training samples: 419
# of testing samples: 150
------
"""
df = pd.read_csv('../../Data/breast_cancer_data/data.csv')
cols = df.columns
X = df[cols[2:-1]].to_numpy()
y = df[cols[1]].to_numpy()
    y = (y == 'M').astype(int) * 2 - 1
train_X = X[:-150]
train_y = y[:-150]
test_X = X[-150:]
test_y = y[-150:]
return train_X, train_y, test_X, test_y
def main():
np.random.seed(0)
train_X, train_y, test_X, test_y = load_data()
base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (
'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',
DecisionTreeClassifier())]
sc = StackingClassifier(estimators=base_models)
sc.fit(train_X, train_y)
y_pred = sc.predict(test_X)
print(f"f1 score = {f1_score(y_pred, test_y, average='weighted')}")
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import StackingClassifier, RandomForestClassifier
import pandas as pd
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
def load_data():
"""
Helper function for loading in the data
------
# of training samples: 419
# of testing samples: 150
------
"""
df = pd.read_csv('../../Data/breast_cancer_data/data.csv')
cols = df.columns
X = df[cols[2:-1]].to_numpy()
y = df[cols[1]].to_numpy()
    y = (y == 'M').astype(int) * 2 - 1
train_X = X[:-150]
train_y = y[:-150]
test_X = X[-150:]
test_y = y[-150:]
return train_X, train_y, test_X, test_y
def main():
np.random.seed(0)
train_X, train_y, test_X, test_y = load_data()
base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (
'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',
DecisionTreeClassifier())]
sc = StackingClassifier(estimators=base_models)
sc.fit(train_X, train_y)
y_pred = sc.predict(test_X)
print(f"f1 score = {f1_score(y_pred, test_y, average='weighted')}")
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import (
StackingClassifier,
RandomForestClassifier
)
import pandas as pd
from sklearn.metrics import f1_score
# feel free to import any sklearn model here
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
def load_data():
"""
Helper function for loading in the data
------
# of training samples: 419
# of testing samples: 150
------
"""
df = pd.read_csv("../../Data/breast_cancer_data/data.csv")
cols = df.columns
X = df[cols[2:-1]].to_numpy()
y = df[cols[1]].to_numpy()
    y = (y=='M').astype(int) * 2 - 1  # map M/B to +1/-1 (np.int was removed in NumPy 1.24)
train_X = X[:-150]
train_y = y[:-150]
test_X = X[-150:]
test_y = y[-150:]
return train_X, train_y, test_X, test_y
def main():
np.random.seed(0)
train_X, train_y, test_X, test_y = load_data()
# Stacking models:
# Create your stacked model using StackingClassifier
base_models = [
('rfc', RandomForestClassifier()),
('svm', SVC()),
('gnb', GaussianNB()),
('knc', KNeighborsClassifier()),
('dtc', DecisionTreeClassifier())
]
# The default final_estimator is LogisticRegression
sc = StackingClassifier(estimators=base_models)
# fit the model on the training data
sc.fit(train_X, train_y)
# predict
y_pred = sc.predict(test_X)
# Get and print f1-score on test data
print(f"f1 score = {f1_score(y_pred, test_y , average = 'weighted')}")
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "cf65966f5daf88bdefc7a8aa2ff80835cff0d0b6",
"index": 4627,
"step-1": "<mask token>\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv('../../Data/breast_cancer_data/data.csv')\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y == 'M').astype(np.int) * 2 - 1\n train_X = X[:-150]\n train_y = y[:-150]\n test_X = X[-150:]\n test_y = y[-150:]\n return train_X, train_y, test_X, test_y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv('../../Data/breast_cancer_data/data.csv')\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y == 'M').astype(np.int) * 2 - 1\n train_X = X[:-150]\n train_y = y[:-150]\n test_X = X[-150:]\n test_y = y[-150:]\n return train_X, train_y, test_X, test_y\n\n\ndef main():\n np.random.seed(0)\n train_X, train_y, test_X, test_y = load_data()\n base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (\n 'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',\n DecisionTreeClassifier())]\n sc = StackingClassifier(estimators=base_models)\n sc.fit(train_X, train_y)\n y_pred = sc.predict(test_X)\n print(f\"f1 score = {f1_score(y_pred, test_y, average='weighted')}\")\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv('../../Data/breast_cancer_data/data.csv')\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y == 'M').astype(np.int) * 2 - 1\n train_X = X[:-150]\n train_y = y[:-150]\n test_X = X[-150:]\n test_y = y[-150:]\n return train_X, train_y, test_X, test_y\n\n\ndef main():\n np.random.seed(0)\n train_X, train_y, test_X, test_y = load_data()\n base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (\n 'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',\n DecisionTreeClassifier())]\n sc = StackingClassifier(estimators=base_models)\n sc.fit(train_X, train_y)\n y_pred = sc.predict(test_X)\n print(f\"f1 score = {f1_score(y_pred, test_y, average='weighted')}\")\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import StackingClassifier, RandomForestClassifier\nimport pandas as pd\nfrom sklearn.metrics import f1_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv('../../Data/breast_cancer_data/data.csv')\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y == 'M').astype(np.int) * 2 - 1\n train_X = X[:-150]\n train_y = y[:-150]\n test_X = X[-150:]\n test_y = y[-150:]\n return train_X, train_y, test_X, test_y\n\n\ndef main():\n np.random.seed(0)\n train_X, train_y, test_X, test_y = load_data()\n base_models = [('rfc', RandomForestClassifier()), ('svm', SVC()), (\n 'gnb', GaussianNB()), ('knc', KNeighborsClassifier()), ('dtc',\n DecisionTreeClassifier())]\n sc = StackingClassifier(estimators=base_models)\n sc.fit(train_X, train_y)\n y_pred = sc.predict(test_X)\n print(f\"f1 score = {f1_score(y_pred, test_y, average='weighted')}\")\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import (\n StackingClassifier,\n RandomForestClassifier\n) \nimport pandas as pd\nfrom sklearn.metrics import f1_score\n# feel free to import any sklearn model here\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\n\ndef load_data():\n \"\"\"\n Helper function for loading in the data\n\n ------\n # of training samples: 419\n # of testing samples: 150\n ------\n \"\"\"\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y\n\ndef main():\n np.random.seed(0)\n train_X, train_y, test_X, test_y = load_data()\n \n # Stacking models:\n # Create your stacked model using StackingClassifier\n base_models = [\n ('rfc', RandomForestClassifier()),\n ('svm', SVC()),\n ('gnb', GaussianNB()),\n ('knc', KNeighborsClassifier()),\n ('dtc', DecisionTreeClassifier())\n ]\n \n # The default final_estimator is LogisticRegression\n sc = StackingClassifier(estimators=base_models)\n\n # fit the model on the training data\n sc.fit(train_X, train_y)\n\n # predict\n y_pred = sc.predict(test_X)\n\n # Get and print f1-score on test data\n print(f\"f1 score = {f1_score(y_pred, test_y , average = 'weighted')}\")\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
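The step-5 comments note that the default final_estimator is LogisticRegression. A sketch that makes the meta-learner explicit and scores each base model next to the stack — the helper name and the cv choice are my own additions, while the estimator parameters are standard scikit-learn API:

from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score

def compare_models(base_models, train_X, train_y, test_X, test_y):
    # score each base model on its own, then the stacked ensemble
    for name, model in base_models:
        model.fit(train_X, train_y)
        score = f1_score(model.predict(test_X), test_y, average='weighted')
        print(f'{name}: f1 = {score:.4f}')
    sc = StackingClassifier(estimators=base_models,
                            final_estimator=LogisticRegression(max_iter=1000),
                            cv=5)  # out-of-fold predictions feed the meta-learner
    sc.fit(train_X, train_y)
    print(f"stack: f1 = {f1_score(sc.predict(test_X), test_y, average='weighted'):.4f}")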
#!/usr/bin/python
import sys
class Generator:
def __init__(self, seed, factor, multiple):
self.value = seed
self.factor = factor
self.multiple = multiple
def iterate(self):
self.value = ( self.value * self.factor ) % 2147483647
# Repeat if this isn't an exact multiple
while self.value % self.multiple != 0:
self.value = ( self.value * self.factor ) % 2147483647
return self.value
# Read the input
seed_a = int(sys.argv[1])
seed_b = int(sys.argv[2])
gen_a = Generator(seed_a, 16807, 4)
gen_b = Generator(seed_b, 48271, 8)
matches = 0
for i in range(0,5000000):
val_a = gen_a.iterate()
val_b = gen_b.iterate()
# print "{0:16d}\t{1:16d}".format(val_a, val_b)
lowest16 = 2 ** 16 - 1
low_a = val_a & lowest16
low_b = val_b & lowest16
# print format(low_a, '016b')
# print format(low_b, '016b')
if ( low_a == low_b ):
matches += 1
print(matches)  # parenthesised so the script runs under Python 2 and 3
|
normal
|
{
"blob_id": "4bb006e2e457f5b11157dacb43fe94c8b400f146",
"index": 5105,
"step-1": "#!/usr/bin/python\n\nimport sys\n\nclass Generator:\n def __init__(self, seed, factor, multiple):\n self.value = seed\n self.factor = factor\n self.multiple = multiple\n\n def iterate(self):\n self.value = ( self.value * self.factor ) % 2147483647\n # Repeat if this isn't an exact multiple\n while self.value % self.multiple != 0:\n self.value = ( self.value * self.factor ) % 2147483647\n return self.value\n\n# Read the input\n\nseed_a = int(sys.argv[1])\nseed_b = int(sys.argv[2])\n\ngen_a = Generator(seed_a, 16807, 4)\ngen_b = Generator(seed_b, 48271, 8)\n\nmatches = 0\nfor i in range(0,5000000):\n val_a = gen_a.iterate()\n val_b = gen_b.iterate()\n\n # print \"{0:16d}\\t{1:16d}\".format(val_a, val_b)\n\n lowest16 = 2 ** 16 - 1\n\n low_a = val_a & lowest16\n low_b = val_b & lowest16\n\n # print format(low_a, '016b')\n # print format(low_b, '016b')\n\n if ( low_a == low_b ):\n matches += 1\n\nprint matches\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
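The Generator class can also be phrased with Python generator functions; a sketch that reproduces the same pairing and lowest-16-bit comparison under the same seeds, factors, and multiples:

def gen(value, factor, multiple):
    # yields successive values that are exact multiples of `multiple`
    while True:
        value = (value * factor) % 2147483647
        if value % multiple == 0:
            yield value

def count_matches(seed_a, seed_b, pairs=5000000):
    a, b = gen(seed_a, 16807, 4), gen(seed_b, 48271, 8)
    mask = (1 << 16) - 1  # lowest 16 bits
    return sum((next(a) & mask) == (next(b) & mask) for _ in range(pairs))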
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def visualize_batch(pointclouds, pred_labels, labels, categories):
batch_size = len(pointclouds)
fig = plt.figure(figsize=(8, batch_size / 2))
ncols = 5
nrows = max(1, batch_size // 5)
for idx, pc in enumerate(pointclouds):
label = categories[int(labels[idx].item())]
pred = categories[int(pred_labels[idx])]
colour = 'g' if label == pred else 'r'
pc = pc.cpu().numpy()
ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')
ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)
ax.axis('off')
ax.set_title('GT: {0}\nPred: {1}'.format(label, pred))
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def visualize_batch(pointclouds, pred_labels, labels, categories):
batch_size = len(pointclouds)
fig = plt.figure(figsize=(8, batch_size / 2))
ncols = 5
nrows = max(1, batch_size // 5)
for idx, pc in enumerate(pointclouds):
label = categories[int(labels[idx].item())]
pred = categories[int(pred_labels[idx])]
colour = 'g' if label == pred else 'r'
pc = pc.cpu().numpy()
ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')
ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)
ax.axis('off')
ax.set_title('GT: {0}\nPred: {1}'.format(label, pred))
plt.show()
if __name__ == '__main__':
with open('config.yaml', 'r') as yamlfile:
config = yaml.load(yamlfile, Loader=yaml.FullLoader)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)
modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'],
config['MODEL']['POINTNET']['TRAINING'])
training_instance_2.test(modelnet10_dataloader.validloader)
<|reserved_special_token_1|>
import torch
from training import PointNetTrain, PointAugmentTrain, Model
from data_utils.dataloader import DataLoaderClass
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import numpy as np
import yaml
def visualize_batch(pointclouds, pred_labels, labels, categories):
batch_size = len(pointclouds)
fig = plt.figure(figsize=(8, batch_size / 2))
ncols = 5
nrows = max(1, batch_size // 5)
for idx, pc in enumerate(pointclouds):
label = categories[int(labels[idx].item())]
pred = categories[int(pred_labels[idx])]
colour = 'g' if label == pred else 'r'
pc = pc.cpu().numpy()
ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')
ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)
ax.axis('off')
ax.set_title('GT: {0}\nPred: {1}'.format(label, pred))
plt.show()
if __name__ == '__main__':
with open('config.yaml', 'r') as yamlfile:
config = yaml.load(yamlfile, Loader=yaml.FullLoader)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)
modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'],
config['MODEL']['POINTNET']['TRAINING'])
training_instance_2.test(modelnet10_dataloader.validloader)
<|reserved_special_token_1|>
import torch
from training import PointNetTrain, PointAugmentTrain, Model
#from PointAugment.Augment.config import opts
from data_utils.dataloader import DataLoaderClass
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import numpy as np
import yaml
def visualize_batch(pointclouds, pred_labels, labels, categories):
batch_size = len(pointclouds)
fig = plt.figure(figsize=(8, batch_size / 2))
ncols = 5
nrows = max(1, batch_size // 5)
for idx, pc in enumerate(pointclouds):
label = categories[int(labels[idx].item())]
pred = categories[int(pred_labels[idx])]
colour = 'g' if label == pred else 'r'
pc = pc.cpu().numpy()
ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')
ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)
ax.axis('off')
ax.set_title('GT: {0}\nPred: {1}'.format(label, pred))
plt.show()
if __name__ == '__main__':
with open("config.yaml", "r") as yamlfile:
config = yaml.load(yamlfile, Loader=yaml.FullLoader)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# PointNet
training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)
modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING'])
#training_instance_2.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)
training_instance_2.test(modelnet10_dataloader.validloader)
# Point Augment
#training_instance_1 = PointAugmentTrain(config['MODEL']['POINT_AUGMENT'], device)
#modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING'])
#training_instance_1.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)
#training_instance_1.test(modelnet10_dataloader.validloader)
|
flexible
|
{
"blob_id": "0ced42c8bfaad32fc2b397326150e6c7bc5cedab",
"index": 4991,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef visualize_batch(pointclouds, pred_labels, labels, categories):\n batch_size = len(pointclouds)\n fig = plt.figure(figsize=(8, batch_size / 2))\n ncols = 5\n nrows = max(1, batch_size // 5)\n for idx, pc in enumerate(pointclouds):\n label = categories[int(labels[idx].item())]\n pred = categories[int(pred_labels[idx])]\n colour = 'g' if label == pred else 'r'\n pc = pc.cpu().numpy()\n ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')\n ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)\n ax.axis('off')\n ax.set_title('GT: {0}\\nPred: {1}'.format(label, pred))\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef visualize_batch(pointclouds, pred_labels, labels, categories):\n batch_size = len(pointclouds)\n fig = plt.figure(figsize=(8, batch_size / 2))\n ncols = 5\n nrows = max(1, batch_size // 5)\n for idx, pc in enumerate(pointclouds):\n label = categories[int(labels[idx].item())]\n pred = categories[int(pred_labels[idx])]\n colour = 'g' if label == pred else 'r'\n pc = pc.cpu().numpy()\n ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')\n ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)\n ax.axis('off')\n ax.set_title('GT: {0}\\nPred: {1}'.format(label, pred))\n plt.show()\n\n\nif __name__ == '__main__':\n with open('config.yaml', 'r') as yamlfile:\n config = yaml.load(yamlfile, Loader=yaml.FullLoader)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)\n modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'],\n config['MODEL']['POINTNET']['TRAINING'])\n training_instance_2.test(modelnet10_dataloader.validloader)\n",
"step-4": "import torch\nfrom training import PointNetTrain, PointAugmentTrain, Model\nfrom data_utils.dataloader import DataLoaderClass\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport yaml\n\n\ndef visualize_batch(pointclouds, pred_labels, labels, categories):\n batch_size = len(pointclouds)\n fig = plt.figure(figsize=(8, batch_size / 2))\n ncols = 5\n nrows = max(1, batch_size // 5)\n for idx, pc in enumerate(pointclouds):\n label = categories[int(labels[idx].item())]\n pred = categories[int(pred_labels[idx])]\n colour = 'g' if label == pred else 'r'\n pc = pc.cpu().numpy()\n ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')\n ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)\n ax.axis('off')\n ax.set_title('GT: {0}\\nPred: {1}'.format(label, pred))\n plt.show()\n\n\nif __name__ == '__main__':\n with open('config.yaml', 'r') as yamlfile:\n config = yaml.load(yamlfile, Loader=yaml.FullLoader)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)\n modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'],\n config['MODEL']['POINTNET']['TRAINING'])\n training_instance_2.test(modelnet10_dataloader.validloader)\n",
"step-5": "import torch\nfrom training import PointNetTrain, PointAugmentTrain, Model\n#from PointAugment.Augment.config import opts\nfrom data_utils.dataloader import DataLoaderClass\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport yaml\n\ndef visualize_batch(pointclouds, pred_labels, labels, categories):\n batch_size = len(pointclouds)\n fig = plt.figure(figsize=(8, batch_size / 2))\n\n ncols = 5\n nrows = max(1, batch_size // 5)\n for idx, pc in enumerate(pointclouds):\n label = categories[int(labels[idx].item())]\n pred = categories[int(pred_labels[idx])]\n colour = 'g' if label == pred else 'r'\n pc = pc.cpu().numpy()\n ax = fig.add_subplot(nrows, ncols, idx + 1, projection='3d')\n ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], c=colour, s=2)\n ax.axis('off')\n ax.set_title('GT: {0}\\nPred: {1}'.format(label, pred))\n\n plt.show()\n\n\nif __name__ == '__main__':\n with open(\"config.yaml\", \"r\") as yamlfile:\n config = yaml.load(yamlfile, Loader=yaml.FullLoader)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # PointNet\n training_instance_2 = PointNetTrain(config['MODEL']['POINTNET'], device)\n modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING']) \n #training_instance_2.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)\n training_instance_2.test(modelnet10_dataloader.validloader)\n\n # Point Augment\n #training_instance_1 = PointAugmentTrain(config['MODEL']['POINT_AUGMENT'], device)\n #modelnet10_dataloader = DataLoaderClass(config['DATA']['MODELNET10'], config['MODEL']['POINTNET']['TRAINING']) \n #training_instance_1.train(modelnet10_dataloader.trainloader, modelnet10_dataloader.validloader, adv = False)\n #training_instance_1.test(modelnet10_dataloader.validloader)\n\n\n \n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
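visualize_batch is defined in every step but never exercised in the record. A smoke test with synthetic tensors, assuming the record's imports (torch, matplotlib, mpl_toolkits) are in scope; every name and shape below is illustrative:

categories = ['class_a', 'class_b']              # hypothetical label names
pcs = torch.rand(10, 1024, 3)                    # 10 clouds of 1024 points each
labels = torch.randint(0, 2, (10,))              # ground-truth class indices
preds = torch.randint(0, 2, (10,)).tolist()      # predicted class indices
visualize_batch(pcs, preds, labels, categories)  # 2 rows x 5 cols of 3-D scatters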
def climb_ways(n, k):
|
normal
|
{
"blob_id": "05144338cc9c0c65010e0b8a3dd6fb50f6343214",
"index": 6641,
"step-1": "def climb_ways(n, k):",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
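climb_ways is left as a bare signature in every step of this record. A plausible completion — assuming the conventional reading of the name, counting the ways to climb n stairs taking 1 to k steps at a time; the body below is that assumption, not recovered source:

def climb_ways(n, k):
    # ways[i] = number of ways to reach step i using jumps of size 1..k
    ways = [0] * (n + 1)
    ways[0] = 1  # one way to stand at the bottom
    for i in range(1, n + 1):
        ways[i] = sum(ways[max(0, i - k):i])
    return ways[n]

# climb_ways(4, 2) == 5, the Fibonacci case of 1- and 2-step climbs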
# Copyright (c) 2019 NVIDIA Corporation
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core.neural_types import *
from nemo.core import DeviceType
import torch
from .datasets import BertPretrainingDataset
class BertPretrainingDataLayer(DataLayerNM):
@staticmethod
def create_ports():
input_ports = {}
output_ports = {
"input_ids":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"input_type_ids":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"input_mask":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"output_ids":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"output_mask":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"labels":
NeuralType({0: AxisType(BatchTag)}),
}
return input_ports, output_ports
def __init__(self, *, tokenizer, dataset, name, max_seq_length,
sentence_indices_filename=None, mask_probability=0.15,
**kwargs):
DataLayerNM.__init__(self, **kwargs)
self._device = torch.device(
"cuda" if self.placement in [DeviceType.GPU, DeviceType.AllGpu]
else "cpu"
)
self._dataset = BertPretrainingDataset(
tokenizer=tokenizer,
dataset=dataset,
name=name,
sentence_indices_filename=sentence_indices_filename,
max_length=max_seq_length,
mask_probability=mask_probability)
def __len__(self):
return len(self._dataset)
@property
def dataset(self):
return self._dataset
@property
def data_iterator(self):
return None
|
normal
|
{
"blob_id": "a47ffd5df49ec627442a491f81a117b3e68ff50b",
"index": 2326,
"step-1": "<mask token>\n\n\nclass BertPretrainingDataLayer(DataLayerNM):\n <mask token>\n\n def __init__(self, *, tokenizer, dataset, name, max_seq_length,\n sentence_indices_filename=None, mask_probability=0.15, **kwargs):\n DataLayerNM.__init__(self, **kwargs)\n self._device = torch.device('cuda' if self.placement in [DeviceType\n .GPU, DeviceType.AllGpu] else 'cpu')\n self._dataset = BertPretrainingDataset(tokenizer=tokenizer, dataset\n =dataset, name=name, sentence_indices_filename=\n sentence_indices_filename, max_length=max_seq_length,\n mask_probability=mask_probability)\n\n def __len__(self):\n return len(self._dataset)\n <mask token>\n\n @property\n def data_iterator(self):\n return None\n",
"step-2": "<mask token>\n\n\nclass BertPretrainingDataLayer(DataLayerNM):\n <mask token>\n\n def __init__(self, *, tokenizer, dataset, name, max_seq_length,\n sentence_indices_filename=None, mask_probability=0.15, **kwargs):\n DataLayerNM.__init__(self, **kwargs)\n self._device = torch.device('cuda' if self.placement in [DeviceType\n .GPU, DeviceType.AllGpu] else 'cpu')\n self._dataset = BertPretrainingDataset(tokenizer=tokenizer, dataset\n =dataset, name=name, sentence_indices_filename=\n sentence_indices_filename, max_length=max_seq_length,\n mask_probability=mask_probability)\n\n def __len__(self):\n return len(self._dataset)\n\n @property\n def dataset(self):\n return self._dataset\n\n @property\n def data_iterator(self):\n return None\n",
"step-3": "<mask token>\n\n\nclass BertPretrainingDataLayer(DataLayerNM):\n\n @staticmethod\n def create_ports():\n input_ports = {}\n output_ports = {'input_ids': NeuralType({(0): AxisType(BatchTag), (\n 1): AxisType(TimeTag)}), 'input_type_ids': NeuralType({(0):\n AxisType(BatchTag), (1): AxisType(TimeTag)}), 'input_mask':\n NeuralType({(0): AxisType(BatchTag), (1): AxisType(TimeTag)}),\n 'output_ids': NeuralType({(0): AxisType(BatchTag), (1):\n AxisType(TimeTag)}), 'output_mask': NeuralType({(0): AxisType(\n BatchTag), (1): AxisType(TimeTag)}), 'labels': NeuralType({(0):\n AxisType(BatchTag)})}\n return input_ports, output_ports\n\n def __init__(self, *, tokenizer, dataset, name, max_seq_length,\n sentence_indices_filename=None, mask_probability=0.15, **kwargs):\n DataLayerNM.__init__(self, **kwargs)\n self._device = torch.device('cuda' if self.placement in [DeviceType\n .GPU, DeviceType.AllGpu] else 'cpu')\n self._dataset = BertPretrainingDataset(tokenizer=tokenizer, dataset\n =dataset, name=name, sentence_indices_filename=\n sentence_indices_filename, max_length=max_seq_length,\n mask_probability=mask_probability)\n\n def __len__(self):\n return len(self._dataset)\n\n @property\n def dataset(self):\n return self._dataset\n\n @property\n def data_iterator(self):\n return None\n",
"step-4": "from nemo.backends.pytorch.nm import DataLayerNM\nfrom nemo.core.neural_types import *\nfrom nemo.core import DeviceType\nimport torch\nfrom .datasets import BertPretrainingDataset\n\n\nclass BertPretrainingDataLayer(DataLayerNM):\n\n @staticmethod\n def create_ports():\n input_ports = {}\n output_ports = {'input_ids': NeuralType({(0): AxisType(BatchTag), (\n 1): AxisType(TimeTag)}), 'input_type_ids': NeuralType({(0):\n AxisType(BatchTag), (1): AxisType(TimeTag)}), 'input_mask':\n NeuralType({(0): AxisType(BatchTag), (1): AxisType(TimeTag)}),\n 'output_ids': NeuralType({(0): AxisType(BatchTag), (1):\n AxisType(TimeTag)}), 'output_mask': NeuralType({(0): AxisType(\n BatchTag), (1): AxisType(TimeTag)}), 'labels': NeuralType({(0):\n AxisType(BatchTag)})}\n return input_ports, output_ports\n\n def __init__(self, *, tokenizer, dataset, name, max_seq_length,\n sentence_indices_filename=None, mask_probability=0.15, **kwargs):\n DataLayerNM.__init__(self, **kwargs)\n self._device = torch.device('cuda' if self.placement in [DeviceType\n .GPU, DeviceType.AllGpu] else 'cpu')\n self._dataset = BertPretrainingDataset(tokenizer=tokenizer, dataset\n =dataset, name=name, sentence_indices_filename=\n sentence_indices_filename, max_length=max_seq_length,\n mask_probability=mask_probability)\n\n def __len__(self):\n return len(self._dataset)\n\n @property\n def dataset(self):\n return self._dataset\n\n @property\n def data_iterator(self):\n return None\n",
"step-5": "# Copyright (c) 2019 NVIDIA Corporation\n\nfrom nemo.backends.pytorch.nm import DataLayerNM\nfrom nemo.core.neural_types import *\nfrom nemo.core import DeviceType\nimport torch\nfrom .datasets import BertPretrainingDataset\n\n\nclass BertPretrainingDataLayer(DataLayerNM):\n @staticmethod\n def create_ports():\n input_ports = {}\n output_ports = {\n \"input_ids\":\n NeuralType({\n 0: AxisType(BatchTag),\n 1: AxisType(TimeTag)\n }),\n \"input_type_ids\":\n NeuralType({\n 0: AxisType(BatchTag),\n 1: AxisType(TimeTag)\n }),\n \"input_mask\":\n NeuralType({\n 0: AxisType(BatchTag),\n 1: AxisType(TimeTag)\n }),\n \"output_ids\":\n NeuralType({\n 0: AxisType(BatchTag),\n 1: AxisType(TimeTag)\n }),\n \"output_mask\":\n NeuralType({\n 0: AxisType(BatchTag),\n 1: AxisType(TimeTag)\n }),\n \"labels\":\n NeuralType({0: AxisType(BatchTag)}),\n }\n\n return input_ports, output_ports\n\n def __init__(self, *, tokenizer, dataset, name, max_seq_length,\n sentence_indices_filename=None, mask_probability=0.15,\n **kwargs):\n DataLayerNM.__init__(self, **kwargs)\n\n self._device = torch.device(\n \"cuda\" if self.placement in [DeviceType.GPU, DeviceType.AllGpu]\n else \"cpu\"\n )\n\n self._dataset = BertPretrainingDataset(\n tokenizer=tokenizer,\n dataset=dataset,\n name=name,\n sentence_indices_filename=sentence_indices_filename,\n max_length=max_seq_length,\n mask_probability=mask_probability)\n\n def __len__(self):\n return len(self._dataset)\n\n @property\n def dataset(self):\n return self._dataset\n\n @property\n def data_iterator(self):\n return None\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
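The record defines the layer but never constructs it. A hypothetical instantiation using only the keyword arguments visible in __init__ — it assumes a NeMo 0.x environment with an initialised nemo.core.NeuralModuleFactory, and the tokenizer and path below are placeholders:

data_layer = BertPretrainingDataLayer(
    tokenizer=tokenizer,         # placeholder: whatever tokenizer BertPretrainingDataset accepts
    dataset='corpus/train.txt',  # placeholder path to the raw text corpus
    name='train',
    max_seq_length=128,
    mask_probability=0.15,       # same value as the signature's default
)
print(len(data_layer))           # delegates to len() of the underlying dataset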
import tensorflow as tf
from vgg16 import vgg16
def content_loss(content_layer, generated_layer):
# sess.run(vgg_net.image.assign(generated_image))
# now we define the loss as the difference between the reference activations and
# the generated image activations in the specified layer
# return 1/2 * tf.nn.l2_loss(content_layer - generated_layer)
return tf.scalar_mul(.5, tf.nn.l2_loss(content_layer - generated_layer))
def style_loss(style_layers, generated_layers, weights):
layer_losses = []
for index in [0, 1, 2, 3]:
reference_layer = style_layers[index]
generated_image_layer = generated_layers[index]
N = reference_layer.shape[3]
M = reference_layer.shape[1] * reference_layer.shape[2]
        # layer_losses.append(weights[index] * (4 / (M**2 * N**2)) * tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N)))
        # Note: Gatys et al. (2015) weight each layer term by 1 / (4 * N**2 * M**2);
        # the 4 / (M**2 * N**2) used here is a constant 16x rescaling of that,
        # effectively absorbed into the per-layer weights.
        layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M**2 * N**2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N))))
return sum(layer_losses)
def get_gram_matrix(matrix, num_filters):
# first vectorize the matrix
matrix_vectorized = tf.reshape(matrix, [-1, num_filters])
# then calculate the gram by multiplying the vector by its transpose
return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)
# def run_vgg(sess, image):
# print "making the template", image.shape
# imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
# net = vgg16(imgs, 'vgg16_weights.npz', sess)
# print "model loaded"
# # net = VGG16({'data': image})
# # net.load(model_data_path, session)
# # session.run(net.get_output(), feed_dict={input_node: image})
# sess.run(net.probs, feed_dict={net.imgs: image})
# return net
|
normal
|
{
"blob_id": "f92b939bf9813e5c78bc450ff270d5fb6171792a",
"index": 4810,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef content_loss(content_layer, generated_layer):\n return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))\n\n\n<mask token>\n\n\ndef get_gram_matrix(matrix, num_filters):\n matrix_vectorized = tf.reshape(matrix, [-1, num_filters])\n return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)\n",
"step-3": "<mask token>\n\n\ndef content_loss(content_layer, generated_layer):\n return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))\n\n\ndef style_loss(style_layers, generated_layers, weights):\n layer_losses = []\n for index in [0, 1, 2, 3]:\n reference_layer = style_layers[index]\n generated_image_layer = generated_layers[index]\n N = reference_layer.shape[3]\n M = reference_layer.shape[1] * reference_layer.shape[2]\n layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M ** 2 * N **\n 2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) -\n get_gram_matrix(generated_image_layer, N))))\n return sum(layer_losses)\n\n\ndef get_gram_matrix(matrix, num_filters):\n matrix_vectorized = tf.reshape(matrix, [-1, num_filters])\n return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)\n",
"step-4": "import tensorflow as tf\nfrom vgg16 import vgg16\n\n\ndef content_loss(content_layer, generated_layer):\n return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))\n\n\ndef style_loss(style_layers, generated_layers, weights):\n layer_losses = []\n for index in [0, 1, 2, 3]:\n reference_layer = style_layers[index]\n generated_image_layer = generated_layers[index]\n N = reference_layer.shape[3]\n M = reference_layer.shape[1] * reference_layer.shape[2]\n layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M ** 2 * N **\n 2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) -\n get_gram_matrix(generated_image_layer, N))))\n return sum(layer_losses)\n\n\ndef get_gram_matrix(matrix, num_filters):\n matrix_vectorized = tf.reshape(matrix, [-1, num_filters])\n return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)\n",
"step-5": "import tensorflow as tf\nfrom vgg16 import vgg16\n\ndef content_loss(content_layer, generated_layer):\n # sess.run(vgg_net.image.assign(generated_image))\n\n # now we define the loss as the difference between the reference activations and \n # the generated image activations in the specified layer\n # return 1/2 * tf.nn.l2_loss(content_layer - generated_layer)\n return tf.scalar_mul(.5, tf.nn.l2_loss(content_layer - generated_layer))\n\ndef style_loss(style_layers, generated_layers, weights):\n \n layer_losses = []\n for index in [0, 1, 2, 3]:\n reference_layer = style_layers[index]\n generated_image_layer = generated_layers[index]\n\n N = reference_layer.shape[3]\n M = reference_layer.shape[1] * reference_layer.shape[2]\n # layer_losses.append(weights[index] * (4 / (M**2 * N**2)) * tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N)))\n layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M**2 * N**2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N))))\n\n return sum(layer_losses)\n\n\ndef get_gram_matrix(matrix, num_filters):\n # first vectorize the matrix\n matrix_vectorized = tf.reshape(matrix, [-1, num_filters])\n # then calculate the gram by multiplying the vector by its transpose\n return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)\n\n\n# def run_vgg(sess, image):\n# print \"making the template\", image.shape\n# imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])\n# net = vgg16(imgs, 'vgg16_weights.npz', sess)\n# print \"model loaded\"\n# # net = VGG16({'data': image})\n# # net.load(model_data_path, session)\n# # session.run(net.get_output(), feed_dict={input_node: image})\n# sess.run(net.probs, feed_dict={net.imgs: image})\n# return net\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
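content_loss and style_loss only need activation tensors, so they can be smoke-tested without the VGG network. A sketch under TF 2 eager semantics, assuming the two loss functions above are in scope (the record itself targets TF 1 sessions; the shapes are illustrative):

import tensorflow as tf

ref = [tf.random.normal((1, 56, 56, 64)) for _ in range(4)]  # stand-in style activations
gen = [tf.random.normal((1, 56, 56, 64)) for _ in range(4)]  # stand-in generated activations
print(float(content_loss(ref[0], gen[0])))                   # scalar content loss
print(float(style_loss(ref, gen, [0.25] * 4)))               # equal layer weights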
<|reserved_special_token_0|>
def clearConsole():
os.system('cls' if os.name == 'nt' else 'clear')
def main():
checkArgs()
rfile = open(sys.argv[1], 'r')
wfile = open(output_name, 'w')
parseAndStrip(rfile, wfile)
rfile.close()
wfile.close()
def checkArgs():
if len(sys.argv) < 2 or len(sys.argv) > 3:
print(
'Usage Error:\t\tThe program needs (at least) an input filename to run.'
)
print('Correct Usage:\t\tpython titleStrip.py [input filename]')
print(
'Alternate Usage:\t\tpython titleStrip.py [input filename] [output filename]'
)
sys.exit(1)
if len(sys.argv) == 3:
global output_name
output_name = sys.argv[2]
def parseAndStrip(rfile, wfile):
while True:
line = rfile.readline()
if not line:
return
skip = 0
for key in strip_target:
if key in line:
skip = 1
if skip == 0:
wfile.write(line)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def clearConsole():
os.system('cls' if os.name == 'nt' else 'clear')
def main():
checkArgs()
rfile = open(sys.argv[1], 'r')
wfile = open(output_name, 'w')
parseAndStrip(rfile, wfile)
rfile.close()
wfile.close()
def checkArgs():
if len(sys.argv) < 2 or len(sys.argv) > 3:
print(
'Usage Error:\t\tThe program needs (at least) an input filename to run.'
)
print('Correct Usage:\t\tpython titleStrip.py [input filename]')
print(
'Alternate Usage:\t\tpython titleStrip.py [input filename] [output filename]'
)
sys.exit(1)
if len(sys.argv) == 3:
global output_name
output_name = sys.argv[2]
def parseAndStrip(rfile, wfile):
while True:
line = rfile.readline()
if not line:
return
skip = 0
for key in strip_target:
if key in line:
skip = 1
if skip == 0:
wfile.write(line)
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
strip_target = ['Wizards of', 'Random Generator']
output_name = 'titleStrip_out.txt'
def clearConsole():
os.system('cls' if os.name == 'nt' else 'clear')
def main():
checkArgs()
rfile = open(sys.argv[1], 'r')
wfile = open(output_name, 'w')
parseAndStrip(rfile, wfile)
rfile.close()
wfile.close()
def checkArgs():
if len(sys.argv) < 2 or len(sys.argv) > 3:
print(
'Usage Error:\t\tThe program needs (at least) an input filename to run.'
)
print('Correct Usage:\t\tpython titleStrip.py [input filename]')
print(
'Alternate Usage:\t\tpython titleStrip.py [input filename] [output filename]'
)
sys.exit(1)
if len(sys.argv) == 3:
global output_name
output_name = sys.argv[2]
def parseAndStrip(rfile, wfile):
while True:
line = rfile.readline()
if not line:
return
skip = 0
for key in strip_target:
if key in line:
skip = 1
if skip == 0:
wfile.write(line)
main()
<|reserved_special_token_1|>
import os, sys
strip_target = ['Wizards of', 'Random Generator']
output_name = 'titleStrip_out.txt'
def clearConsole():
os.system('cls' if os.name == 'nt' else 'clear')
def main():
checkArgs()
rfile = open(sys.argv[1], 'r')
wfile = open(output_name, 'w')
parseAndStrip(rfile, wfile)
rfile.close()
wfile.close()
def checkArgs():
if len(sys.argv) < 2 or len(sys.argv) > 3:
print(
'Usage Error:\t\tThe program needs (at least) an input filename to run.'
)
print('Correct Usage:\t\tpython titleStrip.py [input filename]')
print(
'Alternate Usage:\t\tpython titleStrip.py [input filename] [output filename]'
)
sys.exit(1)
if len(sys.argv) == 3:
global output_name
output_name = sys.argv[2]
def parseAndStrip(rfile, wfile):
while True:
line = rfile.readline()
if not line:
return
skip = 0
for key in strip_target:
if key in line:
skip = 1
if skip == 0:
wfile.write(line)
main()
<|reserved_special_token_1|>
################################################################################
#
# titleStrip.py
#
# Generates an output file with the titles of the input stripped
# Usage:
# python titleStrip.py [input filename] [output filename]
#
################################################################################
import os, sys
# Globals / Settings
strip_target = ['Wizards of', 'Random Generator'] # Keys for removal between input and output
output_name = 'titleStrip_out.txt' # default output filename is out.txt
# There will be a better home for this, mhm...
def clearConsole ():
os.system ('cls' if os.name == 'nt' else 'clear')
def main():
checkArgs()
# Open up the input / output files (read / write modes respectively)
rfile = open (sys.argv[1], 'r')
wfile = open (output_name, 'w')
parseAndStrip (rfile, wfile)
# Close the input / output files now that we are done
rfile.close()
wfile.close()
# checkArgs
# 1. Verifies that the number of arguments is acceptable
# 2. Reads in optional output filename
def checkArgs ():
# Verify number of input arguments
if len (sys.argv) < 2 or len (sys.argv) > 3:
print ("Usage Error:\t\tThe program needs (at least) an input filename to run.")
print ("Correct Usage:\t\tpython titleStrip.py [input filename]")
print ("Alternate Usage:\t\tpython titleStrip.py [input filename] [output filename]")
sys.exit(1)
# Read in optional output filename if any
if len (sys.argv) == 3:
global output_name # Use the global output_name
output_name = sys.argv [2] # Set the name
# parseAndStrip
# Reads through rfile and copies lines into wfile
# If we find a line to remove, we do not copy it into wfile
def parseAndStrip ( rfile, wfile ):
while True:
line = rfile.readline() # read in a line
if not line: return # leave this function if we are done
# Check to see if line has a key for removal
skip = 0
for key in strip_target:
if key in line:
skip = 1
# Only copy from rfile to wfile if skip == 0
if skip == 0:
wfile.write (line)
main()
|
flexible
|
{
"blob_id": "9c09309d23510aee4409a6d9021c2991afd2d349",
"index": 521,
"step-1": "<mask token>\n\n\ndef clearConsole():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef clearConsole():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\nmain()\n",
"step-3": "<mask token>\nstrip_target = ['Wizards of', 'Random Generator']\noutput_name = 'titleStrip_out.txt'\n\n\ndef clearConsole():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\nmain()\n",
"step-4": "import os, sys\nstrip_target = ['Wizards of', 'Random Generator']\noutput_name = 'titleStrip_out.txt'\n\n\ndef clearConsole():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\nmain()\n",
"step-5": "################################################################################\n#\n#\ttitleStrip.py\n#\n#\tGenerates an output file with the titles of the input stripped\n#\tUsage:\n#\t\tpython titleStrip.py [input filename] [output filename]\n#\n################################################################################\n\nimport os, sys\n\n# Globals / Settings\nstrip_target = ['Wizards of', 'Random Generator']\t\t# Keys for removal between input and output\noutput_name = 'titleStrip_out.txt'\t\t\t\t\t\t# default output filename is out.txt\n\n# There will be a better home for this, mhm...\ndef clearConsole ():\n\tos.system ('cls' if os.name == 'nt' else 'clear')\n\t\ndef main():\n\tcheckArgs()\n\t\n\t# Open up the input / output files (read / write modes respectively)\n\trfile = open (sys.argv[1], 'r')\n\twfile = open (output_name, 'w')\n\n\tparseAndStrip (rfile, wfile) \n\t\n\t# Close the input / output files now that we are done\n\trfile.close()\n\twfile.close()\n\n# checkArgs\n#\t1. Verifies that the number of arguments is acceptable\n#\t2. Reads in optional output filename\ndef checkArgs ():\n\t# Verify number of input arguments\n\tif len (sys.argv) < 2 or len (sys.argv) > 3:\n\t\tprint (\"Usage Error:\\t\\tThe program needs (at least) an input filename to run.\")\n\t\tprint (\"Correct Usage:\\t\\tpython titleStrip.py [input filename]\")\n\t\tprint (\"Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]\")\n\t\tsys.exit(1)\n\t\n\t# Read in optional output filename if any\n\tif len (sys.argv) == 3:\n\t\tglobal output_name\t\t\t\t# Use the global output_name\n\t\toutput_name = sys.argv [2]\t\t# Set the name\n\n# parseAndStrip\n#\tReads through rfile and copies lines into wfile\n#\tIf we find a line to remove, we do not copy it into wfile\t\t\ndef parseAndStrip ( rfile, wfile ):\n\twhile True:\n\t\tline = rfile.readline()\t\t# read in a line\n\t\tif not line: return\t\t\t# leave this function if we are done\n\t\t\n\t\t# Check to see if line has a key for removal\n\t\tskip = 0\n\t\tfor key in strip_target:\n\t\t\tif key in line:\n\t\t\t\tskip = 1\n\t\t\n\t\t# Only copy from rfile to wfile if skip == 0\n\t\tif skip == 0:\n\t\t\twfile.write (line)\n\nmain()\n\t",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
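The flag-and-loop test inside parseAndStrip is equivalent to a single any() over the keys; a drop-in sketch of the same predicate (the helper name is my own):

def line_has_key(line, keys=strip_target):
    # True when any removal key occurs in the line
    return any(key in line for key in keys)

With it, the copy step collapses to: if not line_has_key(line): wfile.write(line)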
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@jit
def resolve():
N = int(input())
ans = 0
for n in range(1, N + 1):
for m in range(n, N + 1, n):
ans += m
print(ans)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@jit
def resolve():
N = int(input())
ans = 0
for n in range(1, N + 1):
for m in range(n, N + 1, n):
ans += m
print(ans)
if __name__ == '__main__':
resolve()
<|reserved_special_token_1|>
from numba import jit
@jit
def resolve():
N = int(input())
ans = 0
for n in range(1, N + 1):
for m in range(n, N + 1, n):
ans += m
print(ans)
if __name__ == '__main__':
resolve()
<|reserved_special_token_1|>
from numba import jit
@jit
def resolve():
N = int(input())
ans = 0
for n in range(1, N+1):
for m in range(n, N+1, n):
ans += m
print(ans)
if __name__ == "__main__":
resolve()
|
flexible
|
{
"blob_id": "8d8df517ca5486e62cc1b5ac23bbcfa65ed9c1ff",
"index": 6611,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@jit\ndef resolve():\n N = int(input())\n ans = 0\n for n in range(1, N + 1):\n for m in range(n, N + 1, n):\n ans += m\n print(ans)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@jit\ndef resolve():\n N = int(input())\n ans = 0\n for n in range(1, N + 1):\n for m in range(n, N + 1, n):\n ans += m\n print(ans)\n\n\nif __name__ == '__main__':\n resolve()\n",
"step-4": "from numba import jit\n\n\n@jit\ndef resolve():\n N = int(input())\n ans = 0\n for n in range(1, N + 1):\n for m in range(n, N + 1, n):\n ans += m\n print(ans)\n\n\nif __name__ == '__main__':\n resolve()\n",
"step-5": "from numba import jit\n\n@jit\ndef resolve():\n N = int(input())\n\n ans = 0\n for n in range(1, N+1):\n for m in range(n, N+1, n):\n ans += m\n print(ans)\n\nif __name__ == \"__main__\":\n resolve()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(12):
col = []
for j in range(12):
col.append(float(input()))
v.append(col)
<|reserved_special_token_0|>
for i in range(1, 12):
for j in range(a):
s += v[i][j]
a += 1
if o == 'S':
print('%.1f' % s)
if o == 'M':
print('%.1f' % (s / 66))
<|reserved_special_token_1|>
o = input()
v = []
s = 0
for i in range(12):
col = []
for j in range(12):
col.append(float(input()))
v.append(col)
a = 1
for i in range(1, 12):
for j in range(a):
s += v[i][j]
a += 1
if o == 'S':
print('%.1f' % s)
if o == 'M':
print('%.1f' % (s / 66))
<|reserved_special_token_1|>
o = input()
v = []
s = 0
for i in range(12):
col = []
for j in range(12):
col.append(float(input()))
v.append(col)
a = 1
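# sum the 66 elements strictly below the main diagonal (row i contributes its first i entries)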
for i in range(1, 12):
for j in range(a):
s += v[i][j]
a+=1
if o == 'S':
print("%.1f"%s)
if o == 'M':
print("%.1f"%(s/66))
|
flexible
|
{
"blob_id": "0df20722fba6223c9d4fc9f72bfb399b479db6ac",
"index": 7917,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\n<mask token>\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a += 1\nif o == 'S':\n print('%.1f' % s)\nif o == 'M':\n print('%.1f' % (s / 66))\n",
"step-3": "o = input()\nv = []\ns = 0\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\na = 1\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a += 1\nif o == 'S':\n print('%.1f' % s)\nif o == 'M':\n print('%.1f' % (s / 66))\n",
"step-4": "o = input()\nv = []\ns = 0\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\na = 1\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a+=1\nif o == 'S':\n print(\"%.1f\"%s)\nif o == 'M':\n print(\"%.1f\"%(s/66))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import user
# or from user import User
from post import Post
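# User(email, display_name, password, user_type) - argument order inferred from the calls below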
app_user_one = user.User("rr@gg.com", "Riks R", "ppp1", "student")
app_user_one.get_user_info()
app_user_one.change_status("in job market")
app_user_one.get_user_info()
app_user_two = user.User("z43@gg.com", "Bobby L", "zz1", "student")
app_user_two.get_user_info()
new_post = Post("Going for it", app_user_two.name)
new_post.get_post_info()
|
normal
|
{
"blob_id": "f59db28b669a41051cc6d0d4b8e14d1c7b0edd11",
"index": 2555,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_user_one.get_user_info()\napp_user_one.change_status('in job market')\napp_user_one.get_user_info()\n<mask token>\napp_user_two.get_user_info()\n<mask token>\nnew_post.get_post_info()\n",
"step-3": "<mask token>\napp_user_one = user.User('rr@gg.com', 'Riks R', 'ppp1', 'student')\napp_user_one.get_user_info()\napp_user_one.change_status('in job market')\napp_user_one.get_user_info()\napp_user_two = user.User('z43@gg.com', 'Bobby L', 'zz1', 'student')\napp_user_two.get_user_info()\nnew_post = Post('Going for it', app_user_two.name)\nnew_post.get_post_info()\n",
"step-4": "import user\nfrom post import Post\napp_user_one = user.User('rr@gg.com', 'Riks R', 'ppp1', 'student')\napp_user_one.get_user_info()\napp_user_one.change_status('in job market')\napp_user_one.get_user_info()\napp_user_two = user.User('z43@gg.com', 'Bobby L', 'zz1', 'student')\napp_user_two.get_user_info()\nnew_post = Post('Going for it', app_user_two.name)\nnew_post.get_post_info()\n",
"step-5": "import user\n\n# or from user import User\nfrom post import Post\napp_user_one = user.User(\"rr@gg.com\", \"Riks R\", \"ppp1\", \"student\")\napp_user_one.get_user_info()\napp_user_one.change_status(\"in job market\")\napp_user_one.get_user_info()\n\n\napp_user_two = user.User(\"z43@gg.com\", \"Bobby L\", \"zz1\", \"student\")\napp_user_two.get_user_info()\n\nnew_post = Post(\"Going for it\", app_user_two.name)\nnew_post.get_post_info()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
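# sum of the decimal digits of 2**1000 (Project Euler problem 16)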
print(sum([int(d) for d in str(pow(2, 1000))]))
|
flexible
|
{
"blob_id": "fc0c8deb3a5a57934c9e707911c352af55100c3c",
"index": 3533,
"step-1": "<mask token>\n",
"step-2": "print(sum([int(d) for d in str(pow(2, 1000))]))\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import random
def Fun_hiraganas():
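    # show a random hiragana (in romaji) for the user to practice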
hiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko', 'sa', 'shi', 'su', 'se',
'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na', 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']
print("escriba el hiragana", hiraganas[random.randint(0, len(hiraganas)-1)])
print("Hello, type exit if you want to leave")
answer = ""
while answer.lower() != 'exit':
Fun_hiraganas()
answer = input("Type exit if you want to leave")
print("bye")
|
normal
|
{
"blob_id": "1fe7d5db1b47ba082301d07d010c6796fbd7edb7",
"index": 6859,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Fun_hiraganas():\n hiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko',\n 'sa', 'shi', 'su', 'se', 'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na',\n 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']\n print('escriba el hiragana', hiraganas[random.randint(0, len(hiraganas) -\n 1)])\n\n\nprint('Hello, type exit if you want to leave')\n<mask token>\nwhile answer.lower() != 'exit':\n Fun_hiraganas()\n answer = input('Type exit if you want to leave')\nprint('bye')\n",
"step-3": "<mask token>\n\n\ndef Fun_hiraganas():\n hiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko',\n 'sa', 'shi', 'su', 'se', 'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na',\n 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']\n print('escriba el hiragana', hiraganas[random.randint(0, len(hiraganas) -\n 1)])\n\n\nprint('Hello, type exit if you want to leave')\nanswer = ''\nwhile answer.lower() != 'exit':\n Fun_hiraganas()\n answer = input('Type exit if you want to leave')\nprint('bye')\n",
"step-4": "import random\n\n\ndef Fun_hiraganas():\n hiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko',\n 'sa', 'shi', 'su', 'se', 'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na',\n 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']\n print('escriba el hiragana', hiraganas[random.randint(0, len(hiraganas) -\n 1)])\n\n\nprint('Hello, type exit if you want to leave')\nanswer = ''\nwhile answer.lower() != 'exit':\n Fun_hiraganas()\n answer = input('Type exit if you want to leave')\nprint('bye')\n",
"step-5": "import random\n\ndef Fun_hiraganas():\n\thiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko', 'sa', 'shi', 'su', 'se', \n\t'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na', 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']\n\tprint(\"escriba el hiragana\", hiraganas[random.randint(0, len(hiraganas)-1)])\n\nprint(\"Hello, type exit if you want to leave\")\nanswer = \"\"\nwhile answer.lower() != 'exit':\n\tFun_hiraganas() \n\tanswer = input(\"Type exit if you want to leave\")\nprint(\"bye\")\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Message(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Message(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def json_decode(self, jsondata):
self.uuid = jsondata['id']
self.message = jsondata['message']
self.user = jsondata['user']
self.timestamp = jsondata['timestamp']
def json_encode(self):
dict = {}
dict['id'] = self.uuid
dict['user'] = self.user
dict['message'] = self.message
dict['timestamp'] = self.timestamp
return dict
def __unicode__(self):
return str(self.timestamp) + ' ' + self.user + ': ' + self.message
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Message(models.Model):
uuid = models.CharField(max_length=50)
user = models.CharField(max_length=20)
message = models.CharField(max_length=200)
timestamp = models.DateTimeField()
def json_decode(self, jsondata):
self.uuid = jsondata['id']
self.message = jsondata['message']
self.user = jsondata['user']
self.timestamp = jsondata['timestamp']
def json_encode(self):
dict = {}
dict['id'] = self.uuid
dict['user'] = self.user
dict['message'] = self.message
dict['timestamp'] = self.timestamp
return dict
def __unicode__(self):
return str(self.timestamp) + ' ' + self.user + ': ' + self.message
<|reserved_special_token_1|>
from django.core import serializers
from django.db import models
from uuid import uuid4
from django.utils import timezone
from django.contrib.auth.models import User
class Message(models.Model):
uuid = models.CharField(max_length=50)
user = models.CharField(max_length=20)
message = models.CharField(max_length=200)
timestamp = models.DateTimeField()
def json_decode(self, jsondata):
self.uuid = jsondata['id']
self.message = jsondata['message']
self.user = jsondata['user']
self.timestamp = jsondata['timestamp']
def json_encode(self):
dict = {}
dict['id'] = self.uuid
dict['user'] = self.user
dict['message'] = self.message
dict['timestamp'] = self.timestamp
return dict
def __unicode__(self):
return str(self.timestamp) + ' ' + self.user + ': ' + self.message
<|reserved_special_token_1|>
from django.core import serializers
from django.db import models
from uuid import uuid4
from django.utils import timezone
from django.contrib.auth.models import User
class Message(models.Model):
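    # a chat message row; json_encode/json_decode translate to and from the JSON wire format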
uuid=models.CharField(max_length=50)
user=models.CharField(max_length=20)
message=models.CharField(max_length=200)
timestamp=models.DateTimeField()
def json_decode(self, jsondata):
self.uuid=jsondata['id']
self.message=jsondata['message']
self.user=jsondata['user']
self.timestamp=jsondata['timestamp']
def json_encode(self):
dict={}
dict['id']=self.uuid
dict['user']=self.user
dict['message']=self.message
dict['timestamp']=self.timestamp
return dict
def __unicode__(self):
return str(self.timestamp)+" "+self.user+": "+self.message
|
flexible
|
{
"blob_id": "1476d4f488e6c55234a34dc5b6182e3b8ad4f702",
"index": 6201,
"step-1": "<mask token>\n\n\nclass Message(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Message(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def json_decode(self, jsondata):\n self.uuid = jsondata['id']\n self.message = jsondata['message']\n self.user = jsondata['user']\n self.timestamp = jsondata['timestamp']\n\n def json_encode(self):\n dict = {}\n dict['id'] = self.uuid\n dict['user'] = self.user\n dict['message'] = self.message\n dict['timestamp'] = self.timestamp\n return dict\n\n def __unicode__(self):\n return str(self.timestamp) + ' ' + self.user + ': ' + self.message\n",
"step-3": "<mask token>\n\n\nclass Message(models.Model):\n uuid = models.CharField(max_length=50)\n user = models.CharField(max_length=20)\n message = models.CharField(max_length=200)\n timestamp = models.DateTimeField()\n\n def json_decode(self, jsondata):\n self.uuid = jsondata['id']\n self.message = jsondata['message']\n self.user = jsondata['user']\n self.timestamp = jsondata['timestamp']\n\n def json_encode(self):\n dict = {}\n dict['id'] = self.uuid\n dict['user'] = self.user\n dict['message'] = self.message\n dict['timestamp'] = self.timestamp\n return dict\n\n def __unicode__(self):\n return str(self.timestamp) + ' ' + self.user + ': ' + self.message\n",
"step-4": "from django.core import serializers\nfrom django.db import models\nfrom uuid import uuid4\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\n\nclass Message(models.Model):\n uuid = models.CharField(max_length=50)\n user = models.CharField(max_length=20)\n message = models.CharField(max_length=200)\n timestamp = models.DateTimeField()\n\n def json_decode(self, jsondata):\n self.uuid = jsondata['id']\n self.message = jsondata['message']\n self.user = jsondata['user']\n self.timestamp = jsondata['timestamp']\n\n def json_encode(self):\n dict = {}\n dict['id'] = self.uuid\n dict['user'] = self.user\n dict['message'] = self.message\n dict['timestamp'] = self.timestamp\n return dict\n\n def __unicode__(self):\n return str(self.timestamp) + ' ' + self.user + ': ' + self.message\n",
"step-5": "from django.core import serializers\nfrom django.db import models\nfrom uuid import uuid4\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\nclass Message(models.Model):\n uuid=models.CharField(max_length=50)\n user=models.CharField(max_length=20)\n message=models.CharField(max_length=200)\n timestamp=models.DateTimeField()\n \n def json_decode(self, jsondata):\n self.uuid=jsondata['id']\n self.message=jsondata['message']\n self.user=jsondata['user']\n self.timestamp=jsondata['timestamp']\n\n def json_encode(self):\n dict={}\n dict['id']=self.uuid\n dict['user']=self.user\n dict['message']=self.message\n dict['timestamp']=self.timestamp\n return dict\n\n def __unicode__(self):\n return str(self.timestamp)+\" \"+self.user+\": \"+self.message\n \n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Entity_list_user(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Entity_list_Attendance(db.Model):
ID = db.Column(db.Integer, primary_key=True)
FacultyID = db.Column(db.String(250), nullable=False)
Name = db.Column(db.String(250), nullable=False)
Time = db.Column(db.String(250), nullable=False)
InOut = db.Column(db.String(250), nullable=False)
Date = db.Column(db.Date, nullable=False)
db.ForeignKeyConstraint(['FacultyID'], ['Entity_list_user.FacultyID'],
name='fk_FacultyID')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Entity_list_user(db.Model):
ID = db.Column(db.Integer, primary_key=True)
NationalID = db.Column(db.String(250), nullable=False)
FirstName = db.Column(db.String(250), nullable=False)
LastName = db.Column(db.String(250), nullable=False)
Email = db.Column(db.String(250), nullable=False)
Password = db.Column(db.String(250), nullable=False)
FacultyID = db.Column(db.String(250))
Faculty = db.Column(db.String(250))
Dept = db.Column(db.String(250))
UserType = db.Column(db.String(250), nullable=False)
class Entity_list_Attendance(db.Model):
ID = db.Column(db.Integer, primary_key=True)
FacultyID = db.Column(db.String(250), nullable=False)
Name = db.Column(db.String(250), nullable=False)
Time = db.Column(db.String(250), nullable=False)
InOut = db.Column(db.String(250), nullable=False)
Date = db.Column(db.Date, nullable=False)
db.ForeignKeyConstraint(['FacultyID'], ['Entity_list_user.FacultyID'],
name='fk_FacultyID')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
params = urllib.parse.quote_plus(
'Driver={SQL Server};Server=YoussefSami;Database=CLS_DB2;Trusted_Connection=yes;'
)
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['Access-Control-Allow-Origin'] = '*'
app.config['DEBUG'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['TESTING'] = True
app.config['SECRET_KEY'] = 'thisissecretkey'
app.config['SQLALCHEMY_DATABASE_URI'
] = 'mssql+pyodbc:///?odbc_connect=%s' % params
db = SQLAlchemy(app)
class Entity_list_user(db.Model):
ID = db.Column(db.Integer, primary_key=True)
NationalID = db.Column(db.String(250), nullable=False)
FirstName = db.Column(db.String(250), nullable=False)
LastName = db.Column(db.String(250), nullable=False)
Email = db.Column(db.String(250), nullable=False)
Password = db.Column(db.String(250), nullable=False)
FacultyID = db.Column(db.String(250))
Faculty = db.Column(db.String(250))
Dept = db.Column(db.String(250))
UserType = db.Column(db.String(250), nullable=False)
class Entity_list_Attendance(db.Model):
ID = db.Column(db.Integer, primary_key=True)
FacultyID = db.Column(db.String(250), nullable=False)
Name = db.Column(db.String(250), nullable=False)
Time = db.Column(db.String(250), nullable=False)
InOut = db.Column(db.String(250), nullable=False)
Date = db.Column(db.Date, nullable=False)
db.ForeignKeyConstraint(['FacultyID'], ['Entity_list_user.FacultyID'],
name='fk_FacultyID')
<|reserved_special_token_1|>
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import urllib
from flask import Flask
params = urllib.parse.quote_plus(
'Driver={SQL Server};Server=YoussefSami;Database=CLS_DB2;Trusted_Connection=yes;'
)
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['Access-Control-Allow-Origin'] = '*'
app.config['DEBUG'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['TESTING'] = True
app.config['SECRET_KEY'] = 'thisissecretkey'
app.config['SQLALCHEMY_DATABASE_URI'
] = 'mssql+pyodbc:///?odbc_connect=%s' % params
db = SQLAlchemy(app)
class Entity_list_user(db.Model):
ID = db.Column(db.Integer, primary_key=True)
NationalID = db.Column(db.String(250), nullable=False)
FirstName = db.Column(db.String(250), nullable=False)
LastName = db.Column(db.String(250), nullable=False)
Email = db.Column(db.String(250), nullable=False)
Password = db.Column(db.String(250), nullable=False)
FacultyID = db.Column(db.String(250))
Faculty = db.Column(db.String(250))
Dept = db.Column(db.String(250))
UserType = db.Column(db.String(250), nullable=False)
class Entity_list_Attendance(db.Model):
ID = db.Column(db.Integer, primary_key=True)
FacultyID = db.Column(db.String(250), nullable=False)
Name = db.Column(db.String(250), nullable=False)
Time = db.Column(db.String(250), nullable=False)
InOut = db.Column(db.String(250), nullable=False)
Date = db.Column(db.Date, nullable=False)
db.ForeignKeyConstraint(['FacultyID'], ['Entity_list_user.FacultyID'],
name='fk_FacultyID')
<|reserved_special_token_1|>
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import urllib
from flask import Flask
########################################################################################DataBase@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@2
#connection string
params = urllib.parse.quote_plus('Driver={SQL Server};'
'Server=YoussefSami;'
'Database=CLS_DB2;'
'Trusted_Connection=yes;')
#init flask app
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS']='Content-Type'
app.config['Access-Control-Allow-Origin'] ='*'
app.config["DEBUG"]=True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] =True
app.config['TESTING']=True
app.config['SECRET_KEY']='thisissecretkey'
#init db
app.config['SQLALCHEMY_DATABASE_URI'] = "mssql+pyodbc:///?odbc_connect=%s" % params
db=SQLAlchemy(app)
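#table names come from Flask-SQLAlchemy's default naming, i.e. the class name snake_cased (entity_list_user)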
#create modules for database
class Entity_list_user(db.Model):
ID = db.Column(db.Integer, primary_key=True)
NationalID = db.Column(db.String(250),nullable=False)
FirstName = db.Column(db.String(250), nullable=False)
LastName = db.Column(db.String(250), nullable=False)
Email = db.Column(db.String(250), nullable=False)
Password = db.Column(db.String(250), nullable=False)
FacultyID = db.Column(db.String(250))
Faculty = db.Column(db.String(250))
Dept = db.Column(db.String(250))
UserType=db.Column(db.String(250),nullable=False)
class Entity_list_Attendance(db.Model):
ID = db.Column(db.Integer, primary_key=True, )
FacultyID = db.Column(db.String(250),nullable=False)
Name = db.Column(db.String(250), nullable=False)
Time = db.Column(db.String(250), nullable=False)
InOut = db.Column(db.String(250), nullable=False)
Date = db.Column(db.Date, nullable=False)
    # wrap the constraint in __table_args__ so SQLAlchemy actually attaches it to the table
    __table_args__ = (
        db.ForeignKeyConstraint(
            ['FacultyID'], ['Entity_list_user.FacultyID'],
            name='fk_FacultyID'
        ),
    )
|
flexible
|
{
"blob_id": "6928ff58ddb97883a43dfd867ff9a89db72ae348",
"index": 6567,
"step-1": "<mask token>\n\n\nclass Entity_list_user(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Entity_list_Attendance(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n FacultyID = db.Column(db.String(250), nullable=False)\n Name = db.Column(db.String(250), nullable=False)\n Time = db.Column(db.String(250), nullable=False)\n InOut = db.Column(db.String(250), nullable=False)\n Date = db.Column(db.Date, nullable=False)\n db.ForeignKeyConstraint(['FacultyID'], ['Entity_list_user.FacultyID'],\n name='fk_FacultyID')\n",
"step-2": "<mask token>\n\n\nclass Entity_list_user(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n NationalID = db.Column(db.String(250), nullable=False)\n FirstName = db.Column(db.String(250), nullable=False)\n LastName = db.Column(db.String(250), nullable=False)\n Email = db.Column(db.String(250), nullable=False)\n Password = db.Column(db.String(250), nullable=False)\n FacultyID = db.Column(db.String(250))\n Faculty = db.Column(db.String(250))\n Dept = db.Column(db.String(250))\n UserType = db.Column(db.String(250), nullable=False)\n\n\nclass Entity_list_Attendance(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n FacultyID = db.Column(db.String(250), nullable=False)\n Name = db.Column(db.String(250), nullable=False)\n Time = db.Column(db.String(250), nullable=False)\n InOut = db.Column(db.String(250), nullable=False)\n Date = db.Column(db.Date, nullable=False)\n db.ForeignKeyConstraint(['FacultyID'], ['Entity_list_user.FacultyID'],\n name='fk_FacultyID')\n",
"step-3": "<mask token>\nparams = urllib.parse.quote_plus(\n 'Driver={SQL Server};Server=YoussefSami;Database=CLS_DB2;Trusted_Connection=yes;'\n )\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\napp.config['Access-Control-Allow-Origin'] = '*'\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['TESTING'] = True\napp.config['SECRET_KEY'] = 'thisissecretkey'\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'mssql+pyodbc:///?odbc_connect=%s' % params\ndb = SQLAlchemy(app)\n\n\nclass Entity_list_user(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n NationalID = db.Column(db.String(250), nullable=False)\n FirstName = db.Column(db.String(250), nullable=False)\n LastName = db.Column(db.String(250), nullable=False)\n Email = db.Column(db.String(250), nullable=False)\n Password = db.Column(db.String(250), nullable=False)\n FacultyID = db.Column(db.String(250))\n Faculty = db.Column(db.String(250))\n Dept = db.Column(db.String(250))\n UserType = db.Column(db.String(250), nullable=False)\n\n\nclass Entity_list_Attendance(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n FacultyID = db.Column(db.String(250), nullable=False)\n Name = db.Column(db.String(250), nullable=False)\n Time = db.Column(db.String(250), nullable=False)\n InOut = db.Column(db.String(250), nullable=False)\n Date = db.Column(db.Date, nullable=False)\n db.ForeignKeyConstraint(['FacultyID'], ['Entity_list_user.FacultyID'],\n name='fk_FacultyID')\n",
"step-4": "from flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport urllib\nfrom flask import Flask\nparams = urllib.parse.quote_plus(\n 'Driver={SQL Server};Server=YoussefSami;Database=CLS_DB2;Trusted_Connection=yes;'\n )\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\napp.config['Access-Control-Allow-Origin'] = '*'\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['TESTING'] = True\napp.config['SECRET_KEY'] = 'thisissecretkey'\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'mssql+pyodbc:///?odbc_connect=%s' % params\ndb = SQLAlchemy(app)\n\n\nclass Entity_list_user(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n NationalID = db.Column(db.String(250), nullable=False)\n FirstName = db.Column(db.String(250), nullable=False)\n LastName = db.Column(db.String(250), nullable=False)\n Email = db.Column(db.String(250), nullable=False)\n Password = db.Column(db.String(250), nullable=False)\n FacultyID = db.Column(db.String(250))\n Faculty = db.Column(db.String(250))\n Dept = db.Column(db.String(250))\n UserType = db.Column(db.String(250), nullable=False)\n\n\nclass Entity_list_Attendance(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n FacultyID = db.Column(db.String(250), nullable=False)\n Name = db.Column(db.String(250), nullable=False)\n Time = db.Column(db.String(250), nullable=False)\n InOut = db.Column(db.String(250), nullable=False)\n Date = db.Column(db.Date, nullable=False)\n db.ForeignKeyConstraint(['FacultyID'], ['Entity_list_user.FacultyID'],\n name='fk_FacultyID')\n",
"step-5": "from flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport urllib\nfrom flask import Flask\n########################################################################################DataBase@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@2\n#connection string\nparams = urllib.parse.quote_plus('Driver={SQL Server};'\n 'Server=YoussefSami;'\n 'Database=CLS_DB2;'\n 'Trusted_Connection=yes;')\n#init flas app\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS']='Content-Type'\napp.config['Access-Control-Allow-Origin'] ='*'\napp.config[\"DEBUG\"]=True\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] =True\napp.config['TESTING']=True\napp.config['SECRET_KEY']='thisissecretkey'\n#init db\napp.config['SQLALCHEMY_DATABASE_URI'] = \"mssql+pyodbc:///?odbc_connect=%s\" % params\ndb=SQLAlchemy(app)\n\n#create modules for database\nclass Entity_list_user(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n NationalID = db.Column(db.String(250),nullable=False)\n FirstName = db.Column(db.String(250), nullable=False)\n LastName = db.Column(db.String(250), nullable=False)\n Email = db.Column(db.String(250), nullable=False)\n Password = db.Column(db.String(250), nullable=False)\n FacultyID = db.Column(db.String(250))\n Faculty = db.Column(db.String(250))\n Dept = db.Column(db.String(250))\n UserType=db.Column(db.String(250),nullable=False)\n\n\nclass Entity_list_Attendance(db.Model):\n ID = db.Column(db.Integer, primary_key=True, )\n FacultyID = db.Column(db.String(250),nullable=False)\n Name = db.Column(db.String(250), nullable=False)\n Time = db.Column(db.String(250), nullable=False)\n InOut = db.Column(db.String(250), nullable=False)\n Date = db.Column(db.Date, nullable=False)\n db.ForeignKeyConstraint(\n ['FacultyID'], ['Entity_list_user.FacultyID'],\n name='fk_FacultyID'\n )\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# module: order functionality
# HW2: complete this func
def process_option(food, option):
# print(food.keys())
food_name = list(food.keys())[option-1]
food_price = food[food_name]
print(food_price)
print("You have chosen: ", option, food_name, "!", " For unit price: ", food_price)
# HW2: ask quantity
# if ENTER = cancel
# if ent numb = calc total (func separate func)
# print total
# ask confirmation (y/n)
    # ask for customer name
# save the order data in data/<name>order.txt
    qty_raw = input("How many? ")
    if qty_raw == "":  # plain ENTER cancels, per the spec above
        print("Order cancelled")
        return
    q = int(qty_raw)
    total = q * food_price
    print(food_name, "x", q, "=", total)
# file = open("copy.txt", "w")
# file.write("Your text goes here")
# file.close()
client_name = input("Your name pls: ")
# file = open("data/" + client_name + ".txt", "w")
# file.write(food_name + "|" + str(q) + str(food_price) + "|" + str(total))
# file.close()
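    # persist the order as: food_name|quantity|unit_price|total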
with open("data/" + client_name + ".txt", "w") as file:
file.write(food_name + "|" + str(q) + "|" + str(food_price) + "|" + str(total))
def confirmation():
c = input("Press y/n for confirmation: ")
if c == "y":
print("Reservation confirmed!")
elif c == "n":
print("Reservation decline!")
elif c == "":
print("Cancel reservation")
else:
print("CK next time...")
def show_order_info():
client_name = input("Your name in data: ")
file = open("data/" + client_name + ".txt", "r")
data = file.read()
file.close()
print(data)
|
normal
|
{
"blob_id": "07bd3c7cacbf8d0e39d06b21456258ad92cb2294",
"index": 676,
"step-1": "<mask token>\n",
"step-2": "def process_option(food, option):\n food_name = list(food.keys())[option - 1]\n food_price = food[food_name]\n print(food_price)\n print('You have chosen: ', option, food_name, '!', ' For unit price: ',\n food_price)\n q = int(input('How many? '))\n total = q * food_price\n print(food_name, 'x', q, '=', total)\n client_name = input('Your name pls: ')\n with open('data/' + client_name + '.txt', 'w') as file:\n file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +\n str(total))\n\n\n<mask token>\n",
"step-3": "def process_option(food, option):\n food_name = list(food.keys())[option - 1]\n food_price = food[food_name]\n print(food_price)\n print('You have chosen: ', option, food_name, '!', ' For unit price: ',\n food_price)\n q = int(input('How many? '))\n total = q * food_price\n print(food_name, 'x', q, '=', total)\n client_name = input('Your name pls: ')\n with open('data/' + client_name + '.txt', 'w') as file:\n file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +\n str(total))\n\n\ndef confirmation():\n c = input('Press y/n for confirmation: ')\n if c == 'y':\n print('Reservation confirmed!')\n elif c == 'n':\n print('Reservation decline!')\n elif c == '':\n print('Cancel reservation')\n else:\n print('CK next time...')\n\n\n<mask token>\n",
"step-4": "def process_option(food, option):\n food_name = list(food.keys())[option - 1]\n food_price = food[food_name]\n print(food_price)\n print('You have chosen: ', option, food_name, '!', ' For unit price: ',\n food_price)\n q = int(input('How many? '))\n total = q * food_price\n print(food_name, 'x', q, '=', total)\n client_name = input('Your name pls: ')\n with open('data/' + client_name + '.txt', 'w') as file:\n file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +\n str(total))\n\n\ndef confirmation():\n c = input('Press y/n for confirmation: ')\n if c == 'y':\n print('Reservation confirmed!')\n elif c == 'n':\n print('Reservation decline!')\n elif c == '':\n print('Cancel reservation')\n else:\n print('CK next time...')\n\n\ndef show_order_info():\n client_name = input('Your name in data: ')\n file = open('data/' + client_name + '.txt', 'r')\n data = file.read()\n file.close()\n print(data)\n",
"step-5": "\r\n# module: order functionality\r\n\r\n\r\n# HW2: complete this func\r\n\r\ndef process_option(food, option):\r\n # print(food.keys())\r\n food_name = list(food.keys())[option-1]\r\n food_price = food[food_name]\r\n\r\n print(food_price)\r\n print(\"You have chosen: \", option, food_name, \"!\", \" For unit price: \", food_price)\r\n\r\n # HW2: ask quantity\r\n # if ENTER = cancel\r\n\r\n # if ent numb = calc total (func separate func)\r\n # print total\r\n # ask confirmation (y/n)\r\n # ask for costumer name\r\n # save the order data in data/<name>order.txt\r\n\r\n q = int(input(\"How many? \"))\r\n total = q * food_price\r\n print(food_name, \"x\", q, \"=\", total)\r\n\r\n\r\n # file = open(\"copy.txt\", \"w\")\r\n # file.write(\"Your text goes here\")\r\n # file.close()\r\n\r\n client_name = input(\"Your name pls: \")\r\n # file = open(\"data/\" + client_name + \".txt\", \"w\")\r\n # file.write(food_name + \"|\" + str(q) + str(food_price) + \"|\" + str(total))\r\n # file.close()\r\n\r\n with open(\"data/\" + client_name + \".txt\", \"w\") as file:\r\n file.write(food_name + \"|\" + str(q) + \"|\" + str(food_price) + \"|\" + str(total))\r\n\r\n\r\n\r\ndef confirmation():\r\n c = input(\"Press y/n for confirmation: \")\r\n if c == \"y\":\r\n print(\"Reservation confirmed!\")\r\n elif c == \"n\":\r\n print(\"Reservation decline!\")\r\n elif c == \"\":\r\n print(\"Cancel reservation\")\r\n else:\r\n print(\"CK next time...\")\r\n\r\n\r\ndef show_order_info():\r\n client_name = input(\"Your name in data: \")\r\n file = open(\"data/\" + client_name + \".txt\", \"r\")\r\n data = file.read()\r\n file.close()\r\n print(data)\r\n\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask, render_template, send_from_directory
from flask import request, send_file
from flask_cors import CORS
import os
import json
from crossdomain import crossdomain
import constants
import generation_tools
from music_theory import name_chords_in_tracks
import midi_tools
from client_logging import ClientLogger
from generation_tools import Generator
app = Flask(__name__)
CORS(app)
BASE_URL = os.path.abspath(os.path.dirname(__file__))
CLIENT_APP_FOLDER = os.path.join(BASE_URL, "ClientApp")
DawState = {}
ClientLogger = ClientLogger()
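# note: this rebinds the class name ClientLogger to a single shared instance used by all routes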
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult' : result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult' : result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
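    # e.g. 'C#' -> 'Cs'; sharps are spelled with 's', presumably so key names stay identifier-safe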
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename,
mimetype='audio/midi audio/x-midi',
as_attachment=True,
attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
|
normal
|
{
"blob_id": "471cab65aac29f5b47de0ffef8f032dbbadf8dd0",
"index": 1877,
"step-1": "<mask token>\n\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n response = {'generationResult': result}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n DawState['chord_names'] = result_chord_names\n response = {'generationResult': result_chords}\n return json.dumps(add_logs_to_response(response))\n\n\n<mask token>\n\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return send_file(filename, mimetype='audio/midi audio/x-midi',\n as_attachment=True, attachment_filename=filename)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404\n",
"step-2": "<mask token>\n\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n response = {'generationResult': result}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n DawState['chord_names'] = result_chord_names\n response = {'generationResult': result_chords}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/daw-state', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef update_daw_state():\n content = request.get_json()\n key = content['key'].replace('#', 's')\n scale = content['scale']\n tempo = content['tempo']\n tracks = content['tracks']\n DawState['scale'] = scale\n DawState['key'] = key\n DawState['tempo'] = tempo\n DawState['tracks'] = tracks\n chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)\n DawState['chord_names'] = chord_names\n DawState['chord_degrees'] = chord_degrees\n response = DawState\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return send_file(filename, mimetype='audio/midi audio/x-midi',\n as_attachment=True, attachment_filename=filename)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404\n",
"step-3": "<mask token>\nCORS(app)\n<mask token>\n\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n response = {'generationResult': result}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n DawState['chord_names'] = result_chord_names\n response = {'generationResult': result_chords}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/daw-state', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef update_daw_state():\n content = request.get_json()\n key = content['key'].replace('#', 's')\n scale = content['scale']\n tempo = content['tempo']\n tracks = content['tracks']\n DawState['scale'] = scale\n DawState['key'] = key\n DawState['tempo'] = tempo\n DawState['tracks'] = tracks\n chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)\n DawState['chord_names'] = chord_names\n DawState['chord_degrees'] = chord_degrees\n response = DawState\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return send_file(filename, mimetype='audio/midi audio/x-midi',\n as_attachment=True, attachment_filename=filename)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404\n",
"step-4": "from flask import Flask, render_template, send_from_directory\nfrom flask import request, send_file\nfrom flask_cors import CORS\nimport os\nimport json\nfrom crossdomain import crossdomain\nimport constants\nimport generation_tools\nfrom music_theory import name_chords_in_tracks\nimport midi_tools\nfrom client_logging import ClientLogger\nfrom generation_tools import Generator\napp = Flask(__name__)\nCORS(app)\nBASE_URL = os.path.abspath(os.path.dirname(__file__))\nCLIENT_APP_FOLDER = os.path.join(BASE_URL, 'ClientApp')\nDawState = {}\nClientLogger = ClientLogger()\n\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n response = {'generationResult': result}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n DawState['chord_names'] = result_chord_names\n response = {'generationResult': result_chords}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/daw-state', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef update_daw_state():\n content = request.get_json()\n key = content['key'].replace('#', 's')\n scale = content['scale']\n tempo = content['tempo']\n tracks = content['tracks']\n DawState['scale'] = scale\n DawState['key'] = key\n DawState['tempo'] = tempo\n DawState['tracks'] = tracks\n chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)\n DawState['chord_names'] = chord_names\n DawState['chord_degrees'] = chord_degrees\n response = DawState\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return send_file(filename, mimetype='audio/midi audio/x-midi',\n as_attachment=True, attachment_filename=filename)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404\n",
"step-5": "from flask import Flask, render_template, send_from_directory\nfrom flask import request, send_file\nfrom flask_cors import CORS\nimport os\nimport json\nfrom crossdomain import crossdomain\nimport constants\nimport generation_tools\nfrom music_theory import name_chords_in_tracks\nimport midi_tools\nfrom client_logging import ClientLogger\nfrom generation_tools import Generator\napp = Flask(__name__)\nCORS(app)\n\nBASE_URL = os.path.abspath(os.path.dirname(__file__))\nCLIENT_APP_FOLDER = os.path.join(BASE_URL, \"ClientApp\")\n\nDawState = {}\nClientLogger = ClientLogger()\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n \n response = {'generationResult' : result}\n return json.dumps(add_logs_to_response(response))\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n\n DawState['chord_names'] = result_chord_names\n response = {'generationResult' : result_chords}\n return json.dumps(add_logs_to_response(response))\n\n@app.route('/daw-state', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef update_daw_state():\n content = request.get_json()\n key = content['key'].replace('#', 's')\n scale = content['scale']\n tempo = content['tempo']\n tracks = content['tracks']\n \n DawState['scale'] = scale\n DawState['key'] = key\n DawState['tempo'] = tempo\n DawState['tracks'] = tracks\n chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)\n DawState['chord_names'] = chord_names\n DawState['chord_degrees'] = chord_degrees\n \n response = DawState\n return json.dumps(add_logs_to_response(response))\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return send_file(filename,\n mimetype='audio/midi audio/x-midi',\n as_attachment=True,\n attachment_filename=filename)\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def backup_cron():
    if settings.DBBACKUP_STORAGE != '':
management.call_command('dbbackup')
<|reserved_special_token_1|>
from django.core import management
from django.conf import settings
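# runs django-dbbackup's 'dbbackup' management command whenever a backup storage is configured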
def backup_cron():
    if settings.DBBACKUP_STORAGE != '':
management.call_command('dbbackup')
|
flexible
|
{
"blob_id": "ae9f1c4f70801dace0455c051ba4d4bfb7f3fe67",
"index": 4813,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef backup_cron():\n if settings.DBBACKUP_STORAGE is not '':\n management.call_command('dbbackup')\n",
"step-3": "from django.core import management\nfrom django.conf import settings\n\n\ndef backup_cron():\n if settings.DBBACKUP_STORAGE is not '':\n management.call_command('dbbackup')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from typing import Tuple
#Creating a trie structure and it's node
class TrieNode(object):
def __init__(self, char: str):
self.char = char
self.children = []
        #the last character of the word
self.word_finished = False
#counter for this character
self.counter = 1
        #maps each document to the number of occurrences of this word
self.OccurrenceList={}
#Initialize the root of the trie
root = TrieNode('*')
#Adding a word in the trie structure
def insert(root, word: str,document):
node = root
for char in word:
found_in_child = False
# Search for the character in the children of the present `node`
for child in node.children:
if child.char == char:
#the char of the word to be inserted is already present in trie; increment the counter of this char
child.counter += 1
# move the pointer to the node's child to continue the insertion of the rest of the word
node = child
found_in_child = True
break
# this char has never been inserted before, create node and insert it
if not found_in_child:
new_node = TrieNode(char)
node.children.append(new_node)
# And then point node to the new child
node = new_node
# At this point, word is inserted- we mark the end of this word
node.word_finished = True
    if document not in node.OccurrenceList: #If document is not in OccurrenceList for that word
        node.OccurrenceList[document]=0 # create a new entry keyed by the document name
    node.OccurrenceList[document]= node.OccurrenceList[document]+1 # increment the occurrence count for this document
#Performing the search in our files for the input word, using the trie structure we created above
#We first check for the word's existence; if it exists, return the file names and occurrence counts
def find_prefix(root, prefix: str) -> Tuple[bool, int]:
node = root
    #handling the case of an empty trie, i.e. the root node has no children
    if not root.children:
        return False, {}
for char in prefix:
char_not_found = True
# Search through all the children of the node the pointer is pointing to
for child in node.children:
if child.char == char:
#the char of the input word exists in trie
char_not_found = False
# increment the pointer to go further down the trie to check for the remaining chars in prefix
node = child
break
        #abort as soon as a character of the prefix is missing from the trie
        if char_not_found:
            print("Word Not Found: " +prefix)
            return False, {}
    #every character matched - return the found status, along with the files in which the prefix exists
    print("Word Found: " +prefix)
    return True,node.OccurrenceList
#for scraping words from the saved web pages
from bs4 import BeautifulSoup
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
import string
stop_words = set(stopwords.words('english'))
stop_words.update(string.punctuation)
import os
#selecting the files to scrape: fdata -> files
#please change the directory to run on your device
fdata = r"./input/"
files=os.listdir(fdata)
#cleaning the text in every file: drop punctuation, stop words, digits, words shorter than 3 characters and other symbols
for file in files:
fname=file #called later, while associating word with the file it exists in for insertion in trie
file=open(fdata+str(file), encoding="utf8")
soup = BeautifulSoup(file.read(), 'html.parser')
#filter the soup
[script.extract() for script in soup.findAll('script')]
[style.extract() for style in soup.findAll('style')]
#gather words from filtered soup
words = word_tokenize(soup.get_text())
# remove the words containing punctuation
words = [i for i in words if all(j not in string.punctuation for j in i)]
#filtering words and cleaning the data to insert in trie
for word in words:
        if word.lower() not in stop_words and len(word) > 2 and not word.isdigit():
# build compressed trie tree
try:
                # skip the words which can't be encoded as ASCII
                word = word.lower().strip().encode('ascii')
            except UnicodeEncodeError:
                pass
else:
#inserting words into tree
insert(root, word.decode("utf-8"), fname)
# Asking the user for input word that we search
Enter = input("Please enter what you would like to search for: ")
#In case of a multiple-word search
inp = Enter.split(' ')
rank = {}
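#rank maps each filename to its accumulated hit count across all the query words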
#searching for each word of the input
for word in inp:
#search in trie, store the result in dic
boolw,dic = find_prefix(root, word.lower())
#ranking the files in which the word was present
for key in dic:
if key not in rank:
rank[key] = dic[key]
else:
rank[key] = rank[key] + dic[key]
#rank files by how many times the words appear - sort ascending and reverse so we display
# the files in order of relevance
items=[(v,k) for k,v in rank.items()]
items.sort()
items.reverse()
#displaying search results
if not items:
print("No results")
else:
print("Results : ")
    #printing all the files the input was found in, in order of maximum occurrences
for key in items:
print(key)
|
normal
|
{
"blob_id": "dcda8f26a06145579a9be6e5fbfdaed83d4908da",
"index": 2459,
"step-1": "<mask token>\n\n\nclass TrieNode(object):\n\n def __init__(self, char: str):\n self.char = char\n self.children = []\n self.word_finished = False\n self.counter = 1\n self.OccurrenceList = {}\n\n\n<mask token>\n\n\ndef find_prefix(root, prefix: str) ->Tuple[bool, int]:\n node = root\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n print('Word Not Found: ' + prefix)\n else:\n print('Word Found: ' + prefix)\n return True, node.OccurrenceList\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TrieNode(object):\n\n def __init__(self, char: str):\n self.char = char\n self.children = []\n self.word_finished = False\n self.counter = 1\n self.OccurrenceList = {}\n\n\n<mask token>\n\n\ndef insert(root, word: str, document):\n node = root\n for char in word:\n found_in_child = False\n for child in node.children:\n if child.char == char:\n child.counter += 1\n node = child\n found_in_child = True\n break\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n node = new_node\n node.word_finished = True\n if document not in node.OccurrenceList:\n node.OccurrenceList[document] = 1\n node.OccurrenceList[document] = node.OccurrenceList[document] + 1\n\n\ndef find_prefix(root, prefix: str) ->Tuple[bool, int]:\n node = root\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n print('Word Not Found: ' + prefix)\n else:\n print('Word Found: ' + prefix)\n return True, node.OccurrenceList\n\n\n<mask token>\nnltk.download('stopwords')\nnltk.download('punkt')\n<mask token>\nstop_words.update(string.punctuation)\n<mask token>\nfor file in files:\n fname = file\n file = open(fdata + str(file), encoding='utf8')\n soup = BeautifulSoup(file.read(), 'html.parser')\n [script.extract() for script in soup.findAll('script')]\n [style.extract() for style in soup.findAll('style')]\n words = word_tokenize(soup.get_text())\n words = [i for i in words if all(j not in string.punctuation for j in i)]\n for word in words:\n if word.lower() not in stop_words and len(word) > 2 and word.isdigit(\n ) == False:\n try:\n word = word.lower().strip().encode('ascII')\n except:\n a = 1\n else:\n insert(root, word.decode('utf-8'), fname)\n<mask token>\nfor word in inp:\n boolw, dic = find_prefix(root, word.lower())\n for key in dic:\n if key not in rank:\n rank[key] = dic[key]\n else:\n rank[key] = rank[key] + dic[key]\n<mask token>\nitems.sort()\nitems.reverse()\nif not items:\n print('No results')\nelse:\n print('Results : ')\n for key in items:\n print(key)\n",
"step-3": "<mask token>\n\n\nclass TrieNode(object):\n\n def __init__(self, char: str):\n self.char = char\n self.children = []\n self.word_finished = False\n self.counter = 1\n self.OccurrenceList = {}\n\n\nroot = TrieNode('*')\n\n\ndef insert(root, word: str, document):\n node = root\n for char in word:\n found_in_child = False\n for child in node.children:\n if child.char == char:\n child.counter += 1\n node = child\n found_in_child = True\n break\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n node = new_node\n node.word_finished = True\n if document not in node.OccurrenceList:\n node.OccurrenceList[document] = 1\n node.OccurrenceList[document] = node.OccurrenceList[document] + 1\n\n\ndef find_prefix(root, prefix: str) ->Tuple[bool, int]:\n node = root\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n print('Word Not Found: ' + prefix)\n else:\n print('Word Found: ' + prefix)\n return True, node.OccurrenceList\n\n\n<mask token>\nnltk.download('stopwords')\nnltk.download('punkt')\n<mask token>\nstop_words = set(stopwords.words('english'))\nstop_words.update(string.punctuation)\n<mask token>\nfdata = './input/'\nfiles = os.listdir(fdata)\nfor file in files:\n fname = file\n file = open(fdata + str(file), encoding='utf8')\n soup = BeautifulSoup(file.read(), 'html.parser')\n [script.extract() for script in soup.findAll('script')]\n [style.extract() for style in soup.findAll('style')]\n words = word_tokenize(soup.get_text())\n words = [i for i in words if all(j not in string.punctuation for j in i)]\n for word in words:\n if word.lower() not in stop_words and len(word) > 2 and word.isdigit(\n ) == False:\n try:\n word = word.lower().strip().encode('ascII')\n except:\n a = 1\n else:\n insert(root, word.decode('utf-8'), fname)\nEnter = input('Please enter what you would like to search for: ')\ninp = Enter.split(' ')\nrank = {}\nfor word in inp:\n boolw, dic = find_prefix(root, word.lower())\n for key in dic:\n if key not in rank:\n rank[key] = dic[key]\n else:\n rank[key] = rank[key] + dic[key]\nitems = [(v, k) for k, v in rank.items()]\nitems.sort()\nitems.reverse()\nif not items:\n print('No results')\nelse:\n print('Results : ')\n for key in items:\n print(key)\n",
"step-4": "from typing import Tuple\n\n\nclass TrieNode(object):\n\n def __init__(self, char: str):\n self.char = char\n self.children = []\n self.word_finished = False\n self.counter = 1\n self.OccurrenceList = {}\n\n\nroot = TrieNode('*')\n\n\ndef insert(root, word: str, document):\n node = root\n for char in word:\n found_in_child = False\n for child in node.children:\n if child.char == char:\n child.counter += 1\n node = child\n found_in_child = True\n break\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n node = new_node\n node.word_finished = True\n if document not in node.OccurrenceList:\n node.OccurrenceList[document] = 1\n node.OccurrenceList[document] = node.OccurrenceList[document] + 1\n\n\ndef find_prefix(root, prefix: str) ->Tuple[bool, int]:\n node = root\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n print('Word Not Found: ' + prefix)\n else:\n print('Word Found: ' + prefix)\n return True, node.OccurrenceList\n\n\nfrom bs4 import BeautifulSoup\nimport nltk\nnltk.download('stopwords')\nnltk.download('punkt')\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport re\nimport string\nstop_words = set(stopwords.words('english'))\nstop_words.update(string.punctuation)\nimport os\nfdata = './input/'\nfiles = os.listdir(fdata)\nfor file in files:\n fname = file\n file = open(fdata + str(file), encoding='utf8')\n soup = BeautifulSoup(file.read(), 'html.parser')\n [script.extract() for script in soup.findAll('script')]\n [style.extract() for style in soup.findAll('style')]\n words = word_tokenize(soup.get_text())\n words = [i for i in words if all(j not in string.punctuation for j in i)]\n for word in words:\n if word.lower() not in stop_words and len(word) > 2 and word.isdigit(\n ) == False:\n try:\n word = word.lower().strip().encode('ascII')\n except:\n a = 1\n else:\n insert(root, word.decode('utf-8'), fname)\nEnter = input('Please enter what you would like to search for: ')\ninp = Enter.split(' ')\nrank = {}\nfor word in inp:\n boolw, dic = find_prefix(root, word.lower())\n for key in dic:\n if key not in rank:\n rank[key] = dic[key]\n else:\n rank[key] = rank[key] + dic[key]\nitems = [(v, k) for k, v in rank.items()]\nitems.sort()\nitems.reverse()\nif not items:\n print('No results')\nelse:\n print('Results : ')\n for key in items:\n print(key)\n",
"step-5": "from typing import Tuple\n\n#Creating a trie structure and it's node\nclass TrieNode(object): \n def __init__(self, char: str):\n self.char = char\n self.children = []\n #the last character of the word.`\n self.word_finished = False\n #counter for this character\n self.counter = 1\n #list of all the occurences of the prefix in the documents \n self.OccurrenceList={}\n \n#Initialize the root of the trie \nroot = TrieNode('*')\n\n#Adding a word in the trie structure\ndef insert(root, word: str,document):\n node = root\n for char in word:\n found_in_child = False\n # Search for the character in the children of the present `node`\n for child in node.children:\n if child.char == char:\n #the char of the word to be inserted is already present in trie; increment the counter of this char \n child.counter += 1\n # move the pointer to the node's child to continue the insertion of the rest of the word\n node = child\n found_in_child = True\n break\n # this char has never been inserted before, create node and insert it\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n # And then point node to the new child\n node = new_node\n \n # At this point, word is inserted- we mark the end of this word\n node.word_finished = True\n if document not in node.OccurrenceList: #If document is not in OccurenceList for that word\n node.OccurrenceList[document]=1 # Create a new key with document name\n node.OccurrenceList[document]= node.OccurrenceList[document]+1 # We append the position in the document \n \n#Performing the search in our files for the input word, using the trie structure we created above\n#We will first check for the word's existence, if it exists- return file name and occurence number \ndef find_prefix(root, prefix: str) -> Tuple[bool, int]:\n node = root\n #handling the case of an empty trie ie the root node has no children\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n # Search through all the children of the node the pointer is pointing to\n for child in node.children:\n if child.char == char:\n #the char of the input word exists in trie\n char_not_found = False\n # increment the pointer to go further down the trie to check for the remaining chars in prefix\n node = child\n break\n #letting the user know that the input word of prefix doesn't exist in the trie \n if char_not_found:\n print(\"Word Not Found: \" +prefix)\n #input word found, return the found status, along the files in which it exists\n else: \n print(\"Word Found: \" +prefix)\n return True,node.OccurrenceList\n\n#for scrapping words from website\nfrom bs4 import BeautifulSoup\nimport nltk\nnltk.download('stopwords')\nnltk.download('punkt')\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport re\nimport string\nstop_words = set(stopwords.words('english'))\nstop_words.update(string.punctuation) \nimport os\n\n#selecting file for scrapping into fdata->files\n#please change the dircectory to run on your device\nfdata = r\"./input/\"\nfiles=os.listdir(fdata)\n#cleaning the text in every every file from punctuations, stop words, digits, words less than length 2 and other symbols\nfor file in files: \n fname=file #called later, while associating word with the file it exists in for insertion in trie\n file=open(fdata+str(file), encoding=\"utf8\")\n soup = BeautifulSoup(file.read(), 'html.parser')\n #filter the soup\n [script.extract() for script in soup.findAll('script')]\n [style.extract() for style in 
soup.findAll('style')]\n #gather words from filtered soup\n words = word_tokenize(soup.get_text())\n # remove the words containing punctuation\n words = [i for i in words if all(j not in string.punctuation for j in i)]\n #filtering words and cleaning the data to insert in trie\n for word in words:\n if word.lower() not in stop_words and len(word) > 2 and word.isdigit() == False:\n # build compressed trie tree\n try:\n # remove the words whcih can't encode to ascII\n word = word.lower().strip().encode('ascII')\n except:\n # print word\n a = 1\n else:\n #inserting words into tree\n insert(root, word.decode(\"utf-8\"), fname)\n \n# Asking the user for input word that we search \nEnter = input(\"Please enter what you would like to search for: \")\n#In case if multiple word search\ninp = Enter.split(' ')\nrank = {}\n#searching for each word of the input\nfor word in inp:\n #search in trie, store the result in dic\n boolw,dic = find_prefix(root, word.lower())\n#ranking the files in which the word was present\n for key in dic:\n if key not in rank:\n rank[key] = dic[key]\n else:\n rank[key] = rank[key] + dic[key]\n#ranking website based on number of time word present - sort them in acsending order and reversing them so we display \n# the websites in order of relevance\nitems=[(v,k) for k,v in rank.items()]\nitems.sort()\nitems.reverse()\n#displaying search results\nif not items:\n print(\"No results\")\nelse:\n print(\"Results : \")\n#printing all the files the input was found in, in order of maximum occurences \n for key in items:\n print(key)\n \n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
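One thing worth flagging in the step-5 find_prefix above: the success `return` sits inside the per-character loop, so the function exits (and prints "Word Found") after matching only the first character of the prefix. A hedged corrected sketch, keeping the record's TrieNode/OccurrenceList conventions but moving the return after the loop:

from typing import Tuple

def find_prefix_fixed(root, prefix: str) -> Tuple[bool, dict]:
    # Walk the trie one character at a time; bail out on the first miss.
    node = root
    if not root.children:
        return False, {}
    for char in prefix:
        for child in node.children:
            if child.char == char:
                node = child
                break
        else:
            # No child matched this character of the prefix.
            print('Word Not Found: ' + prefix)
            return False, {}
    # Only now has the whole prefix been matched.
    print('Word Found: ' + prefix)
    return True, node.OccurrenceList

Returning an empty dict on a miss (instead of the original `False, 0`) also lets the ranking loop iterate the result unconditionally.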
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(drugs)
<|reserved_special_token_0|>
for drug in drugs:
with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug
) as json_file:
for record in json.load(json_file):
output_records.append({field: (str(record[field]) if field in
record else '') for field in fields})
write_to_csv(
'/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',
output_records)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
drugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')
print(drugs)
output_records = []
fields = ['equiv_name', 'default_quantity', 'root', 'dosage', 'generic',
'drug_id', 'form_name', 'slug']
for drug in drugs:
with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug
) as json_file:
for record in json.load(json_file):
output_records.append({field: (str(record[field]) if field in
record else '') for field in fields})
write_to_csv(
'/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',
output_records)
<|reserved_special_token_1|>
import requests
import urllib.request
from utilities.read_write_utilities import read_set, write_to_csv
import time
from bs4 import BeautifulSoup
import pickledb
import json
import glob
import csv
drugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')
print(drugs)
output_records = []
fields = ['equiv_name', 'default_quantity', 'root', 'dosage', 'generic',
'drug_id', 'form_name', 'slug']
for drug in drugs:
with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug
) as json_file:
for record in json.load(json_file):
output_records.append({field: (str(record[field]) if field in
record else '') for field in fields})
write_to_csv(
'/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',
output_records)
<|reserved_special_token_1|>
import requests
import urllib.request
from utilities.read_write_utilities import read_set,write_to_csv
import time
from bs4 import BeautifulSoup
import pickledb
import json
import glob
import csv
drugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')
print(drugs)
output_records = []
# fields = ["equiv_name","coupon_network","npi","default_quantity","price_type","scrape_date","price","root","dosage",
# "generic","drug_id","date","form_name","ncpdp","pharmacy","geo","slug","quantity"]
fields = ["equiv_name","default_quantity","root","dosage","generic","drug_id","form_name","slug"]
for drug in drugs:
# print('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s'%drug)
with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s'%drug) as json_file:
for record in json.load(json_file):
# print(record)
output_records.append({field:str(record[field]) if field in record else '' for field in fields})
write_to_csv('/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',output_records)
# filename = '/Users/sdey/Downloads/privia_utilization_data.csv'
# output_filename = '/Users/sdey/Downloads/privia_utilization_raw_data.csv'
#
# with open(filename, 'r') as input_file:
# with open(output_filename, 'w') as output_file:
# reader = csv.DictReader(input_file)
# writer = csv.DictWriter(output_file, fieldnames=reader.fieldnames)
# writer.writeheader()
# number_of_lines = 0
# for row in reader:
# row['Medication Name'] = row['Medication Name'].replace(',',':')
# writer.writerow(row)
# number_of_lines+=1
# if number_of_lines % 10000 == 0 :
# print('%d lines'%number_of_lines)
#
# filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515.csv'
# output_filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515_output.csv'
#
# with open(filename, 'r') as input_file:
# with open(output_filename, 'w') as output_file:
# reader = csv.DictReader(input_file)
# fieldnames = ['ndc','nadac_per_unit','effective_date','pricing_unit','otc',
# 'explanation_code','classification_for_rate_setting','corresponding_generic_drug_nadac_per_unit',
# 'corresponding_generic_drug_effective_date','as_of_date']
# writer = csv.DictWriter(output_file, fieldnames=fieldnames)
# writer.writeheader()
# number_of_lines = 0
# for row in reader:
# row['explanation_code'] = row['explanation_code'].replace('\"','').replace(',','').replace(' ','')
# row.pop('ndc_description')
# row.pop('pharmacy_type_indicator')
# writer.writerow(row)
# number_of_lines+=1
# if number_of_lines % 10000 == 0 :
# print('%d lines'%number_of_lines)
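# Editorial sketch: read_set and write_to_csv come from the project-local
# utilities.read_write_utilities module, which is not shown here. The two
# definitions below are assumptions reconstructed purely from the call sites
# above (read_set yields drug file names; write_to_csv receives a list of
# dicts that all share the `fields` keys); the real helpers may differ.
def read_set_sketch(path):
    # One stripped, non-empty line per entry, deduplicated.
    with open(path) as f:
        return {line.strip() for line in f if line.strip()}

def write_to_csv_sketch(path, records):
    # DictWriter fits because every record carries the same keys.
    if not records:
        return
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=list(records[0].keys()))
        writer.writeheader()
        writer.writerows(records)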
|
flexible
|
{
"blob_id": "e7f511b97f316157a768203afe9f36ea834ebb6c",
"index": 5493,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(drugs)\n<mask token>\nfor drug in drugs:\n with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug\n ) as json_file:\n for record in json.load(json_file):\n output_records.append({field: (str(record[field]) if field in\n record else '') for field in fields})\nwrite_to_csv(\n '/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',\n output_records)\n",
"step-3": "<mask token>\ndrugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')\nprint(drugs)\noutput_records = []\nfields = ['equiv_name', 'default_quantity', 'root', 'dosage', 'generic',\n 'drug_id', 'form_name', 'slug']\nfor drug in drugs:\n with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug\n ) as json_file:\n for record in json.load(json_file):\n output_records.append({field: (str(record[field]) if field in\n record else '') for field in fields})\nwrite_to_csv(\n '/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',\n output_records)\n",
"step-4": "import requests\nimport urllib.request\nfrom utilities.read_write_utilities import read_set, write_to_csv\nimport time\nfrom bs4 import BeautifulSoup\nimport pickledb\nimport json\nimport glob\nimport csv\ndrugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')\nprint(drugs)\noutput_records = []\nfields = ['equiv_name', 'default_quantity', 'root', 'dosage', 'generic',\n 'drug_id', 'form_name', 'slug']\nfor drug in drugs:\n with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug\n ) as json_file:\n for record in json.load(json_file):\n output_records.append({field: (str(record[field]) if field in\n record else '') for field in fields})\nwrite_to_csv(\n '/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',\n output_records)\n",
"step-5": "import requests\nimport urllib.request\nfrom utilities.read_write_utilities import read_set,write_to_csv\nimport time\nfrom bs4 import BeautifulSoup\nimport pickledb\nimport json\nimport glob\nimport csv\n\n\ndrugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')\nprint(drugs)\noutput_records = []\n# fields = [\"equiv_name\",\"coupon_network\",\"npi\",\"default_quantity\",\"price_type\",\"scrape_date\",\"price\",\"root\",\"dosage\",\n# \"generic\",\"drug_id\",\"date\",\"form_name\",\"ncpdp\",\"pharmacy\",\"geo\",\"slug\",\"quantity\"]\n\nfields = [\"equiv_name\",\"default_quantity\",\"root\",\"dosage\",\"generic\",\"drug_id\",\"form_name\",\"slug\"]\n\nfor drug in drugs:\n # print('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s'%drug)\n with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s'%drug) as json_file:\n for record in json.load(json_file):\n # print(record)\n output_records.append({field:str(record[field]) if field in record else '' for field in fields})\nwrite_to_csv('/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',output_records)\n\n\n# filename = '/Users/sdey/Downloads/privia_utilization_data.csv'\n# output_filename = '/Users/sdey/Downloads/privia_utilization_raw_data.csv'\n#\n# with open(filename, 'r') as input_file:\n# with open(output_filename, 'w') as output_file:\n# reader = csv.DictReader(input_file)\n# writer = csv.DictWriter(output_file, fieldnames=reader.fieldnames)\n# writer.writeheader()\n# number_of_lines = 0\n# for row in reader:\n# row['Medication Name'] = row['Medication Name'].replace(',',':')\n# writer.writerow(row)\n# number_of_lines+=1\n# if number_of_lines % 10000 == 0 :\n# print('%d lines'%number_of_lines)\n\n#\n# filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515.csv'\n# output_filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515_output.csv'\n#\n# with open(filename, 'r') as input_file:\n# with open(output_filename, 'w') as output_file:\n# reader = csv.DictReader(input_file)\n# fieldnames = ['ndc','nadac_per_unit','effective_date','pricing_unit','otc',\n# 'explanation_code','classification_for_rate_setting','corresponding_generic_drug_nadac_per_unit',\n# 'corresponding_generic_drug_effective_date','as_of_date']\n# writer = csv.DictWriter(output_file, fieldnames=fieldnames)\n# writer.writeheader()\n# number_of_lines = 0\n# for row in reader:\n# row['explanation_code'] = row['explanation_code'].replace('\\\"','').replace(',','').replace(' ','')\n# row.pop('ndc_description')\n# row.pop('pharmacy_type_indicator')\n# writer.writerow(row)\n# number_of_lines+=1\n# if number_of_lines % 10000 == 0 :\n# print('%d lines'%number_of_lines)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import torch
import numpy as np
from torch.autograd import Variable
from util import helpers
from util.metrics import ECELoss, ece_score
import sklearn.metrics as skm
import os
import pandas as pd
import pickle
def eval(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):
f1 = open(path_in, 'w')
f2 = open(path_out, 'w')
ece_criterion = ECELoss().cuda()
net.eval()
net.training = False
correct = 0
total = 0
logits_list = []
labels_list = []
confidence_list = []
correct_list = []
predicted_list = []
sne_embeddings = []
print('| Classification confidence for ID is saved at: {}'.format(path_in))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, hidden = net(inputs)
# this is the OOD magic
nnOutputs = helpers.softmax(outputs)
for k in range(len(inputs)):
f1.write("{}\n".format(np.max(nnOutputs[k])))
confidence_list.append(np.max(nnOutputs[k]))
sne_embeddings.append(hidden.data.cpu()[k].numpy())
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
correct_list.extend(predicted.eq(targets.data).cpu().tolist())
predicted_list.extend(predicted.cpu().tolist())
logits_list.append(outputs.data)
labels_list.append(targets.data)
logits = torch.cat(logits_list).cuda()
labels = torch.cat(labels_list).cuda()
ece = ece_criterion(logits, labels)
if save_dir:
with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:
pickle.dump(sne_embeddings, f)
with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:
            # flatten the per-batch label tensors so the targets file lines up
            # one-to-one with mcp_pred.txt (eval_cifar10 below does the same)
            for item in torch.cat(labels_list).cpu().tolist():
                f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:
for item in predicted_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:
for item in correct_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:
for item in confidence_list:
f.write('{}\n'.format(item))
acc = 100.*correct/total
acc_list = (sum(correct_list)/len(correct_list))
    # calculate AUROC for classification accuracy
fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0
auroc_classification = skm.auc(fpr, tpr)
print("| Test Result\tAcc@1: %.2f%%" %(acc))
print(f'| ECE: {ece.item()}')
# print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')
print(f'| Acc list: {acc_list}')
print(f'| AUROC classification: {auroc_classification}')
sne_embeddings_ood = []
print('| Classification confidence for OOD is saved at: {}'.format(path_out))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(oodloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, hidden = net(inputs)
# this is the OOD magic
nnOutputs = helpers.softmax(outputs)
for k in range(len(inputs)):
f2.write("{}\n".format(np.max(nnOutputs[k])))
sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())
if save_dir:
with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:
pickle.dump(sne_embeddings_ood, f)
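# Editorial sketch: ECELoss comes from the project-local util.metrics module,
# which is not shown, so the function below is the standard binned
# expected-calibration-error formula rather than necessarily the project's
# exact code. It consumes the same confidence_list/correct_list built above.
def ece_sketch(confidences, corrects, n_bins=15):
    confidences = np.asarray(confidences, dtype=float)
    corrects = np.asarray(corrects, dtype=float)
    bin_edges = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(bin_edges[:-1], bin_edges[1:]):
        in_bin = (confidences > lo) & (confidences <= hi)
        if in_bin.any():
            # |accuracy - mean confidence| weighted by the bin's sample share
            gap = abs(corrects[in_bin].mean() - confidences[in_bin].mean())
            ece += in_bin.mean() * gap
    return ece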
def eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):
f1 = open(path_in, 'w')
f2 = open(path_out, 'w')
ece_criterion = ECELoss().cuda()
net.eval()
net.training = False
correct = 0
total = 0
logits_list = []
labels_list = []
confidence_list = []
correct_list = []
predicted_list = []
sne_embeddings = []
print('| Classification confidence for ID is saved at: {}'.format(path_in))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, hidden = net(inputs)
# this is the OOD magic
nnOutputs = helpers.softmax(outputs)
for k in range(len(inputs)):
f1.write("{}\n".format(np.max(nnOutputs[k])))
confidence_list.append(np.max(nnOutputs[k]))
sne_embeddings.append(hidden.data.cpu()[k].numpy())
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
correct_list.extend(predicted.eq(targets.data).cpu().tolist())
predicted_list.extend(predicted.cpu().tolist())
logits_list.append(outputs.data)
labels_list.append(targets.data)
logits = torch.cat(logits_list).cuda()
labels = torch.cat(labels_list).cuda()
labels_list = torch.cat(labels_list).cpu().tolist()
ece = ece_criterion(logits, labels)
if save_dir:
with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:
pickle.dump(sne_embeddings, f)
with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:
for item in labels_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:
for item in predicted_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:
for item in correct_list:
f.write('{}\n'.format(item))
with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w') as f:
for item in confidence_list:
f.write('{}\n'.format(item))
acc = 100.*correct/total
acc_list = (sum(correct_list)/len(correct_list))
    # calculate AUROC for classification accuracy
fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0
auroc_classification = skm.auc(fpr, tpr)
print("| Test Result\tAcc@1: %.2f%%" %(acc))
print(f'| ECE: {ece.item()}')
# print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')
print(f'| Acc list: {acc_list}')
print(f'| AUROC classification: {auroc_classification}')
sne_embeddings_ood = []
print('| Classification confidence for OOD is saved at: {}'.format(path_out))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(oodloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, hidden = net(inputs)
# this is the OOD magic
nnOutputs = helpers.softmax(outputs)
for k in range(len(inputs)):
f2.write("{}\n".format(np.max(nnOutputs[k])))
sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())
if save_dir:
with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb') as f:
pickle.dump(sne_embeddings_ood, f)
def train():
pass
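# Editorial sketch: eval()/eval_cifar10() above write one max-softmax
# confidence per line to path_in (in-distribution) and path_out (OOD) but
# never compute a detection score from them. Assuming those one-float-per-line
# file formats, OOD-detection AUROC can be recovered like this:
def ood_auroc(path_in, path_out):
    conf_in = np.loadtxt(path_in)
    conf_out = np.loadtxt(path_out)
    scores = np.concatenate([conf_in, conf_out])
    # ID samples are the positive class (1); higher confidence should mean
    # in-distribution, which matches pos_label=1 below.
    labels = np.concatenate([np.ones_like(conf_in), np.zeros_like(conf_out)])
    fpr, tpr, _ = skm.roc_curve(y_true=labels, y_score=scores, pos_label=1)
    return skm.auc(fpr, tpr)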
|
normal
|
{
"blob_id": "edd2b7b453d7fa33e6cca3b5dbc895f034a9e22a",
"index": 2746,
"step-1": "<mask token>\n\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=\n True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w'\n ) as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb'\n ) as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=\n True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w'\n ) as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb'\n ) as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef train():\n pass\n",
"step-3": "<mask token>\n\n\ndef eval(path_in, path_out, net, testloader, oodloader, use_cuda=True,\n save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item.cpu().numpy()[0]))\n with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=\n True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in 
enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w'\n ) as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb'\n ) as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef train():\n pass\n",
"step-4": "import torch\nimport numpy as np\nfrom torch.autograd import Variable\nfrom util import helpers\nfrom util.metrics import ECELoss, ece_score\nimport sklearn.metrics as skm\nimport os\nimport pandas as pd\nimport pickle\n\n\ndef eval(path_in, path_out, net, testloader, oodloader, use_cuda=True,\n save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item.cpu().numpy()[0]))\n with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=\n True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n 
correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write('{}\\n'.format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w'\n ) as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.0 * correct / total\n acc_list = sum(correct_list) / len(correct_list)\n fpr, tpr, _ = skm.roc_curve(y_true=correct_list, y_score=\n confidence_list, pos_label=1)\n auroc_classification = skm.auc(fpr, tpr)\n print('| Test Result\\tAcc@1: %.2f%%' % acc)\n print(f'| ECE: {ece.item()}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(\n path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write('{}\\n'.format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb'\n ) as f:\n pickle.dump(sne_embeddings_ood, f)\n\n\ndef train():\n pass\n",
"step-5": "import torch\nimport numpy as np \nfrom torch.autograd import Variable\n\nfrom util import helpers\nfrom util.metrics import ECELoss, ece_score\nimport sklearn.metrics as skm\nimport os\nimport pandas as pd\nimport pickle\n\ndef eval(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):\n f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n # this is the OOD magic\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write(\"{}\\n\".format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n\n \n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item.cpu().numpy()[0]))\n with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.*correct/total\n acc_list = (sum(correct_list)/len(correct_list))\n\n # calculate AUROC for classifcation accuracy\n fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0\n auroc_classification = skm.auc(fpr, tpr)\n \n print(\"| Test Result\\tAcc@1: %.2f%%\" %(acc))\n print(f'| ECE: {ece.item()}')\n # print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n\n sne_embeddings_ood = []\n\n print('| Classification confidence for OOD is saved at: {}'.format(path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n # this is the OOD magic\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write(\"{}\\n\".format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings_ood, f)\n\ndef eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):\n 
f1 = open(path_in, 'w')\n f2 = open(path_out, 'w')\n ece_criterion = ECELoss().cuda()\n net.eval()\n net.training = False\n correct = 0\n total = 0\n logits_list = []\n labels_list = []\n confidence_list = []\n correct_list = []\n predicted_list = []\n sne_embeddings = []\n print('| Classification confidence for ID is saved at: {}'.format(path_in))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n # this is the OOD magic\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f1.write(\"{}\\n\".format(np.max(nnOutputs[k])))\n confidence_list.append(np.max(nnOutputs[k]))\n sne_embeddings.append(hidden.data.cpu()[k].numpy())\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n correct_list.extend(predicted.eq(targets.data).cpu().tolist())\n predicted_list.extend(predicted.cpu().tolist())\n logits_list.append(outputs.data)\n labels_list.append(targets.data)\n \n logits = torch.cat(logits_list).cuda()\n labels = torch.cat(labels_list).cuda()\n labels_list = torch.cat(labels_list).cpu().tolist()\n ece = ece_criterion(logits, labels)\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings, f)\n with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:\n for item in labels_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:\n for item in predicted_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:\n for item in correct_list:\n f.write('{}\\n'.format(item))\n with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w') as f:\n for item in confidence_list:\n f.write('{}\\n'.format(item))\n acc = 100.*correct/total\n acc_list = (sum(correct_list)/len(correct_list))\n\n # calculate AUROC for classifcation accuracy\n fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0\n auroc_classification = skm.auc(fpr, tpr)\n \n print(\"| Test Result\\tAcc@1: %.2f%%\" %(acc))\n print(f'| ECE: {ece.item()}')\n # print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')\n print(f'| Acc list: {acc_list}')\n print(f'| AUROC classification: {auroc_classification}')\n sne_embeddings_ood = []\n print('| Classification confidence for OOD is saved at: {}'.format(path_out))\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(oodloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs), Variable(targets)\n outputs, hidden = net(inputs)\n # this is the OOD magic\n nnOutputs = helpers.softmax(outputs)\n for k in range(len(inputs)):\n f2.write(\"{}\\n\".format(np.max(nnOutputs[k])))\n sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())\n\n\n if save_dir:\n with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb') as f:\n pickle.dump(sne_embeddings_ood, f)\ndef train():\n pass\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- python -*-
# ex: set syntax=python:
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# See master.experimental/slaves.cfg for documentation.
slaves = [
################################################################################
# Linux
################################################################################
# {
# 'master': 'Chromium',
# 'hostname': 'build59-m1',
# 'builder': 'Linux Builder x64',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '64',
# },
# {
# 'master': 'Chromium',
# 'hostname': 'vm119-m1',
# 'builder': 'Linux Tests x64',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '64',
# },
# {
# 'master': 'Chromium',
# 'builder': 'Linux (aura)',
# 'hostname': 'vm80-m1',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '32',
# },
# {
# 'master': 'Chromium',
# 'hostname': 'build13-m1',
# 'builder': 'Linux Builder (dbg)',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '32',
# },
# {
# 'master': 'Chromium',
# 'hostname': 'vm128-m1',
# 'builder': 'Linux Tests (dbg)(1)',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '32',
# },
# {
# 'master': 'Chromium',
# 'hostname': 'vm129-m1',
# 'builder': 'Linux Tests (dbg)(2)',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '32',
# },
# {
# 'master': 'Chromium',
# 'builder': 'Linux Sync',
# 'hostname': 'vm121-m1',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '64',
# },
# {
# 'master': 'Chromium',
# 'builder': 'Linux Clang (dbg)',
# 'hostname': 'vm79-m1',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '64',
# },
# ################################################################################
# # Android
# ################################################################################
# {
# 'master': 'Chromium',
# 'hostname': 'vm138-m1',
# 'builder': 'Android Builder',
# 'os': 'linux',
# 'version': 'lucid',
# 'bits': '64',
# },
]
|
normal
|
{
"blob_id": "e807cef534226f3efb4a8df471598727fa068f02",
"index": 3805,
"step-1": "<mask token>\n",
"step-2": "slaves = []\n",
"step-3": "# -*- python -*-\n# ex: set syntax=python:\n\n# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n# See master.experimental/slaves.cfg for documentation.\n\n\nslaves = [\n################################################################################\n# Linux\n################################################################################\n# {\n# 'master': 'Chromium',\n# 'hostname': 'build59-m1',\n# 'builder': 'Linux Builder x64',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '64',\n# },\n# {\n# 'master': 'Chromium',\n# 'hostname': 'vm119-m1',\n# 'builder': 'Linux Tests x64',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '64',\n# },\n# {\n# 'master': 'Chromium',\n# 'builder': 'Linux (aura)',\n# 'hostname': 'vm80-m1',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '32',\n# },\n# {\n# 'master': 'Chromium',\n# 'hostname': 'build13-m1',\n# 'builder': 'Linux Builder (dbg)',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '32',\n# },\n# {\n# 'master': 'Chromium',\n# 'hostname': 'vm128-m1',\n# 'builder': 'Linux Tests (dbg)(1)',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '32',\n# },\n# {\n# 'master': 'Chromium',\n# 'hostname': 'vm129-m1',\n# 'builder': 'Linux Tests (dbg)(2)',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '32',\n# },\n# {\n# 'master': 'Chromium',\n# 'builder': 'Linux Sync',\n# 'hostname': 'vm121-m1',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '64',\n# },\n# {\n# 'master': 'Chromium',\n# 'builder': 'Linux Clang (dbg)',\n# 'hostname': 'vm79-m1',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '64',\n# },\n# ################################################################################\n# # Android\n# ################################################################################\n# {\n# 'master': 'Chromium',\n# 'hostname': 'vm138-m1',\n# 'builder': 'Android Builder',\n# 'os': 'linux',\n# 'version': 'lucid',\n# 'bits': '64',\n# },\n]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Generated by Django 2.2.1 on 2019-05-23 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('presentes', '0015_caso_lugar_del_hecho'),
]
operations = [
migrations.AddField(
model_name='organizacion',
name='descripcion',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='organizacion',
name='email',
field=models.CharField(default='', max_length=200),
),
]
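# For reference: a migration like this is generated and applied with the
# standard manage.py commands, run from the project root:
#
#   python manage.py makemigrations presentes
#   python manage.py migrate presentes
#
# Both added fields carry defaults ('' in each case), so the migration is
# safe to run against existing Organizacion rows.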
|
normal
|
{
"blob_id": "5cd767564e8a261561e141abeebb5221cb3ef2c2",
"index": 6919,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('presentes', '0015_caso_lugar_del_hecho')]\n operations = [migrations.AddField(model_name='organizacion', name=\n 'descripcion', field=models.TextField(default='')), migrations.\n AddField(model_name='organizacion', name='email', field=models.\n CharField(default='', max_length=200))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('presentes', '0015_caso_lugar_del_hecho')]\n operations = [migrations.AddField(model_name='organizacion', name=\n 'descripcion', field=models.TextField(default='')), migrations.\n AddField(model_name='organizacion', name='email', field=models.\n CharField(default='', max_length=200))]\n",
"step-5": "# Generated by Django 2.2.1 on 2019-05-23 14:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('presentes', '0015_caso_lugar_del_hecho'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='organizacion',\n name='descripcion',\n field=models.TextField(default=''),\n ),\n migrations.AddField(\n model_name='organizacion',\n name='email',\n field=models.CharField(default='', max_length=200),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Write a program that asks the user to enter a word and then
capitalizes every other letter of that word. So if the user enters "rhinoceros",
the program should print "rHiNoCeRoS"""
word=str(input("please enter the word\n"))
count=0
for char in word:
if count==0:
print(char.upper(),end="")
count=1
else:
print(char.lower(),end="")
count=0
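# An equivalent, more idiomatic variant: build the whole string with
# enumerate()/join() before printing, instead of printing char by char.
result = ''.join(c.upper() if i % 2 else c.lower() for i, c in enumerate(word))
print(result)  # "rhinoceros" -> "rHiNoCeRoS"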
|
normal
|
{
"blob_id": "bc837d95ef22bd376f8b095e7aeb1f7d15c0e22e",
"index": 941,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor char in word:\n if count == 0:\n print(char.upper(), end='')\n count = 1\n else:\n print(char.lower(), end='')\n count = 0\n",
"step-3": "<mask token>\nword = str(input('please enter the word\\n'))\ncount = 0\nfor char in word:\n if count == 0:\n print(char.upper(), end='')\n count = 1\n else:\n print(char.lower(), end='')\n count = 0\n",
"step-4": "\"\"\"Write a program that asks the user to enter a word and then\ncapitalizes every other letter of that word. So if the user enters \"rhinoceros\",\nthe program should print \"rHiNoCeRoS\"\"\"\n\nword=str(input(\"please enter the word\\n\"))\ncount=0\nfor char in word:\n if count==0:\n print(char.upper(),end=\"\")\n count=1\n else:\n print(char.lower(),end=\"\")\n count=0\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.test import TestCase, Client
from accounts.models import Account
from .data import account
from rest_framework import status
class TestAccountRequests(TestCase):
def setUp(self):
self.client = Client()
self.superuser = Account.objects.create_superuser(**account)
def test_register_admin(self):
response = self.client.post(f'/account/register/', data=account,
content_type='application/json')
self.assertTrue(status.HTTP_200_OK, response.status_code)
def test_login(self):
data = {
'email': 'office@theoscoding.com',
'password': 'Pwd1q2w3e',
}
Account.objects.create(**data)
response = self.client.post(f'/account/login/', data=data,
content_type='application/json')
self.assertTrue(status.HTTP_200_OK, response.status_code)
|
flexible
|
{
"blob_id": "3d43bf0d0ca1df06b3647a33f88cee067eeff9f4",
"index": 2605,
"step-1": "<mask token>\n\n\nclass TestAccountRequests(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestAccountRequests(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n\n def test_register_admin(self):\n response = self.client.post(f'/account/register/', data=account,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestAccountRequests(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n\n def test_register_admin(self):\n response = self.client.post(f'/account/register/', data=account,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n\n def test_login(self):\n data = {'email': 'office@theoscoding.com', 'password': 'Pwd1q2w3e'}\n Account.objects.create(**data)\n response = self.client.post(f'/account/login/', data=data,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n",
"step-4": "from django.test import TestCase, Client\nfrom accounts.models import Account\nfrom .data import account\nfrom rest_framework import status\n\n\nclass TestAccountRequests(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n\n def test_register_admin(self):\n response = self.client.post(f'/account/register/', data=account,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n\n def test_login(self):\n data = {'email': 'office@theoscoding.com', 'password': 'Pwd1q2w3e'}\n Account.objects.create(**data)\n response = self.client.post(f'/account/login/', data=data,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n",
"step-5": "from django.test import TestCase, Client\n\nfrom accounts.models import Account\nfrom .data import account\nfrom rest_framework import status\n\n\nclass TestAccountRequests(TestCase):\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n\n def test_register_admin(self):\n response = self.client.post(f'/account/register/', data=account,\n content_type='application/json')\n\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n\n def test_login(self):\n data = {\n 'email': 'office@theoscoding.com',\n 'password': 'Pwd1q2w3e',\n }\n Account.objects.create(**data)\n response = self.client.post(f'/account/login/', data=data,\n content_type='application/json')\n\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
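These tests exercise Django's test Client against the register and login endpoints and run under python manage.py test. The account fixture is imported from a local data module that is not shown in this record; a plausible shape for it (hypothetical values):

# data.py -- hypothetical fixture matching Account.objects.create_superuser(**account)
account = {
    'email': 'admin@example.com',
    'password': 'Pwd1q2w3e',
}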
#(C)Inspire Search 2020/5/31 Coded by Tsubasa Kato (@_stingraze)
#Last edited on 2020/6/1 11:36AM JST
import sys
import spacy
import re
#gets query from argv[1]
text = sys.argv[1]
nlp = spacy.load('en_core_web_sm')
doc = nlp(text)
ahref = "<a href=\""
ahref2 = "\"\>"
#arrays for storing subject and object types
subj_array = []
obj_array = []
for d in doc:
#print((d.text, d.pos_, d.dep_))
word = d.text
pos = d.pos_
dep = d.dep_
#If it matches subject, do this
if re.search(r'subj', dep):
#URL to SuperAI Search
word2 = ahref + 'http://www.superai.online/solr/search.php?query='+ word + ahref2 + word + '</a>'
subj_array.append(word)
print (word2)
print (pos)
print (dep)
#If it matches object, do this
if re.search(r'obj', dep):
#URL to SuperAI Search
word2 = ahref + 'http://www.superai.online/solr/search.php?query='+ word + ahref2 + word + '</a>'
obj_array.append(word)
print (word2)
print (pos)
print (dep)
#Sorts both arrays
#TODO (note to self): study sorting further so these results can be
#visualized as a table, etc.
subj_array.sort()
obj_array.sort()
for subj in subj_array:
print (subj)
for obj in obj_array:
print (obj)
|
normal
|
{
"blob_id": "ecc001394c1f3bba78559cba7eeb216dd3a942d8",
"index": 4711,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor d in doc:\n word = d.text\n pos = d.pos_\n dep = d.dep_\n if re.search('subj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n subj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\n if re.search('obj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n obj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\nsubj_array.sort()\nobj_array.sort()\nfor subj in subj_array:\n print(subj)\nfor obj in obj_array:\n print(obj)\n",
"step-3": "<mask token>\ntext = sys.argv[1]\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(text)\nahref = '<a href=\"'\nahref2 = '\"\\\\>'\nsubj_array = []\nobj_array = []\nfor d in doc:\n word = d.text\n pos = d.pos_\n dep = d.dep_\n if re.search('subj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n subj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\n if re.search('obj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n obj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\nsubj_array.sort()\nobj_array.sort()\nfor subj in subj_array:\n print(subj)\nfor obj in obj_array:\n print(obj)\n",
"step-4": "import sys\nimport spacy\nimport re\ntext = sys.argv[1]\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(text)\nahref = '<a href=\"'\nahref2 = '\"\\\\>'\nsubj_array = []\nobj_array = []\nfor d in doc:\n word = d.text\n pos = d.pos_\n dep = d.dep_\n if re.search('subj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n subj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\n if re.search('obj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n obj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\nsubj_array.sort()\nobj_array.sort()\nfor subj in subj_array:\n print(subj)\nfor obj in obj_array:\n print(obj)\n",
"step-5": "#(C)Inspire Search 2020/5/31 Coded by Tsubasa Kato (@_stingraze)\n#Last edited on 2020/6/1 11:36AM JST\nimport sys\nimport spacy\nimport re\n#gets query from argv[1]\ntext = sys.argv[1]\n\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(text)\n\nahref = \"<a href=\\\"\"\nahref2 = \"\\\"\\>\"\n\n#arrays for storing subject and object types \nsubj_array = []\nobj_array = []\n\nfor d in doc:\n\t#print((d.text, d.pos_, d.dep_))\n\tword = d.text\n\tpos = d.pos_\n\tdep = d.dep_\n#If it matches subject, do this\n\tif re.search(r'subj', dep):\n\t\t#URL to SuperAI Search\n\t\tword2 = ahref + 'http://www.superai.online/solr/search.php?query='+ word + ahref2 + word + '</a>'\n\t\tsubj_array.append(word)\n\t\tprint (word2)\n\t\tprint (pos)\n\t\tprint (dep)\n\n#If it matches object, do this\n\tif re.search(r'obj', dep):\n\t\t#URL to SuperAI Search\n\t\tword2 = ahref + 'http://www.superai.online/solr/search.php?query='+ word + ahref2 + word + '</a>'\n\t\tobj_array.append(word)\n\t\tprint (word2)\n\t\tprint (pos)\n\t\tprint (dep)\n\n\n#Sorts both arrays\n#ToDo & Note to self: \n#Study more of sorting so I can visualize this as table etc.\nsubj_array.sort()\nobj_array.sort()\nfor subj in subj_array:\n\tprint (subj)\n\nfor obj in obj_array:\n\tprint (obj)\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
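The script above relies on spaCy's dependency labels: any token whose dep_ contains 'subj' or 'obj' is collected and linked to the SuperAI search page. A minimal sketch of what those labels look like (assumes the small English model is installed via python -m spacy download en_core_web_sm):

import spacy

nlp = spacy.load('en_core_web_sm')
for token in nlp('The cat chased the mouse'):
    print(token.text, token.dep_)
# Typical output includes 'cat nsubj' and 'mouse dobj', both of which the
# re.search checks above would match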
from p5 import *
import numpy as np
from numpy.random import default_rng
from boids import Boid
from data import Data
n=30;
width = 1920
height = 1080
flock=[]
infected=[]
rng = default_rng()
frames=0
for i in range(n):
x = rng.integers(low=0, high=1920)
y = rng.integers(low=0, high=1080)
if i==0:
flock.append(Boid(x,y, width, height,infected=True,curado=False,alive=True))
else:
flock.append(Boid(x,y, width, height,infected=False,curado=False,alive=True))
def setup():
#this happens just once
size(width, height) #instead of create_canvas
def draw():
global flock,frames
background(30, 30, 47)
for boid in flock:
boid.edges()
boid.apply_behaviour(flock)
boid.infection(flock)
boid.update()
boid.show()
boid.livesordie()
Data.count(flock)
run()
|
flexible
|
{
"blob_id": "78c4e14e5afdf857082b60bf4020f0f785d93a0d",
"index": 9704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n x = rng.integers(low=0, high=1920)\n y = rng.integers(low=0, high=1080)\n if i == 0:\n flock.append(Boid(x, y, width, height, infected=True, curado=False,\n alive=True))\n else:\n flock.append(Boid(x, y, width, height, infected=False, curado=False,\n alive=True))\n\n\ndef setup():\n size(width, height)\n\n\ndef draw():\n global flock, frames\n background(30, 30, 47)\n for boid in flock:\n boid.edges()\n boid.apply_behaviour(flock)\n boid.infection(flock)\n boid.update()\n boid.show()\n boid.livesordie()\n Data.count(flock)\n\n\nrun()\n",
"step-3": "<mask token>\nn = 30\nwidth = 1920\nheight = 1080\nflock = []\ninfected = []\nrng = default_rng()\nframes = 0\nfor i in range(n):\n x = rng.integers(low=0, high=1920)\n y = rng.integers(low=0, high=1080)\n if i == 0:\n flock.append(Boid(x, y, width, height, infected=True, curado=False,\n alive=True))\n else:\n flock.append(Boid(x, y, width, height, infected=False, curado=False,\n alive=True))\n\n\ndef setup():\n size(width, height)\n\n\ndef draw():\n global flock, frames\n background(30, 30, 47)\n for boid in flock:\n boid.edges()\n boid.apply_behaviour(flock)\n boid.infection(flock)\n boid.update()\n boid.show()\n boid.livesordie()\n Data.count(flock)\n\n\nrun()\n",
"step-4": "from p5 import *\nimport numpy as np\nfrom numpy.random import default_rng\nfrom boids import Boid\nfrom data import Data\nn = 30\nwidth = 1920\nheight = 1080\nflock = []\ninfected = []\nrng = default_rng()\nframes = 0\nfor i in range(n):\n x = rng.integers(low=0, high=1920)\n y = rng.integers(low=0, high=1080)\n if i == 0:\n flock.append(Boid(x, y, width, height, infected=True, curado=False,\n alive=True))\n else:\n flock.append(Boid(x, y, width, height, infected=False, curado=False,\n alive=True))\n\n\ndef setup():\n size(width, height)\n\n\ndef draw():\n global flock, frames\n background(30, 30, 47)\n for boid in flock:\n boid.edges()\n boid.apply_behaviour(flock)\n boid.infection(flock)\n boid.update()\n boid.show()\n boid.livesordie()\n Data.count(flock)\n\n\nrun()\n",
"step-5": "from p5 import *\nimport numpy as np\nfrom numpy.random import default_rng\nfrom boids import Boid\nfrom data import Data\nn=30;\nwidth = 1920\nheight = 1080\nflock=[]\ninfected=[]\nrng = default_rng()\nframes=0\n\nfor i in range(n):\n x = rng.integers(low=0, high=1920)\n y = rng.integers(low=0, high=1080)\n\n if i==0:\n flock.append(Boid(x,y, width, height,infected=True,curado=False,alive=True))\n else:\n flock.append(Boid(x,y, width, height,infected=False,curado=False,alive=True))\n\ndef setup():\n #this happens just once\n size(width, height) #instead of create_canvas\n\n\ndef draw():\n global flock,frames\n \n background(30, 30, 47)\n\n\n for boid in flock:\n boid.edges()\n boid.apply_behaviour(flock)\n boid.infection(flock)\n boid.update() \n boid.show()\n boid.livesordie()\n Data.count(flock)\n \n\nrun()",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
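The simulation above imports Boid and Data from sibling modules that are not part of this record. The interface its draw loop assumes looks roughly like this (hypothetical stub; method bodies are placeholders):

class Boid:
    def __init__(self, x, y, width, height, infected, curado, alive):
        self.infected, self.curado, self.alive = infected, curado, alive

    def edges(self): ...                   # wrap or bounce at the canvas borders
    def apply_behaviour(self, flock): ...  # alignment, cohesion, separation
    def infection(self, flock): ...        # spread infection to nearby boids
    def update(self): ...                  # integrate velocity into position
    def show(self): ...                    # draw the boid with p5 primitives
    def livesordie(self): ...              # resolve recovery or death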
import re
def find_all_links(text):
result = []
iterator = re.finditer(r"https?\:\/\/(www)?\.?\w+\.\w+", text)
for match in iterator:
result.append(match.group())
return result
|
normal
|
{
"blob_id": "b8c7aa5ff7387eacb45d996fa47186d193b44782",
"index": 4823,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_all_links(text):\n result = []\n iterator = re.finditer('https?\\\\:\\\\/\\\\/(www)?\\\\.?\\\\w+\\\\.\\\\w+', text)\n for match in iterator:\n result.append(match.group())\n return result\n",
"step-3": "import re\n\n\ndef find_all_links(text):\n result = []\n iterator = re.finditer('https?\\\\:\\\\/\\\\/(www)?\\\\.?\\\\w+\\\\.\\\\w+', text)\n for match in iterator:\n result.append(match.group())\n return result\n",
"step-4": "import re\n\n\ndef find_all_links(text):\n result = []\n iterator = re.finditer(r\"https?\\:\\/\\/(www)?\\.?\\w+\\.\\w+\", text)\n for match in iterator:\n result.append(match.group())\n return result",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
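A quick check of the helper above (editor's sketch):

text = 'Docs at https://example.com and a mirror at http://www.example.org'
print(find_all_links(text))
# ['https://example.com', 'http://www.example.org']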
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from flask_cors import CORS
from flask_misaka import Misaka
from flask_mailman import Mail
from flask_talisman import Talisman
from werkzeug.middleware.proxy_fix import ProxyFix
from micawber.providers import bootstrap_basic
from whitenoise import WhiteNoise
from pytz import timezone
from urllib.parse import quote_plus
from dribdat import commands, public, admin
from dribdat.assets import assets # noqa: I005
from dribdat.sso import get_auth_blueprint
from dribdat.extensions import (
hashing,
cache,
db,
login_manager,
migrate,
)
from dribdat.settings import ProdConfig # noqa: I005
from dribdat.utils import timesince
from dribdat.onebox import make_oembedplus
def init_app(config_object=ProdConfig):
"""Define an application factory.
See: http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
"""
app = Flask(__name__)
app.config.from_object(config_object)
# Set up cross-site access to the API
if app.config['SERVER_CORS']:
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
# Set up using an external proxy/static server
if app.config['SERVER_PROXY']:
app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)
else:
# Internally optimize static file hosting
app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')
for static in ('css', 'img', 'js', 'public'):
app.wsgi_app.add_files('dribdat/static/' + static)
register_extensions(app)
register_blueprints(app)
register_oauthhandlers(app)
register_errorhandlers(app)
register_filters(app)
register_loggers(app)
register_shellcontext(app)
register_commands(app)
register_caching(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
assets.init_app(app)
hashing.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
init_mailman(app)
init_talisman(app)
return None
def init_mailman(app):
"""Initialize mailer support."""
if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:
if not app.config['MAIL_DEFAULT_SENDER']:
app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')
else:
mail = Mail()
mail.init_app(app)
def init_talisman(app):
"""Initialize Talisman support."""
if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:
Talisman(app,
content_security_policy=app.config['CSP_DIRECTIVES'],
frame_options_allow_from='*')
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(public.project.blueprint)
app.register_blueprint(public.auth.blueprint)
app.register_blueprint(public.api.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
def register_oauthhandlers(app):
"""Set up OAuth handlers based on configuration."""
blueprint = get_auth_blueprint(app)
if blueprint is not None:
app.register_blueprint(blueprint, url_prefix="/oauth")
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
from dribdat.user.models import User
return {
'db': db,
'User': User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
def register_filters(app):
"""Register filters for templates."""
    # Conversion of Markdown to HTML
Misaka(app, autolink=True, fenced_code=True,
strikethrough=True, tables=True)
# Registration of handlers for micawber
app.oembed_providers = bootstrap_basic()
@app.template_filter()
def onebox(value):
return make_oembedplus(
value, app.oembed_providers, maxwidth=600, maxheight=400
)
# Timezone helper
app.tz = timezone(app.config['TIME_ZONE'])
# Lambda filters for safe image_url's
app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')
# Custom filters
@app.template_filter()
def since_date(value):
return timesince(value)
@app.template_filter()
def until_date(value):
return timesince(value, default="now!", until=True)
@app.template_filter()
def format_date(value, format='%d.%m.%Y'):
if value is None: return ''
return value.strftime(format)
@app.template_filter()
def format_datetime(value, format='%d.%m.%Y %H:%M'):
if value is None: return ''
return value.strftime(format)
def register_loggers(app):
"""Initialize and configure logging."""
if 'DEBUG' in app.config and not app.config['DEBUG']:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
def register_caching(app):
"""Prevent cached responses in debug."""
if 'DEBUG' in app.config and app.config['DEBUG']:
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
|
normal
|
{
"blob_id": "2257f73a290dfd428a874e963c26e51f1c1f1efa",
"index": 927,
"step-1": "<mask token>\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\n<mask token>\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n 
app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"step-3": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return 
''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"step-4": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n 
)\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"The app module, containing the app factory function.\"\"\"\n\nfrom flask import Flask, render_template\nfrom flask_cors import CORS\nfrom flask_misaka import Misaka\nfrom flask_mailman import Mail\nfrom flask_talisman import Talisman\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom micawber.providers import bootstrap_basic\nfrom whitenoise import WhiteNoise\nfrom pytz import timezone\nfrom urllib.parse import quote_plus\nfrom dribdat import commands, public, admin\nfrom dribdat.assets import assets # noqa: I005\nfrom dribdat.sso import get_auth_blueprint\nfrom dribdat.extensions import (\n hashing,\n cache,\n db,\n login_manager,\n migrate,\n)\nfrom dribdat.settings import ProdConfig # noqa: I005\nfrom dribdat.utils import timesince\nfrom dribdat.onebox import make_oembedplus\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n # Set up cross-site access to the API\n if app.config['SERVER_CORS']:\n CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n\n # Set up using an external proxy/static server\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n # Internally optimize static file hosting\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app,\n content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix=\"/oauth\")\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n # If a HTTPException, pull the `code` attribute; default to 500\n error_code = getattr(error, 'code', 500)\n return 
render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {\n 'db': db,\n 'User': User}\n\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n #\n # Conversion of Markdown to HTML\n Misaka(app, autolink=True, fenced_code=True,\n strikethrough=True, tables=True)\n\n # Registration of handlers for micawber\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(\n value, app.oembed_providers, maxwidth=600, maxheight=400\n )\n\n # Timezone helper\n app.tz = timezone(app.config['TIME_ZONE'])\n\n # Lambda filters for safe image_url's\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')\n\n # Custom filters\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default=\"now!\", until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None: return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None: return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n",
"step-ids": [
5,
9,
10,
12,
14
]
}
|
[
5,
9,
10,
12,
14
] |
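A typical WSGI entry point for this application factory (hypothetical module path; only init_app is taken from the record above):

# wsgi.py
from dribdat.app import init_app

app = init_app()

if __name__ == '__main__':
    app.run()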
import numpy as np
import numdifftools as nd
from scipy import stats
from scipy import optimize
from functools import partial
class TCRPowerCalculator:
def __init__(self, pcmodel):
self.pcmodel = pcmodel
self.predict_variance = self.pcmodel.predict_variance
self.predict_mean = self.pcmodel.predict_mean
self.get_prediction_interval = self.pcmodel.get_prediction_interval
self.predict_detection_probability = self.pcmodel.predict_detection_probability
	#possible TODO: parse this method out into a new 2-step model class
def predict_detection_probability_2step(self, tcr_frequency, num_reads, num_cells, detect_thresh = 1):
"""
2-step detection probability model where
1) Num_cells_TCR is sampled first from the blood (Poisson model)
2) The RNA detection probability is calculated (Negbin model).
The num_cells_TCR is marginalized with the num_cells parameter as the upper limit
on the number of cells that could be sampled for a given TCR.
"""
mu_cells = tcr_frequency*num_cells
p0_poisson = stats.poisson.pmf(0, mu_cells)
num_cells_TCR = np.arange(1, num_cells + 1)[:,np.newaxis]
#Step 1 Poisson
p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)
#Get rid of 0 probability cell counts
num_cells_TCR = num_cells_TCR[p1 >0]
p1 = p1[p1 >0]
#Step 2 Negbin
mu_reads = self.pcmodel.predict_mean(num_cells_TCR/num_cells, num_reads)
p2 = np.zeros(p1.shape)
for i in np.arange(detect_thresh):
p2 += self.pcmodel.pmf(mu_reads, count = i)
p0_2step = np.dot(p1.squeeze(), p2.squeeze())
		#If 0 cells are sampled in the Poisson step, 0 reads follow automatically
return 1.0 - p0_poisson - p0_2step
def get_limit_of_detection_tcrfreq(self, num_reads, conf_level = 0.95):
opt_f = partial(self.pcmodel.predict_detection_probability, num_reads = num_reads)
opt_res = optimize.root_scalar(lambda freq: opt_f(freq) - conf_level,
method = "brentq",
bracket = [1.0e-16, 1])
return opt_res.root
def get_limit_of_detection_nreads(self, tcr_freq, conf_level = 0.95):
opt_nreads = partial(self.pcmodel.predict_detection_probability, tcr_frequencies = tcr_freq)
opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads = nreads) - conf_level,
method = "secant",
x0 = 1.0e-16,
x1 = 1)
return int(np.around(opt_res.root))
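
# Editor's usage sketch (hypothetical; pcmodel stands for any fitted read-count
# model exposing predict_mean, predict_variance, pmf, get_prediction_interval
# and predict_detection_probability):
#
#   power = TCRPowerCalculator(pcmodel)
#   p_detect = power.predict_detection_probability_2step(
#       tcr_frequency=1e-6, num_reads=10**7, num_cells=10**6)
#   freq_lod = power.get_limit_of_detection_tcrfreq(num_reads=10**7)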
|
flexible
|
{
"blob_id": "d327151c9659078e12e4aca46631de33e7ca4dcf",
"index": 167,
"step-1": "<mask token>\n\n\nclass TCRPowerCalculator:\n <mask token>\n\n def predict_detection_probability_2step(self, tcr_frequency, num_reads,\n num_cells, detect_thresh=1):\n \"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n mu_cells = tcr_frequency * num_cells\n p0_poisson = stats.poisson.pmf(0, mu_cells)\n num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]\n p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n num_cells_TCR = num_cells_TCR[p1 > 0]\n p1 = p1[p1 > 0]\n mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,\n num_reads)\n p2 = np.zeros(p1.shape)\n for i in np.arange(detect_thresh):\n p2 += self.pcmodel.pmf(mu_reads, count=i)\n p0_2step = np.dot(p1.squeeze(), p2.squeeze())\n return 1.0 - p0_poisson - p0_2step\n <mask token>\n\n def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):\n opt_nreads = partial(self.pcmodel.predict_detection_probability,\n tcr_frequencies=tcr_freq)\n opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=\n nreads) - conf_level, method='secant', x0=1e-16, x1=1)\n return int(np.around(opt_res.root))\n",
"step-2": "<mask token>\n\n\nclass TCRPowerCalculator:\n\n def __init__(self, pcmodel):\n self.pcmodel = pcmodel\n self.predict_variance = self.pcmodel.predict_variance\n self.predict_mean = self.pcmodel.predict_mean\n self.get_prediction_interval = self.pcmodel.get_prediction_interval\n self.predict_detection_probability = (self.pcmodel.\n predict_detection_probability)\n\n def predict_detection_probability_2step(self, tcr_frequency, num_reads,\n num_cells, detect_thresh=1):\n \"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n mu_cells = tcr_frequency * num_cells\n p0_poisson = stats.poisson.pmf(0, mu_cells)\n num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]\n p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n num_cells_TCR = num_cells_TCR[p1 > 0]\n p1 = p1[p1 > 0]\n mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,\n num_reads)\n p2 = np.zeros(p1.shape)\n for i in np.arange(detect_thresh):\n p2 += self.pcmodel.pmf(mu_reads, count=i)\n p0_2step = np.dot(p1.squeeze(), p2.squeeze())\n return 1.0 - p0_poisson - p0_2step\n <mask token>\n\n def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):\n opt_nreads = partial(self.pcmodel.predict_detection_probability,\n tcr_frequencies=tcr_freq)\n opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=\n nreads) - conf_level, method='secant', x0=1e-16, x1=1)\n return int(np.around(opt_res.root))\n",
"step-3": "<mask token>\n\n\nclass TCRPowerCalculator:\n\n def __init__(self, pcmodel):\n self.pcmodel = pcmodel\n self.predict_variance = self.pcmodel.predict_variance\n self.predict_mean = self.pcmodel.predict_mean\n self.get_prediction_interval = self.pcmodel.get_prediction_interval\n self.predict_detection_probability = (self.pcmodel.\n predict_detection_probability)\n\n def predict_detection_probability_2step(self, tcr_frequency, num_reads,\n num_cells, detect_thresh=1):\n \"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n mu_cells = tcr_frequency * num_cells\n p0_poisson = stats.poisson.pmf(0, mu_cells)\n num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]\n p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n num_cells_TCR = num_cells_TCR[p1 > 0]\n p1 = p1[p1 > 0]\n mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,\n num_reads)\n p2 = np.zeros(p1.shape)\n for i in np.arange(detect_thresh):\n p2 += self.pcmodel.pmf(mu_reads, count=i)\n p0_2step = np.dot(p1.squeeze(), p2.squeeze())\n return 1.0 - p0_poisson - p0_2step\n\n def get_limit_of_detection_tcrfreq(self, num_reads, conf_level=0.95):\n opt_f = partial(self.pcmodel.predict_detection_probability,\n num_reads=num_reads)\n opt_res = optimize.root_scalar(lambda freq: opt_f(freq) -\n conf_level, method='brentq', bracket=[1e-16, 1])\n return opt_res.root\n\n def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):\n opt_nreads = partial(self.pcmodel.predict_detection_probability,\n tcr_frequencies=tcr_freq)\n opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=\n nreads) - conf_level, method='secant', x0=1e-16, x1=1)\n return int(np.around(opt_res.root))\n",
"step-4": "import numpy as np\nimport numdifftools as nd\nfrom scipy import stats\nfrom scipy import optimize\nfrom functools import partial\n\n\nclass TCRPowerCalculator:\n\n def __init__(self, pcmodel):\n self.pcmodel = pcmodel\n self.predict_variance = self.pcmodel.predict_variance\n self.predict_mean = self.pcmodel.predict_mean\n self.get_prediction_interval = self.pcmodel.get_prediction_interval\n self.predict_detection_probability = (self.pcmodel.\n predict_detection_probability)\n\n def predict_detection_probability_2step(self, tcr_frequency, num_reads,\n num_cells, detect_thresh=1):\n \"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n mu_cells = tcr_frequency * num_cells\n p0_poisson = stats.poisson.pmf(0, mu_cells)\n num_cells_TCR = np.arange(1, num_cells + 1)[:, np.newaxis]\n p1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n num_cells_TCR = num_cells_TCR[p1 > 0]\n p1 = p1[p1 > 0]\n mu_reads = self.pcmodel.predict_mean(num_cells_TCR / num_cells,\n num_reads)\n p2 = np.zeros(p1.shape)\n for i in np.arange(detect_thresh):\n p2 += self.pcmodel.pmf(mu_reads, count=i)\n p0_2step = np.dot(p1.squeeze(), p2.squeeze())\n return 1.0 - p0_poisson - p0_2step\n\n def get_limit_of_detection_tcrfreq(self, num_reads, conf_level=0.95):\n opt_f = partial(self.pcmodel.predict_detection_probability,\n num_reads=num_reads)\n opt_res = optimize.root_scalar(lambda freq: opt_f(freq) -\n conf_level, method='brentq', bracket=[1e-16, 1])\n return opt_res.root\n\n def get_limit_of_detection_nreads(self, tcr_freq, conf_level=0.95):\n opt_nreads = partial(self.pcmodel.predict_detection_probability,\n tcr_frequencies=tcr_freq)\n opt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads=\n nreads) - conf_level, method='secant', x0=1e-16, x1=1)\n return int(np.around(opt_res.root))\n",
"step-5": "import numpy as np \nimport numdifftools as nd\nfrom scipy import stats\nfrom scipy import optimize\nfrom functools import partial\n\nclass TCRPowerCalculator:\n\tdef __init__(self, pcmodel):\n\t\tself.pcmodel = pcmodel\n\t\tself.predict_variance = self.pcmodel.predict_variance\n\t\tself.predict_mean = self.pcmodel.predict_mean\n\t\tself.get_prediction_interval = self.pcmodel.get_prediction_interval\n\t\tself.predict_detection_probability = self.pcmodel.predict_detection_probability\n\n\t#possivle TODO: Parse this method out into a new 2-step model class\n\tdef predict_detection_probability_2step(self, tcr_frequency, num_reads, num_cells, detect_thresh = 1):\t\t\n\t\t\"\"\"\n\t\t2-step detection probability model where \n\t\t\n\t\t1) Num_cells_TCR is sampled first from the blood (Poisson model)\n\t\t2) The RNA detection probability is calculated (Negbin model).\n\t\t\n\t\tThe num_cells_TCR is marginalized with the num_cells parameter as the upper limit \n\t\ton the number of cells that could be sampled for a given TCR.\n\t\t\"\"\"\n\n\t\tmu_cells = tcr_frequency*num_cells\n\t\tp0_poisson = stats.poisson.pmf(0, mu_cells)\n\t\t\n\t\tnum_cells_TCR = np.arange(1, num_cells + 1)[:,np.newaxis]\n\t\t\n\t\t#Step 1 Poisson\n\t\tp1 = stats.poisson.pmf(num_cells_TCR, mu_cells)\n\n\t\t#Get rid of 0 probability cell counts\n\t\tnum_cells_TCR = num_cells_TCR[p1 >0]\n\t\tp1 = p1[p1 >0]\n\n\t\t#Step 2 Negbin\n\t\tmu_reads = self.pcmodel.predict_mean(num_cells_TCR/num_cells, num_reads)\n\t\t\t\t\n\t\tp2 = np.zeros(p1.shape)\n\t\tfor i in np.arange(detect_thresh):\n\t\t\tp2 += self.pcmodel.pmf(mu_reads, count = i)\n\n\t\tp0_2step = np.dot(p1.squeeze(), p2.squeeze())\n\n\t\t#If 0 cells from Poisson model then automatically get 0 reads\n\t\treturn 1.0 - p0_poisson - p0_2step\n\t\n\tdef get_limit_of_detection_tcrfreq(self, num_reads, conf_level = 0.95):\n\t\topt_f = partial(self.pcmodel.predict_detection_probability, num_reads = num_reads) \n\n\t\topt_res = optimize.root_scalar(lambda freq: opt_f(freq) - conf_level,\n\t\t \t\t\t\t\t\t\t\tmethod = \"brentq\",\n\t\t \t\t\t\t\t\t\t\tbracket = [1.0e-16, 1])\n\t\treturn opt_res.root\n\n\tdef get_limit_of_detection_nreads(self, tcr_freq, conf_level = 0.95):\n\t\topt_nreads = partial(self.pcmodel.predict_detection_probability, tcr_frequencies = tcr_freq) \n\n\t\topt_res = optimize.root_scalar(lambda nreads: opt_nreads(num_reads = nreads) - conf_level,\n\t\t\t\t\t\t\t\t\t\tmethod = \"secant\",\n\t\t\t\t\t\t\t\t\t\tx0 = 1.0e-16,\n\t\t\t\t\t\t\t\t\t\tx1 = 1)\n\t\t\n\t\treturn int(np.around(opt_res.root))",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
Simulator contains the tools needed to set up a multilayer antireflection
coating simulation.
Based on transfer matrix method outlined in Hou, H.S. 1974.
"""
# Author: Andrew Nadolski (with lots of help from previous work by Colin Merkel,
# Steve Byrnes, and Aritoki Suzuki)
# Filename: simulator.py
import os
import pprint
import time
import materials as mats
import numpy as np
import scipy as sp
class Layer:
"""A layer in the AR coating.
Attributes
----------
name : string
The name of the material comprising the layer. Default is 'Generic layer'
thickness : float
The thickness of the layer material. Default is 5 mil.
type : string
The type of layer. Default is `Layer`, which is an element of the AR
coating. Other acceptable types are `Source` and `Terminator`.
dielectric : float
The dielectric constant of the layer material. Default is 1.
losstangent : float
The loss tangent of the material. Default is 0.
"""
def __init__(self):
self.name = 'Generic layer'
self.thickness = 5.
self.type = 'Layer'
self.units = 'mil'
self.dielectric = 1.
self.losstangent = 0.
def __repr__(self):
"""Return a nice string formatted representation of the layer."""
return '{} (AR layer)'.format(self.name)
def display_layer_parameters(self):
"""Display the attributes of the layer."""
pprint.pprint(vars(self))
return
def get_index(self):
"""Return the refractive index of the layer."""
return (np.sqrt(self.dielectric))
def ideal_thickness(self, opt_freq=160e9):
"""Return the ideal quarter wavelength thickness of the AR coating layer
at a given optimization frequency.
Arguments
---------
opt_freq : float, optional
The optimization frequency (in Hz) for the layers thickness. Defaults
to 160 GHz.
"""
return (1/np.sqrt(self.dielectric)*3e8/(4*opt_freq))
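# Quick sanity check (illustrative values, not from materials.py): a layer
# with dielectric constant 9.7 (roughly alumina) optimized for 160 GHz comes
# out about a quarter wavelength thick in the material:
#
#   layer = Layer()
#   layer.dielectric = 9.7
#   layer.ideal_thickness(160e9)   # ~1.5e-4 m, i.e. ~0.15 mm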
class SourceLayer(Layer):
"""A special case of ``Layer``; represents the layer from which the simulated wave
emanates.
Attributes
----------
thickness : float
The thickness of the source layer. Defaults to ``numpy.inf`` since the model
doesn't care about the thickness of source layer. The thickness of the
source layer should not be changed under normal operations.
type : string
The type of layer. Default is `Source`, which is an element of the model,
but not the coating. Other acceptable types are `Layer` and `Terminator`.
"""
def __init__(self):
Layer.__init__(self)
self.thickness = np.inf
self.type = 'Source'
def __repr__(self):
"""Return a nice string formatted representation of the layer."""
return '{} (source layer)'.format(self.name)
class SubstrateLayer(Layer):
"""A special case of ``Layer``; represents the layer to which the AR coating is
attached.
Attributes
----------
thickness : float
The thickness of the substrate layer. Defaults to 250 mils, which is
the typical thickness of a sample puck used in the Berkeley FTS setup.
This may be changed as is necessary, but the units must (eventually) be
converted to meters before being fed to the simulator.
type : string
The type of layer
"""
def __init__(self):
Layer.__init__(self)
self.thickness = 250.
self.type = 'Substrate'
def __repr__(self):
return '{} (substrate)'.format(self.name)
class TerminatorLayer(Layer):
"""A special case of ``Layer``; represents the layer upon which the simulated wave
terminates.
Attributes
----------
thickness : float
The thickness of the terminating layer. Defaults to ``numpy.inf`` since
the model doesn't care about the thickness of the terminating layer.
The thickness of the terminating layer should not be changed under
normal operations.
type : string
The type of layer. Default is `Terminator`, which is an element of the model,
but not the coating. Other acceptable types are `Source` and `Layer`.
"""
def __init__(self):
Layer.__init__(self)
self.thickness = np.inf
self.type = 'Terminator'
def __repr__(self):
"""Return a nice string formatted representation of the layer."""
return '{} (terminator layer)'.format(self.name)
class Builder:
"""The main body of the simulator code.
Attributes
----------
bands : list
A list of n tuples, with each tuple composed of a lower and upper limit
for a frequency band in units of hertz. Default is the SPT-3G bands.
freq_sweep : array
The range of frequencies to be simulated. Defaults to 0. Set a frequency
sweep by calling ``set_freq_sweep()``.
optimization_frequency : float
The frequency (in Hz) at which to calculate the ideal thickness for a given
material. Defaults to 160e9 Hz (160 GHz).
save_name : string
The name under which the results of the simulation are saved. Defaults to
'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid
overwriting previous simulation results.
save_path : string
The path to which the simulation results will be saved. Defaults to the
current working directory.
source : object
``Layer`` object ``SourceLayer`` that defines where the wave emanates from.
Default is `None`.
stack : list
The user-defined layers incorporated in the simulation EXCEPT the source
and terminator layers. Default is empty list.
structure : list
The layers incorporated in the simulation INCLUDING the source and
terminator layers. Default is empty list. The list is populated
by creating layers and calling ``_interconnect()``.
terminator : object
``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.
Defaults is `None`.
"""
def __init__(self):
self.bands = [(81.7e9, 107.5e9),(128.6e9, 167.2e9),(196.9e9, 249.2e9)]
self.freq_sweep = 0.
self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.time()))
self.optimization_frequency = 160e9 # given in Hz, i.e. 160 GHz
self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(time.time()))
self.save_path = '.'
self.source = None
self.stack = []
self.structure = []
self.terminator = None
def _calc_R_T_amp(self, polarization, n, delta):
"""Calculate the reflected and transmitted amplitudes
Arguments
---------
polarization : string
The polarization of the source wave. Must be one of: 's', 'p', or 'u'.
n : array
An array of refractive indices, ordered from source to terminator
delta : array
An array of wavevector offsets
Returns
-------
(r, t) : tuple
A tuple where 'r' is the reflected amplitude, and 't' is the
transmitted amplitude
"""
        t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)
        r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)
        # transmission and reflection amplitudes at each interface
        for i in range(len(self.structure)-1):
            t_amp[i,i+1] = self._t_at_interface(polarization, n[i], n[i+1])
            r_amp[i,i+1] = self._r_at_interface(polarization, n[i], n[i+1])
        # characteristic matrix of each internal layer: a propagation matrix
        # (the exponential phase terms) times an interface matrix, following
        # Hou 1974
        M = np.zeros((len(self.structure),2,2), dtype=complex)
        for i in range(1, len(self.structure)-1):
            M[i] = 1/t_amp[i,i+1] * np.dot(self._make_2x2(np.exp(-1j*delta[i]),
                                                          0., 0., np.exp(1j*delta[i]),
                                                          dtype=complex),
                                           self._make_2x2(1., r_amp[i,i+1], \
                                                          r_amp[i,i+1], 1., \
                                                          dtype=complex))
        # accumulate the product of the characteristic matrices, then fold in
        # the first interface
        M_prime = self._make_2x2(1., 0., 0., 1., dtype=complex)
        for i in range(1, len(self.structure)-1):
            M_prime = np.dot(M_prime, M[i])
        M_prime = np.dot(self._make_2x2(1., r_amp[0,1], r_amp[0,1], 1., \
                                        dtype=complex)/t_amp[0,1], M_prime)
        t = 1/M_prime[0,0]
        r = M_prime[0,1]/M_prime[0,0]
        return (r, t)
def _d_converter(self):
"""Check the units of all elements in the connected ar coating
stack. Convert the lengths of the layers to meters if they are
not already in meters.
"""
units = {'um':1e-6, 'mm':1e-3, 'inch':2.54e-2, 'in':2.54e-2,\
'micron':1e-6, 'mil':2.54e-5, 'm':1.0}
for i in self.stack:
i.thickness = i.thickness*units[i.units]
return
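    # Unit sanity check (illustrative): a 5 mil layer converts to
    # 5 * 2.54e-5 = 1.27e-4 m.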
def _find_ks(self, n, frequency, tan, lossy=True):
"""Calculate the wavenumbers.
Arguments
---------
n : array
An array of refractive indices, ordered from source to
terminator
frequency : float
The frequency at which to calculate the wavevector, k
tan : array
An array of loss tangents, ordered from vacuum to substrate
lossy : boolean, optional
If `True` the wavevector will be found for a lossy material.
If `False` the wavevector will be found for lossless material.
Default is `True`.
Returns
-------
k : complex
The complex wavenumber, k
"""
if lossy:
k = 2*np.pi*n*frequency*(1+0.5j*tan)/3e8 # New expression for loss (as of 9/13/16), this one is more physical (i.e. subtractive)
# k = 2*np.pi*n*frequency*(1-0.5j*tan)/3e8 # Original expression for loss (pre 9/13/16), but it is incorrectly ADDITIVE
else:
k = 2*np.pi*n*frequency/3e8
return k
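    # Magnitude check (illustrative numbers): for n = 3.1, frequency = 160 GHz,
    # and tan = 1e-3, k = 2*pi*3.1*160e9/3e8 * (1 + 0.0005j)
    # ~ 1.04e4 * (1 + 0.0005j) rad/m, so the loss enters as a small imaginary
    # part of the wavenumber.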
def _find_k_offsets(self, k, d):
"""Calculate the wavenumber offset, delta.
Arguments
---------
k : array
The wavevector
d : array
An array of thicknesses, ordered from source to terminator
Returns
-------
delta : array
The wavenumber offset
"""
        olderr = sp.seterr(invalid='ignore')  # ignore 'invalid multiply' from the 'inf' boundary layers
        delta = k * d
        sp.seterr(**olderr)  # restore the previous error settings
return delta
def _get_R(self, net_r_amp):
"""Return fraction of reflected power.
Arguments
---------
net_r_amp : float
The net reflection amplitude after calculating the transfer matrix.
"""
return np.abs(net_r_amp)**2
def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0., theta_f=0.):
"""Return the fraction of transmitted power.
Arguments
---------
polarization : string
The polarization of the source wave. One of: 's' or 'p'.
net_t_amp : float
The net transmission amplitude after calculating the transfer matrix.
n_i : float
The index of refraction of material 'i'.
n_f : float
The index of refraction of material 'f'.
theta_i : float, optional
The angle of incidence at interface 'i'. Default is 0.
theta_f : float, optional
The angle of incidence at interface 'f'. Default is 0.
"""
if (polarization=='s'):
return np.abs(net_t_amp**2) * (n_f/n_i)
elif (polarization=='p'):
return np.abs(net_t_amp**2) * (n_f/n_i)
else:
raise ValueError("Polarization must be 's' or 'p'")
    def _get_bandpass_stats(self):
        """Compute summary statistics over each frequency band.

        Not yet implemented.
        """
        pass
def _interconnect(self):
"""Connect all the AR coating layer objects, ensuring that the source
and terminator layers come first and last, respectively.
"""
self.clear_structure()
self.structure.append(self.source)
for i in range(len(self.stack)):
self.structure.append(self.stack[i])
self.structure.append(self.terminator)
return
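    # After _interconnect() the simulated structure reads, in order:
    #   [source, layer_0, ..., layer_N, terminator]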
def _make_2x2(self, A11, A12, A21, A22, dtype=float):
"""Return a 2x2 array quickly.
Arguments
---------
A11 : float
Array element [0,0].
A12 : float
Array element [0,1].
A21 : float
Array element [1,0].
A22 : float
Array element [1,1].
dtype : dtype, optional
The datatype of the array. Defaults to float.
"""
array = np.empty((2,2), dtype=dtype)
array[0,0] = A11
array[0,1] = A12
array[1,0] = A21
array[1,1] = A22
return array
    def _make_log(self):
        """Assemble a log of the simulation parameters. Not yet implemented."""
        pass
def _make_save_path(self, save_path, save_name):
"""Assemble the file name and path to the results file.
Returns
-------
path : string
The full path to the save destination for the simulation results
"""
        if save_name.endswith('.txt'):
            path = os.path.join(save_path, save_name)
        else:
            self.save_name = save_name+'.txt'
            path = os.path.join(save_path, self.save_name)
        return path
def _r_at_interface(self, polarization, n_1, n_2):
"""Calculate the reflected amplitude at an interface.
Arguments
---------
polarization : string
The polarization of the source wave. Must be one of: 's' or 'p'.
n_1 : float
The index of refraction of the first material.
n_2 : float
The index of refraction of the second material.
Returns
-------
reflected amplitude : float
The amplitude of the reflected power
"""
if polarization == 's':
return ((n_1-n_2)/(n_1+n_2))
elif polarization == 'p':
return ((n_1-n_2)/(n_1+n_2))
else:
raise ValueError("Polarization must be 's' or 'p'")
def _sort_ns(self):
"""Organize the refractive indices of the layers in the simulation.
Returns
-------
n : array
The ordered list of indices of refraction, from source to terminator
"""
n = []
for layer in self.structure:
n.append(layer.get_index())
n = np.asarray(n)
return n
def _sort_ds(self):
"""Organize the layers' thicknesses in the simulation.
Returns
-------
d : array
The ordered list of thicknesses, from source to terminator
"""
d = []
for layer in self.structure:
if (layer.type == 'Layer' or layer.type == 'Substrate'):
d.append(layer.thickness)
d.insert(0, self.structure[0].thickness)
d.append(self.structure[-1].thickness)
d = np.asarray(d)
return d
def _sort_tans(self):
"""Organize the loss tangents of the layers in the simulation.
Returns
-------
tan : array
The ordered list of loss tangents, from source to terminator
"""
tan = []
for layer in self.structure:
tan.append(layer.losstangent)
tan = np.asarray(tan)
return tan
def _t_at_interface(self, polarization, n_1, n_2):
"""Calculate the transmission amplitude at an interface.
Arguments
---------
polarization : string
The polarization of the source wave. Must be one of: 's' or 'p'.
n_1 : float
The index of refraction of the first material.
n_2 : float
The index of refraction of the second material.
Returns
-------
transmitted_amplitude : float
The amplitude of the transmitted power
"""
if polarization == 's':
return 2*n_1/(n_1 + n_2)
elif polarization == 'p':
return 2*n_1/(n_1 + n_2)
else:
raise ValueError("Polarization must be 's' or 'p'")
def _unpolarized_simulation(self, frequency, theta_0=0):
"""Handle the special case of unpolarized light by running the model
for both 's' and 'p' polarizations and computing the mean of the two
results.
Arguments
---------
frequency : float
The frequency (in Hz) at which to evaluate the model.
theta_0 : float, optional
The angle of incidence at the initial interface. Default is 0.
"""
        s_data = self.sim_single_freq(frequency, 's', theta_0)['T']
        p_data = self.sim_single_freq(frequency, 'p', theta_0)['T']
        T = (s_data + p_data)/2
        return T
def add_layer(self, material, thickness=5.0, units='mil', type='layer', \
stack_position=-1):
"""Create a layer from the set of pre-programmed materials and add it
to the AR coating stack
Arguments
---------
material : string
A key in the dictionary of materials found in materials.py.
You can view these materials by calling
'show_materials()'.
thickness : float, optional
The thickness of the AR coating layer material. Assumed to
be given in 'mil' (i.e. thousandths of an inch) unless
otherwise stated. Default is 5.
units : string, optional
The units of length for the AR coating layer. Default is 'mil'.
Must be one of:
{ 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }
type : string, optional
The layer type. Default is 'layer', which corresponds to
an AR layer. Other options are 'source' or 'terminator', which
correspond to source and terminator layers, respectively.
stack_position : int, optional
The position of the layer in the AR coating stack, indexed
            from 0. Default is -1, which appends the layer to the end
            (bottom) of the stack.
"""
type = type.lower()
if type == 'layer':
layer = Layer()
layer.name = material.lower()
layer.thickness = thickness
layer.units = units
            try:
                layer.dielectric = mats.Electrical.props[layer.name][0]
            except KeyError:
                raise KeyError("I don't know that material!")
            try:
                layer.losstangent = mats.Electrical.props[layer.name][1]
            except (KeyError, IndexError):
                layer.losstangent = 0
                print('\nI don\'t know this loss tangent. Setting loss to 0!')
if (stack_position == -1):
self.stack.append(layer)
else:
self.stack.insert(stack_position, layer)
elif type == 'source':
self.source = SourceLayer()
self.source.name = material.lower()
            try:
                self.source.dielectric = mats.Electrical.props[self.source.name][0]
            except KeyError:
                raise KeyError("I don't know that material!")
            try:
                self.source.losstangent = mats.Electrical.props[self.source.name][1]
            except (KeyError, IndexError):
                self.source.losstangent = 0
                print('\nI don\'t know this loss tangent. Setting loss to 0!')
elif type == 'terminator':
self.terminator = TerminatorLayer()
self.terminator.name = material.lower()
            try:
                self.terminator.dielectric = mats.Electrical.props[self.terminator.name][0]
            except KeyError:
                raise KeyError("I don't know that material!")
            try:
                self.terminator.losstangent = mats.Electrical.props[self.terminator.name][1]
            except (KeyError, IndexError):
                self.terminator.losstangent = 0
                print('\nI don\'t know this loss tangent. Setting loss to 0!')
else:
raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR')
return
def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1):
"""Add a layer with custom properties to the AR stack.
Arguments
---------
material : string
The name of the layer
thickness : float
The thickness of the layer
units : string
The units of length for the AR coating layer. Must be one of:
{ 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }
dielectric : float
The dielectric constant of the AR coating layer
loss_tangent : float
The loss tangent of the AR coating layer
stack_position : int, optional
The position of the layer in the AR coating stack, indexed
            from 0. Default is -1, which appends the layer to the end
            (bottom) of the stack.
"""
layer = Layer()
layer.units = units
layer.thickness = thickness
layer.dielectric = dielectric
layer.losstangent = loss_tangent
if (stack_position == -1):
self.stack.append(layer)
else:
self.stack.insert(stack_position, layer)
return
def display_sim_parameters(self):
"""Display all the simulation parameters in one place."""
pprint.pprint(vars(self))
return
def clear_structure(self):
"""Remove all elements from the current AR ``structure``."""
self.structure = []
return
def remove_layer(self, layer_pos):
"""Remove the specified layer from the AR coating stack.
Arguments
---------
layer_pos : int
The list index of the layer to remove from the AR coating stack
"""
self.stack.pop(layer_pos)
return
def run_sim(self):
"""Take the attributes of the ``Builder()`` object and execute the
simulation at each frequency in ``Builder().freq_sweep``. Save the
output to a columnized, tab-separated text file.
Returns
-------
transmission : array
A three-element array. The first element is a list of
frequencies, the second elements is a list of the
transmissions at each frequency, and the third is a list of
the reflections at each frequency.
"""
t0 = time.time()
print('Beginning AR coating simulation')
self._d_converter()
self._interconnect()
f_list = []
t_list = []
r_list = []
for f in self.freq_sweep:
results = self.sim_single_freq(f)
f_list.append(f)
t_list.append(results['T'])
r_list.append(results['R'])
fs = np.asarray(f_list)
ts = np.asarray(t_list)
rs = np.asarray(r_list)
results = np.array([fs, ts, rs])
        data_name = self._make_save_path(self.save_path, self.save_name)
        header = 'Frequency (Hz)\t\tTransmission amplitude\t\tReflection amplitude'
        with open(data_name, 'wb') as f:
            np.savetxt(f, np.c_[fs, ts, rs], delimiter='\t', header=header)
print('Finished running AR coating simulation')
t1 = time.time()
t_elapsed = t1-t0
print('Elapsed time: {t}s\n'.format(t=t_elapsed))
return results
    def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units='ghz'):
        """Set the frequency range over which the simulation will run.

        Arguments
        ---------
        lower_bound : float
            The low end of the frequency range, given in `units`.
        upper_bound : float
            The high end of the frequency range, given in `units`.
        resolution : float, optional
            The interval at which to sample the frequency range, given in
            `units`. Defaults to 1.
        units : str
            The units of frequency. Must be one of:
                Hz, hz, KHz, khz, MHz, mhz, GHz, ghz
        """
        convert = {'Hz':1.0, 'hz':1.0, 'KHz':1e3, 'khz':1e3, 'MHz':1e6,
                   'mhz':1e6, 'GHz':1e9, 'ghz':1e9}
        low = lower_bound*convert[units]
        high = upper_bound*convert[units]
        samples = int((high-low)/(resolution*convert[units])) + 1
        self.freq_sweep = np.linspace(low, high, samples)
        return
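    # Example (illustrative): sample 70-300 GHz every 0.5 GHz.
    #   sim = Builder()
    #   sim.set_freq_sweep(70, 300, resolution=0.5, units='ghz')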
    def show_materials(self):
        """List the materials with known properties. The listed material names
        are keys in the materials properties dictionary.
        """
        print('\nThe materials with known dielectric constants and loss tangents are:\n')
        pprint.pprint(mats.Electrical.props)
        return
def sim_single_freq(self, frequency, polarization='s', theta_0=0):
"""Run the model simulation for a single frequency.
Arguments
---------
frequency : float
The frequency at which to evaluate the model (in Hz).
polarization : string, optional
            The polarization of the source wave. Must be 's' or 'p'; for
            unpolarized light use ``_unpolarized_simulation()``. Default is 's'.
### NOTE ###
I've chosen 's' polarization as the default because this
simulator only handles normal incidence waves, and and at
normal incidence 's' and 'p' are equivalent.
theta_0 : float, optional
The angle of incidence at the first interface.
Returns
-------
result : dict
dict = {
'T' : array; the total transmission through the model.
'R' : array; the total reflection through the model.
}
"""
        # NOTE: unpolarized ('u') input is not handled here; call
        # _unpolarized_simulation() directly for the unpolarized case.
n = self._sort_ns() # get all refractive indices
d = self._sort_ds() # get all thicknesses
tan = self._sort_tans() # get all loss tans
k = self._find_ks(n, frequency, tan) # find all wavevectors, k
delta = self._find_k_offsets(k, d) # calculate all offsets
r, t = self._calc_R_T_amp(polarization, n, delta) # get trans, ref amps
T = self._get_T(polarization, t, n[0], n[-1]) # find net trans, ref power
R = self._get_R(r)
result = {'T':T, 'R':R}
return result
    def snell(self, indices, theta_0):
        """Calculate the Snell angles for the entire model.

        Arguments
        ---------
        indices : list
            The list of indices of refraction for all elements in the model,
            ordered from source to terminator.
        theta_0 : float
            The angle of incidence at the first interface.
        """
        indices = np.asarray(indices)
        return np.arcsin(np.real_if_close(indices[0]*np.sin(theta_0) / indices))
class MCMC:
"""Contains the methods specific to ``emcee``, the MCMC Hammer, and helper
methods to set up MCMC simulations and visualize the results.
"""
def __init__(self):
self.name = 'blah'
self.priors = []
def __repr__(self):
return '{} (MCMC object)'.format(self.name)
def add_prior(self, layer_number, prior_type, low_bound, hi_bound, units='mil'):
"""Add a prior to a part of the model in order to constrain the total
simulation space. Can only place constraints on thickness and dielectric
for now.
Arguments
---------
layer_number : int
The position of the layer in the AR coating stack. Indexed from 1, so
incident `vacuum` is 0 and first AR coating layer is 1.
prior_type : string
Flags the prior as either a cut to dielectric constant or thickness.
One of 'thickness', 't', 'dielectric', or 'd'.
low_bound : float
The lower boundary of the range.
hi_bound : float
The higher boundary of the range.
units : string, optional
The units of the lower and upper bounds. Only applies to 'thickness'
            cuts because dielectric constants are unitless. Defaults to 'mil'.
"""
prior = {'layer_number':layer_number, 'prior_type':prior_type, \
'low_bound':low_bound, 'hi_bound':hi_bound, 'units':units}
self.priors.append(prior)
return
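        # Example (illustrative): constrain layer 1's thickness to 8-12 mil
        # and layer 2's dielectric constant to the range 2-4:
        #   mcmc = MCMC()
        #   mcmc.add_prior(1, 't', 8, 12, units='mil')
        #   mcmc.add_prior(2, 'd', 2, 4)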
    def lnlikelihood(self):
        """Return the log-likelihood of the model. Not yet implemented."""
        return
def lnprior(self):
"""Define the known prior attributes of the model in order to constrain
the simulation space.
"""
return
def lnprobability(self):
"""The logspace sum of ``lnprior`` and ``lnlikelihood``.
"""
return
def sort_priors(self):
"""Sort the contents of ``self.prior`` by layer number
Returns
-------
sorted_priors : list
A list of priors sorted by layer number. If a layer has both
thickness and dielectric priors, the thickness dielectric is first
and the dielectric is second.
"""
return
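# A minimal end-to-end sketch. The material keys below ('vacuum', 'alumina')
# are assumptions for illustration only -- call Builder().show_materials() to
# see the keys materials.py actually defines.
if __name__ == '__main__':
    sim = Builder()
    sim.add_layer('vacuum', type='source')      # assumed material key
    sim.add_layer('alumina', thickness=10.0)    # assumed material key
    sim.add_layer('vacuum', type='terminator')  # assumed material key
    sim.set_freq_sweep(70, 300, resolution=1, units='ghz')
    freqs, trans, refl = sim.run_sim()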
|
normal
|
{
"blob_id": "a2292bc9cee57c5d4a7d36c66510ce4b4f3e20da",
"index": 3687,
"step-1": "<mask token>\n\n\nclass SubstrateLayer(Layer):\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. Defaults to the \n current working directory.\n source : object\n ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n Default is `None`.\n stack : list\n The user-defined layers incorporated in the simulation EXCEPT the source\n and terminator layers. Default is empty list.\n structure : list\n The layers incorporated in the simulation INCLUDING the source and\n terminator layers. Default is empty list. The list is populated \n by creating layers and calling ``_interconnect()``.\n terminator : object\n ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n Defaults is `None`.\n \"\"\"\n\n def __init__(self):\n self.bands = [(81700000000.0, 107500000000.0), (128600000000.0, \n 167200000000.0), (196900000000.0, 249200000000.0)]\n self.freq_sweep = 0.0\n self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.\n time()))\n self.optimization_frequency = 160000000000.0\n self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(\n time.time()))\n self.save_path = '.'\n self.source = None\n self.stack = []\n self.structure = []\n self.terminator = None\n\n def _calc_R_T_amp(self, polarization, n, delta):\n \"\"\"Calculate the reflected and transmitted amplitudes\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
Must be one of: 's', 'p', or 'u'.\n n : array\n An array of refractive indices, ordered from source to terminator\n delta : array\n An array of wavevector offsets\n \n Returns\n -------\n (r, t) : tuple\n A tuple where 'r' is the reflected amplitude, and 't' is the\n transmitted amplitude\n \"\"\"\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=\n complex)\n for i in range(len(self.structure) - 1):\n t_amp[i, i + 1] = self._t_at_interface(polarization, n[i], n[i + 1]\n )\n r_amp[i, i + 1] = self._r_at_interface(polarization, n[i], n[i + 1]\n )\n M = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_r_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure), 2, 2), dtype=complex)\n for i in range(1, len(self.structure) - 1):\n m_t_amp[i] = self._make_2x2(np.exp(-1.0j * delta[i]), 0.0, 0.0,\n np.exp(1.0j * delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + \n 1], 1.0, dtype=complex)\n m_temp = np.dot(m_t_amp, m_r_amp)\n for i in range(1, len(self.structure) - 1):\n M[i] = 1 / t_amp[i, i + 1] * np.dot(self._make_2x2(np.exp(-1.0j *\n delta[i]), 0.0, 0.0, np.exp(1.0j * delta[i]), dtype=complex\n ), self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + 1], \n 1.0, dtype=complex))\n M_prime = self._make_2x2(1.0, 0.0, 0.0, 1.0, dtype=complex)\n for i in range(1, len(self.structure) - 1):\n M_prime = np.dot(M_prime, M[i])\n mod_M_prime = self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1]\n M_prime = np.dot(self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0,\n dtype=complex) / t_amp[0, 1], M_prime)\n t = 1 / M_prime[0, 0]\n r = M_prime[0, 1] / M_prime[0, 0]\n return r, t\n\n def _d_converter(self):\n \"\"\"Check the units of all elements in the connected ar coating\n stack. 
Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um': 1e-06, 'mm': 0.001, 'inch': 0.0254, 'in': 0.0254,\n 'micron': 1e-06, 'mil': 2.54e-05, 'm': 1.0}\n for i in self.stack:\n i.thickness = i.thickness * units[i.units]\n return\n\n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2 * np.pi * n * frequency * (1 + 0.5j * tan) / 300000000.0\n else:\n k = 2 * np.pi * n * frequency / 300000000.0\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid='ignore')\n delta = k * d\n sp.seterr(**olderr)\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp) ** 2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0.0,\n theta_f=0.0):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if polarization == 's':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n elif polarization == 'p':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. 
Defaults to float.\n \"\"\"\n array = np.empty((2, 2), dtype=dtype)\n array[0, 0] = A11\n array[0, 1] = A12\n array[1, 0] = A21\n array[1, 1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name + '.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return (n_1 - n_2) / (n_1 + n_2)\n elif polarization == 'p':\n return (n_1 - n_2) / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if layer.type == 'Layer' or layer.type == 'Substrate':\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2 * n_1 / (n_1 + n_2)\n elif polarization == 'p':\n return 2 * n_1 / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. 
Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data) / 2\n return T\n\n def add_layer(self, material, thickness=5.0, units='mil', type='layer',\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n self.source.dielectric = mats.Electrical.props[self.source.name\n ][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.source.losstangent = mats.Electrical.props[self.source\n .name][1]\n except:\n self.source.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n self.terminator.dielectric = mats.Electrical.props[self.\n terminator.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.terminator.losstangent = mats.Electrical.props[self.\n terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR'\n )\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric,\n loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n \"\"\"\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return\n\n def display_sim_parameters(self):\n \"\"\"Display all the simulation parameters in one place.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def clear_structure(self):\n \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n self.structure = []\n return\n\n def remove_layer(self, layer_pos):\n \"\"\"Remove the specified layer from the AR coating stack.\n\n Arguments\n ---------\n layer_pos : int\n The list index of the layer to remove from the AR coating stack\n \"\"\"\n self.stack.pop(layer_pos)\n return\n\n def run_sim(self):\n \"\"\"Take the attributes of the ``Builder()`` object and execute the\n simulation at each frequency in ``Builder().freq_sweep``. Save the\n output to a columnized, tab-separated text file.\n\n Returns\n -------\n transmission : array\n A three-element array. The first element is a list of\n frequencies, the second elements is a list of the\n transmissions at each frequency, and the third is a list of\n the reflections at each frequency.\n \"\"\"\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = (\n 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude')\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1 - t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results\n\n def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units=\n 'ghz'):\n \"\"\"Set the frequency range over which the simulation will run.\n \n Arguments\n ---------\n lower_bound : float\n The low end of the frequency range, given in GHz.\n upper_bound : float\n The high end of the frequency range, given in GHz.\n reolution : float, optional\n The interval at which to sample the frequency range, given in GHz.\n Defaults to 1 GHz.\n units : str\n The units of frequency. Must be one of:\n Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n \"\"\"\n convert = {'Hz': 1.0, 'hz': 1.0, 'KHz': 1000.0, 'khz': 1000.0,\n 'MHz': 1000000.0, 'mhz': 1000000.0, 'GHz': 1000000000.0, 'ghz':\n 1000000000.0}\n low = lower_bound * convert[units]\n high = upper_bound * convert[units]\n samples = (high - low) / resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return\n\n def show_materials(self):\n \"\"\"List the materials with known properties. The listed material names \n are keys in the materials properties dictionary. 
\n \"\"\"\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n return\n\n def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n \"\"\"Run the model simulation for a single frequency.\n\n Arguments\n ---------\n frequency : float\n The frequency at which to evaluate the model (in Hz).\n polarization : string, optional\n The polarization of the source wave. Must be one of: 's', \n 'p', or 'u'. Default is 's'.\n \n ### NOTE ###\n I've chosen 's' polarization as the default because this \n simulator only handles normal incidence waves, and and at \n normal incidence 's' and 'p' are equivalent.\n theta_0 : float, optional\n The angle of incidence at the first interface.\n\n Returns\n -------\n result : dict\n dict = {\n 'T' : array; the total transmission through the model.\n 'R' : array; the total reflection through the model.\n }\n \"\"\"\n n = self._sort_ns()\n d = self._sort_ds()\n tan = self._sort_tans()\n k = self._find_ks(n, frequency, tan)\n delta = self._find_k_offsets(k, d)\n r, t = self._calc_R_T_amp(polarization, n, delta)\n T = self._get_T(polarization, t, n[0], n[-1])\n R = self._get_R(r)\n result = {'T': T, 'R': R}\n return result\n\n def snell(self, indices, theta_0):\n \"\"\"Caclulate the Snell angles for the entire model.\n\n Arguments\n ---------\n indices : list\n The list of indices of refraction for all elements in the model,\n ordered from source to terminator.\n theta_0 : float\n The angle of incidence at the first interface.\n \"\"\"\n return sp.arcsin(np.real_if_close(n_list[0] * np.sin(th_0) / n_list))\n\n\nclass MCMC:\n \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n methods to set up MCMC simulations and visualize the results.\n \"\"\"\n\n def __init__(self):\n self.name = 'blah'\n self.priors = []\n\n def __repr__(self):\n return '{} (MCMC object)'.format(self.name)\n\n def add_prior(self, layer_number, prior_type, low_bound, hi_bound,\n units='mil'):\n \"\"\"Add a prior to a part of the model in order to constrain the total\n simulation space. Can only place constraints on thickness and dielectric\n for now.\n\n Arguments\n ---------\n layer_number : int\n The position of the layer in the AR coating stack. Indexed from 1, so\n incident `vacuum` is 0 and first AR coating layer is 1.\n prior_type : string\n Flags the prior as either a cut to dielectric constant or thickness.\n One of 'thickness', 't', 'dielectric', or 'd'.\n low_bound : float\n The lower boundary of the range.\n hi_bound : float\n The higher boundary of the range.\n units : string, optional\n The units of the lower and upper bounds. Only applies to 'thickness'\n cuts because dielectric constants are unitless. Defaults to `mils`.\n \"\"\"\n prior = {'layer_number': layer_number, 'prior_type': prior_type,\n 'low_bound': low_bound, 'hi_bound': hi_bound, 'units': units}\n self.priors.append(prior)\n return\n\n def lnlikelihood(self):\n return\n\n def lnprior(self):\n \"\"\"Define the known prior attributes of the model in order to constrain\n the simulation space.\n \"\"\"\n return\n\n def lnprobability(self):\n \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n \"\"\"\n return\n\n def sort_priors(self):\n \"\"\"Sort the contents of ``self.prior`` by layer number\n \n Returns\n -------\n sorted_priors : list\n A list of priors sorted by layer number. 
If a layer has both\n thickness and dielectric priors, the thickness dielectric is first\n and the dielectric is second.\n \"\"\"\n return\n",
"step-2": "<mask token>\n\n\nclass SourceLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer from which the simulated wave \n emanates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the source layer. Defaults to ``numpy.inf`` since the model\n doesn't care about the thickness of source layer. The thickness of the\n source layer should not be changed under normal operations.\n type : string\n The type of layer. Default is `Source`, which is an element of the model,\n but not the coating. Other acceptable types are `Layer` and `Terminator`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Source'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (source layer)'.format(self.name)\n\n\nclass SubstrateLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer to which the AR coating is \n attached.\n\n Attributes\n ----------\n thickness : float\n The thickness of the substrate layer. Defaults to 250 mils, which is \n the typical thickness of a sample puck used in the Berkeley FTS setup.\n This may be changed as is necessary, but the units must (eventually) be\n converted to meters before being fed to the simulator.\n type : string\n The type of layer\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = 250.0\n self.type = 'Substrate'\n\n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. Defaults to the \n current working directory.\n source : object\n ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n Default is `None`.\n stack : list\n The user-defined layers incorporated in the simulation EXCEPT the source\n and terminator layers. 
Default is empty list.\n    structure : list\n        The layers incorporated in the simulation INCLUDING the source and\n        terminator layers. Default is empty list. The list is populated \n        by creating layers and calling ``_interconnect()``.\n    terminator : object\n        ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n        Default is `None`.\n    \"\"\"\n\n    def __init__(self):\n        self.bands = [(81700000000.0, 107500000000.0), (128600000000.0, 167200000000.0), (196900000000.0, 249200000000.0)]\n        self.freq_sweep = 0.0\n        self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.time()))\n        self.optimization_frequency = 160000000000.0\n        self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(time.time()))\n        self.save_path = '.'\n        self.source = None\n        self.stack = []\n        self.structure = []\n        self.terminator = None\n\n    def _calc_R_T_amp(self, polarization, n, delta):\n        \"\"\"Calculate the reflected and transmitted amplitudes\n\n        Arguments\n        ---------\n        polarization : string\n            The polarization of the source wave. Must be one of: 's', 'p', or 'u'.\n        n : array\n            An array of refractive indices, ordered from source to terminator\n        delta : array\n            An array of wavevector offsets\n\n        Returns\n        -------\n        (r, t) : tuple\n            A tuple where 'r' is the reflected amplitude, and 't' is the\n            transmitted amplitude\n        \"\"\"\n        t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n        r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n        for i in range(len(self.structure) - 1):\n            t_amp[i, i + 1] = self._t_at_interface(polarization, n[i], n[i + 1])\n            r_amp[i, i + 1] = self._r_at_interface(polarization, n[i], n[i + 1])\n        M = np.zeros((len(self.structure), 2, 2), dtype=complex)\n        for i in range(1, len(self.structure) - 1):\n            M[i] = 1 / t_amp[i, i + 1] * np.dot(self._make_2x2(np.exp(-1.0j * delta[i]), 0.0, 0.0, np.exp(1.0j * delta[i]), dtype=complex), self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + 1], 1.0, dtype=complex))\n        M_prime = self._make_2x2(1.0, 0.0, 0.0, 1.0, dtype=complex)\n        for i in range(1, len(self.structure) - 1):\n            M_prime = np.dot(M_prime, M[i])\n        M_prime = np.dot(self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0, dtype=complex) / t_amp[0, 1], M_prime)\n        t = 1 / M_prime[0, 0]\n        r = M_prime[0, 1] / M_prime[0, 0]\n        return r, t\n\n    def _d_converter(self):\n        \"\"\"Check the units of all elements in the connected AR coating\n        stack. 
Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um': 1e-06, 'mm': 0.001, 'inch': 0.0254, 'in': 0.0254,\n 'micron': 1e-06, 'mil': 2.54e-05, 'm': 1.0}\n for i in self.stack:\n i.thickness = i.thickness * units[i.units]\n return\n\n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2 * np.pi * n * frequency * (1 + 0.5j * tan) / 300000000.0\n else:\n k = 2 * np.pi * n * frequency / 300000000.0\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid='ignore')\n delta = k * d\n sp.seterr(**olderr)\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp) ** 2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0.0,\n theta_f=0.0):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if polarization == 's':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n elif polarization == 'p':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. 
Defaults to float.\n \"\"\"\n array = np.empty((2, 2), dtype=dtype)\n array[0, 0] = A11\n array[0, 1] = A12\n array[1, 0] = A21\n array[1, 1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name + '.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return (n_1 - n_2) / (n_1 + n_2)\n elif polarization == 'p':\n return (n_1 - n_2) / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if layer.type == 'Layer' or layer.type == 'Substrate':\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2 * n_1 / (n_1 + n_2)\n elif polarization == 'p':\n return 2 * n_1 / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. 
Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data) / 2\n return T\n\n def add_layer(self, material, thickness=5.0, units='mil', type='layer',\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n self.source.dielectric = mats.Electrical.props[self.source.name\n ][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.source.losstangent = mats.Electrical.props[self.source\n .name][1]\n except:\n self.source.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n self.terminator.dielectric = mats.Electrical.props[self.\n terminator.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.terminator.losstangent = mats.Electrical.props[self.\n terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR'\n )\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric,\n loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n        \"\"\"\n        layer = Layer()\n        layer.units = units\n        layer.thickness = thickness\n        layer.dielectric = dielectric\n        layer.losstangent = loss_tangent\n        if stack_position == -1:\n            self.stack.append(layer)\n        else:\n            self.stack.insert(stack_position, layer)\n        return\n\n    def display_sim_parameters(self):\n        \"\"\"Display all the simulation parameters in one place.\"\"\"\n        pprint.pprint(vars(self))\n        return\n\n    def clear_structure(self):\n        \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n        self.structure = []\n        return\n\n    def remove_layer(self, layer_pos):\n        \"\"\"Remove the specified layer from the AR coating stack.\n\n        Arguments\n        ---------\n        layer_pos : int\n            The list index of the layer to remove from the AR coating stack\n        \"\"\"\n        self.stack.pop(layer_pos)\n        return\n\n    def run_sim(self):\n        \"\"\"Take the attributes of the ``Builder()`` object and execute the\n        simulation at each frequency in ``Builder().freq_sweep``. Save the\n        output to a columnized, tab-separated text file.\n\n        Returns\n        -------\n        transmission : array\n            A three-element array. The first element is a list of\n            frequencies, the second element is a list of the\n            transmissions at each frequency, and the third is a list of\n            the reflections at each frequency.\n        \"\"\"\n        t0 = time.time()\n        print('Beginning AR coating simulation')\n        self._d_converter()\n        self._interconnect()\n        f_list = []\n        t_list = []\n        r_list = []\n        for f in self.freq_sweep:\n            results = self.sim_single_freq(f)\n            f_list.append(f)\n            t_list.append(results['T'])\n            r_list.append(results['R'])\n        fs = np.asarray(f_list)\n        ts = np.asarray(t_list)\n        rs = np.asarray(r_list)\n        results = np.array([fs, ts, rs])\n        data_name = self._make_save_path(self.save_path, self.save_name)\n        header = 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude'\n        with open(data_name, 'wb') as f:\n            np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n        print('Finished running AR coating simulation')\n        t1 = time.time()\n        t_elapsed = t1 - t0\n        print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n        return results\n\n    def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units='ghz'):\n        \"\"\"Set the frequency range over which the simulation will run.\n\n        Arguments\n        ---------\n        lower_bound : float\n            The low end of the frequency range, given in GHz.\n        upper_bound : float\n            The high end of the frequency range, given in GHz.\n        resolution : float, optional\n            The interval at which to sample the frequency range, given in the\n            same units as the bounds. Defaults to 1.\n        units : str\n            The units of frequency. Must be one of:\n            Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n        \"\"\"\n        convert = {'Hz': 1.0, 'hz': 1.0, 'KHz': 1000.0, 'khz': 1000.0, 'MHz': 1000000.0, 'mhz': 1000000.0, 'GHz': 1000000000.0, 'ghz': 1000000000.0}\n        low = lower_bound * convert[units]\n        high = upper_bound * convert[units]\n        samples = int((high - low) / (resolution * convert[units])) + 1\n        self.freq_sweep = np.linspace(low, high, samples)\n        return\n\n    def show_materials(self):\n        \"\"\"List the materials with known properties. The listed material names \n        are keys in the materials properties dictionary. 
\n        \"\"\"\n        print('\\nThe materials with known dielectric properties are:\\n')\n        pprint.pprint(mats.Electrical.props)\n        print('\\nThe materials with known loss tangents are:\\n')\n        pprint.pprint(mats.Electrical.props)\n        return\n\n    def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n        \"\"\"Run the model simulation for a single frequency.\n\n        Arguments\n        ---------\n        frequency : float\n            The frequency at which to evaluate the model (in Hz).\n        polarization : string, optional\n            The polarization of the source wave. Must be one of: 's', \n            'p', or 'u'. Default is 's'.\n            \n            ### NOTE ###\n            I've chosen 's' polarization as the default because this \n            simulator only handles normal incidence waves, and at\n            normal incidence 's' and 'p' are equivalent.\n        theta_0 : float, optional\n            The angle of incidence at the first interface.\n\n        Returns\n        -------\n        result : dict\n            dict = {\n                'T' : array; the total transmission through the model.\n                'R' : array; the total reflection through the model.\n                }\n        \"\"\"\n        n = self._sort_ns()\n        d = self._sort_ds()\n        tan = self._sort_tans()\n        k = self._find_ks(n, frequency, tan)\n        delta = self._find_k_offsets(k, d)\n        r, t = self._calc_R_T_amp(polarization, n, delta)\n        T = self._get_T(polarization, t, n[0], n[-1])\n        R = self._get_R(r)\n        result = {'T': T, 'R': R}\n        return result\n\n    def snell(self, indices, theta_0):\n        \"\"\"Calculate the Snell angles for the entire model.\n\n        Arguments\n        ---------\n        indices : list\n            The list of indices of refraction for all elements in the model,\n            ordered from source to terminator.\n        theta_0 : float\n            The angle of incidence at the first interface.\n        \"\"\"\n        return np.arcsin(np.real_if_close(indices[0] * np.sin(theta_0) / np.asarray(indices)))\n\n\nclass MCMC:\n    \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n    methods to set up MCMC simulations and visualize the results.\n    \"\"\"\n\n    def __init__(self):\n        self.name = 'blah'\n        self.priors = []\n\n    def __repr__(self):\n        return '{} (MCMC object)'.format(self.name)\n\n    def add_prior(self, layer_number, prior_type, low_bound, hi_bound,\n        units='mil'):\n        \"\"\"Add a prior to a part of the model in order to constrain the total\n        simulation space. Can only place constraints on thickness and dielectric\n        for now.\n\n        Arguments\n        ---------\n        layer_number : int\n            The position of the layer in the AR coating stack. Indexed from 1, so\n            incident `vacuum` is 0 and first AR coating layer is 1.\n        prior_type : string\n            Flags the prior as either a cut to dielectric constant or thickness.\n            One of 'thickness', 't', 'dielectric', or 'd'.\n        low_bound : float\n            The lower boundary of the range.\n        hi_bound : float\n            The higher boundary of the range.\n        units : string, optional\n            The units of the lower and upper bounds. Only applies to 'thickness'\n            cuts because dielectric constants are unitless. Defaults to `mil`.\n        \"\"\"\n        prior = {'layer_number': layer_number, 'prior_type': prior_type,\n            'low_bound': low_bound, 'hi_bound': hi_bound, 'units': units}\n        self.priors.append(prior)\n        return\n\n    def lnlikelihood(self):\n        return\n\n    def lnprior(self):\n        \"\"\"Define the known prior attributes of the model in order to constrain\n        the simulation space.\n        \"\"\"\n        return\n\n    def lnprobability(self):\n        \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n        \"\"\"\n        return\n\n    def sort_priors(self):\n        \"\"\"Sort the contents of ``self.priors`` by layer number\n        \n        Returns\n        -------\n        sorted_priors : list\n            A list of priors sorted by layer number. 
If a layer has both\n            thickness and dielectric priors, the thickness prior is first\n            and the dielectric prior is second.\n        \"\"\"\n        return\n",
"step-3": "<mask token>\n\n\nclass Layer:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_index(self):\n \"\"\"Return the refractive index of the layer.\"\"\"\n return np.sqrt(self.dielectric)\n <mask token>\n\n\nclass SourceLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer from which the simulated wave \n emanates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the source layer. Defaults to ``numpy.inf`` since the model\n doesn't care about the thickness of source layer. The thickness of the\n source layer should not be changed under normal operations.\n type : string\n The type of layer. Default is `Source`, which is an element of the model,\n but not the coating. Other acceptable types are `Layer` and `Terminator`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Source'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (source layer)'.format(self.name)\n\n\nclass SubstrateLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer to which the AR coating is \n attached.\n\n Attributes\n ----------\n thickness : float\n The thickness of the substrate layer. Defaults to 250 mils, which is \n the typical thickness of a sample puck used in the Berkeley FTS setup.\n This may be changed as is necessary, but the units must (eventually) be\n converted to meters before being fed to the simulator.\n type : string\n The type of layer\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = 250.0\n self.type = 'Substrate'\n\n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. 
Defaults to the \n        current working directory.\n    source : object\n        ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n        Default is `None`.\n    stack : list\n        The user-defined layers incorporated in the simulation EXCEPT the source\n        and terminator layers. Default is empty list.\n    structure : list\n        The layers incorporated in the simulation INCLUDING the source and\n        terminator layers. Default is empty list. The list is populated \n        by creating layers and calling ``_interconnect()``.\n    terminator : object\n        ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n        Default is `None`.\n    \"\"\"\n\n    def __init__(self):\n        self.bands = [(81700000000.0, 107500000000.0), (128600000000.0, 167200000000.0), (196900000000.0, 249200000000.0)]\n        self.freq_sweep = 0.0\n        self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.time()))\n        self.optimization_frequency = 160000000000.0\n        self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(time.time()))\n        self.save_path = '.'\n        self.source = None\n        self.stack = []\n        self.structure = []\n        self.terminator = None\n\n    def _calc_R_T_amp(self, polarization, n, delta):\n        \"\"\"Calculate the reflected and transmitted amplitudes\n\n        Arguments\n        ---------\n        polarization : string\n            The polarization of the source wave. Must be one of: 's', 'p', or 'u'.\n        n : array\n            An array of refractive indices, ordered from source to terminator\n        delta : array\n            An array of wavevector offsets\n\n        Returns\n        -------\n        (r, t) : tuple\n            A tuple where 'r' is the reflected amplitude, and 't' is the\n            transmitted amplitude\n        \"\"\"\n        t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n        r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n        for i in range(len(self.structure) - 1):\n            t_amp[i, i + 1] = self._t_at_interface(polarization, n[i], n[i + 1])\n            r_amp[i, i + 1] = self._r_at_interface(polarization, n[i], n[i + 1])\n        M = np.zeros((len(self.structure), 2, 2), dtype=complex)\n        for i in range(1, len(self.structure) - 1):\n            M[i] = 1 / t_amp[i, i + 1] * np.dot(self._make_2x2(np.exp(-1.0j * delta[i]), 0.0, 0.0, np.exp(1.0j * delta[i]), dtype=complex), self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + 1], 1.0, dtype=complex))\n        M_prime = self._make_2x2(1.0, 0.0, 0.0, 1.0, dtype=complex)\n        for i in range(1, len(self.structure) - 1):\n            M_prime = np.dot(M_prime, M[i])\n        M_prime = np.dot(self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0, dtype=complex) / t_amp[0, 1], M_prime)\n        t = 1 / M_prime[0, 0]\n        r = M_prime[0, 1] / M_prime[0, 0]\n        return r, t\n\n    def _d_converter(self):\n        \"\"\"Check the units of all elements in the connected AR coating\n        stack. 
Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um': 1e-06, 'mm': 0.001, 'inch': 0.0254, 'in': 0.0254,\n 'micron': 1e-06, 'mil': 2.54e-05, 'm': 1.0}\n for i in self.stack:\n i.thickness = i.thickness * units[i.units]\n return\n\n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2 * np.pi * n * frequency * (1 + 0.5j * tan) / 300000000.0\n else:\n k = 2 * np.pi * n * frequency / 300000000.0\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid='ignore')\n delta = k * d\n sp.seterr(**olderr)\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp) ** 2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0.0,\n theta_f=0.0):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if polarization == 's':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n elif polarization == 'p':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. 
Defaults to float.\n \"\"\"\n array = np.empty((2, 2), dtype=dtype)\n array[0, 0] = A11\n array[0, 1] = A12\n array[1, 0] = A21\n array[1, 1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name + '.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return (n_1 - n_2) / (n_1 + n_2)\n elif polarization == 'p':\n return (n_1 - n_2) / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if layer.type == 'Layer' or layer.type == 'Substrate':\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2 * n_1 / (n_1 + n_2)\n elif polarization == 'p':\n return 2 * n_1 / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. 
Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data) / 2\n return T\n\n def add_layer(self, material, thickness=5.0, units='mil', type='layer',\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n self.source.dielectric = mats.Electrical.props[self.source.name\n ][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.source.losstangent = mats.Electrical.props[self.source\n .name][1]\n except:\n self.source.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n self.terminator.dielectric = mats.Electrical.props[self.\n terminator.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.terminator.losstangent = mats.Electrical.props[self.\n terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR'\n )\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric,\n loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n        \"\"\"\n        layer = Layer()\n        layer.units = units\n        layer.thickness = thickness\n        layer.dielectric = dielectric\n        layer.losstangent = loss_tangent\n        if stack_position == -1:\n            self.stack.append(layer)\n        else:\n            self.stack.insert(stack_position, layer)\n        return\n\n    def display_sim_parameters(self):\n        \"\"\"Display all the simulation parameters in one place.\"\"\"\n        pprint.pprint(vars(self))\n        return\n\n    def clear_structure(self):\n        \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n        self.structure = []\n        return\n\n    def remove_layer(self, layer_pos):\n        \"\"\"Remove the specified layer from the AR coating stack.\n\n        Arguments\n        ---------\n        layer_pos : int\n            The list index of the layer to remove from the AR coating stack\n        \"\"\"\n        self.stack.pop(layer_pos)\n        return\n\n    def run_sim(self):\n        \"\"\"Take the attributes of the ``Builder()`` object and execute the\n        simulation at each frequency in ``Builder().freq_sweep``. Save the\n        output to a columnized, tab-separated text file.\n\n        Returns\n        -------\n        transmission : array\n            A three-element array. The first element is a list of\n            frequencies, the second element is a list of the\n            transmissions at each frequency, and the third is a list of\n            the reflections at each frequency.\n        \"\"\"\n        t0 = time.time()\n        print('Beginning AR coating simulation')\n        self._d_converter()\n        self._interconnect()\n        f_list = []\n        t_list = []\n        r_list = []\n        for f in self.freq_sweep:\n            results = self.sim_single_freq(f)\n            f_list.append(f)\n            t_list.append(results['T'])\n            r_list.append(results['R'])\n        fs = np.asarray(f_list)\n        ts = np.asarray(t_list)\n        rs = np.asarray(r_list)\n        results = np.array([fs, ts, rs])\n        data_name = self._make_save_path(self.save_path, self.save_name)\n        header = 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude'\n        with open(data_name, 'wb') as f:\n            np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n        print('Finished running AR coating simulation')\n        t1 = time.time()\n        t_elapsed = t1 - t0\n        print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n        return results\n\n    def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units='ghz'):\n        \"\"\"Set the frequency range over which the simulation will run.\n\n        Arguments\n        ---------\n        lower_bound : float\n            The low end of the frequency range, given in GHz.\n        upper_bound : float\n            The high end of the frequency range, given in GHz.\n        resolution : float, optional\n            The interval at which to sample the frequency range, given in the\n            same units as the bounds. Defaults to 1.\n        units : str\n            The units of frequency. Must be one of:\n            Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n        \"\"\"\n        convert = {'Hz': 1.0, 'hz': 1.0, 'KHz': 1000.0, 'khz': 1000.0, 'MHz': 1000000.0, 'mhz': 1000000.0, 'GHz': 1000000000.0, 'ghz': 1000000000.0}\n        low = lower_bound * convert[units]\n        high = upper_bound * convert[units]\n        samples = int((high - low) / (resolution * convert[units])) + 1\n        self.freq_sweep = np.linspace(low, high, samples)\n        return\n\n    def show_materials(self):\n        \"\"\"List the materials with known properties. The listed material names \n        are keys in the materials properties dictionary. 
\n        \"\"\"\n        print('\\nThe materials with known dielectric properties are:\\n')\n        pprint.pprint(mats.Electrical.props)\n        print('\\nThe materials with known loss tangents are:\\n')\n        pprint.pprint(mats.Electrical.props)\n        return\n\n    def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n        \"\"\"Run the model simulation for a single frequency.\n\n        Arguments\n        ---------\n        frequency : float\n            The frequency at which to evaluate the model (in Hz).\n        polarization : string, optional\n            The polarization of the source wave. Must be one of: 's', \n            'p', or 'u'. Default is 's'.\n            \n            ### NOTE ###\n            I've chosen 's' polarization as the default because this \n            simulator only handles normal incidence waves, and at\n            normal incidence 's' and 'p' are equivalent.\n        theta_0 : float, optional\n            The angle of incidence at the first interface.\n\n        Returns\n        -------\n        result : dict\n            dict = {\n                'T' : array; the total transmission through the model.\n                'R' : array; the total reflection through the model.\n                }\n        \"\"\"\n        n = self._sort_ns()\n        d = self._sort_ds()\n        tan = self._sort_tans()\n        k = self._find_ks(n, frequency, tan)\n        delta = self._find_k_offsets(k, d)\n        r, t = self._calc_R_T_amp(polarization, n, delta)\n        T = self._get_T(polarization, t, n[0], n[-1])\n        R = self._get_R(r)\n        result = {'T': T, 'R': R}\n        return result\n\n    def snell(self, indices, theta_0):\n        \"\"\"Calculate the Snell angles for the entire model.\n\n        Arguments\n        ---------\n        indices : list\n            The list of indices of refraction for all elements in the model,\n            ordered from source to terminator.\n        theta_0 : float\n            The angle of incidence at the first interface.\n        \"\"\"\n        return np.arcsin(np.real_if_close(indices[0] * np.sin(theta_0) / np.asarray(indices)))\n\n\nclass MCMC:\n    \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n    methods to set up MCMC simulations and visualize the results.\n    \"\"\"\n\n    def __init__(self):\n        self.name = 'blah'\n        self.priors = []\n\n    def __repr__(self):\n        return '{} (MCMC object)'.format(self.name)\n\n    def add_prior(self, layer_number, prior_type, low_bound, hi_bound,\n        units='mil'):\n        \"\"\"Add a prior to a part of the model in order to constrain the total\n        simulation space. Can only place constraints on thickness and dielectric\n        for now.\n\n        Arguments\n        ---------\n        layer_number : int\n            The position of the layer in the AR coating stack. Indexed from 1, so\n            incident `vacuum` is 0 and first AR coating layer is 1.\n        prior_type : string\n            Flags the prior as either a cut to dielectric constant or thickness.\n            One of 'thickness', 't', 'dielectric', or 'd'.\n        low_bound : float\n            The lower boundary of the range.\n        hi_bound : float\n            The higher boundary of the range.\n        units : string, optional\n            The units of the lower and upper bounds. Only applies to 'thickness'\n            cuts because dielectric constants are unitless. Defaults to `mil`.\n        \"\"\"\n        prior = {'layer_number': layer_number, 'prior_type': prior_type,\n            'low_bound': low_bound, 'hi_bound': hi_bound, 'units': units}\n        self.priors.append(prior)\n        return\n\n    def lnlikelihood(self):\n        return\n\n    def lnprior(self):\n        \"\"\"Define the known prior attributes of the model in order to constrain\n        the simulation space.\n        \"\"\"\n        return\n\n    def lnprobability(self):\n        \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n        \"\"\"\n        return\n\n    def sort_priors(self):\n        \"\"\"Sort the contents of ``self.priors`` by layer number\n        \n        Returns\n        -------\n        sorted_priors : list\n            A list of priors sorted by layer number. 
If a layer has both\n            thickness and dielectric priors, the thickness prior is first\n            and the dielectric prior is second.\n        \"\"\"\n        return\n",
"step-4": "<mask token>\n\n\nclass Layer:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_index(self):\n \"\"\"Return the refractive index of the layer.\"\"\"\n return np.sqrt(self.dielectric)\n\n def ideal_thickness(self, opt_freq=160000000000.0):\n \"\"\"Return the ideal quarter wavelength thickness of the AR coating layer\n at a given optimization frequency.\n \n Arguments\n ---------\n opt_freq : float, optional\n The optimization frequency (in Hz) for the layers thickness. Defaults \n to 160 GHz.\n \"\"\"\n return 1 / np.sqrt(self.dielectric) * 300000000.0 / (4 * opt_freq)\n\n\nclass SourceLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer from which the simulated wave \n emanates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the source layer. Defaults to ``numpy.inf`` since the model\n doesn't care about the thickness of source layer. The thickness of the\n source layer should not be changed under normal operations.\n type : string\n The type of layer. Default is `Source`, which is an element of the model,\n but not the coating. Other acceptable types are `Layer` and `Terminator`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Source'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (source layer)'.format(self.name)\n\n\nclass SubstrateLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer to which the AR coating is \n attached.\n\n Attributes\n ----------\n thickness : float\n The thickness of the substrate layer. Defaults to 250 mils, which is \n the typical thickness of a sample puck used in the Berkeley FTS setup.\n This may be changed as is necessary, but the units must (eventually) be\n converted to meters before being fed to the simulator.\n type : string\n The type of layer\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = 250.0\n self.type = 'Substrate'\n\n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. 
Defaults to 160e9 Hz (160 GHz).\n    save_name : string\n        The name under which the results of the simulation are saved. Defaults to\n        'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n        overwriting previous simulation results.\n    save_path : string\n        The path to which the simulation results will be saved. Defaults to the \n        current working directory.\n    source : object\n        ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n        Default is `None`.\n    stack : list\n        The user-defined layers incorporated in the simulation EXCEPT the source\n        and terminator layers. Default is empty list.\n    structure : list\n        The layers incorporated in the simulation INCLUDING the source and\n        terminator layers. Default is empty list. The list is populated \n        by creating layers and calling ``_interconnect()``.\n    terminator : object\n        ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n        Default is `None`.\n    \"\"\"\n\n    def __init__(self):\n        self.bands = [(81700000000.0, 107500000000.0), (128600000000.0, 167200000000.0), (196900000000.0, 249200000000.0)]\n        self.freq_sweep = 0.0\n        self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.time()))\n        self.optimization_frequency = 160000000000.0\n        self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(time.time()))\n        self.save_path = '.'\n        self.source = None\n        self.stack = []\n        self.structure = []\n        self.terminator = None\n\n    def _calc_R_T_amp(self, polarization, n, delta):\n        \"\"\"Calculate the reflected and transmitted amplitudes\n\n        Arguments\n        ---------\n        polarization : string\n            The polarization of the source wave. Must be one of: 's', 'p', or 'u'.\n        n : array\n            An array of refractive indices, ordered from source to terminator\n        delta : array\n            An array of wavevector offsets\n\n        Returns\n        -------\n        (r, t) : tuple\n            A tuple where 'r' is the reflected amplitude, and 't' is the\n            transmitted amplitude\n        \"\"\"\n        t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n        r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n        for i in range(len(self.structure) - 1):\n            t_amp[i, i + 1] = self._t_at_interface(polarization, n[i], n[i + 1])\n            r_amp[i, i + 1] = self._r_at_interface(polarization, n[i], n[i + 1])\n        M = np.zeros((len(self.structure), 2, 2), dtype=complex)\n        for i in range(1, len(self.structure) - 1):\n            M[i] = 1 / t_amp[i, i + 1] * np.dot(self._make_2x2(np.exp(-1.0j * delta[i]), 0.0, 0.0, np.exp(1.0j * delta[i]), dtype=complex), self._make_2x2(1.0, r_amp[i, i + 1], r_amp[i, i + 1], 1.0, dtype=complex))\n        M_prime = self._make_2x2(1.0, 0.0, 0.0, 1.0, dtype=complex)\n        for i in range(1, len(self.structure) - 1):\n            M_prime = np.dot(M_prime, M[i])\n        M_prime = np.dot(self._make_2x2(1.0, r_amp[0, 1], r_amp[0, 1], 1.0, dtype=complex) / t_amp[0, 1], M_prime)\n        t = 1 / M_prime[0, 0]\n        r = M_prime[0, 1] / M_prime[0, 0]\n        return r, t\n\n    def _d_converter(self):\n        \"\"\"Check the units of all elements in the connected AR 
coating\n stack. Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um': 1e-06, 'mm': 0.001, 'inch': 0.0254, 'in': 0.0254,\n 'micron': 1e-06, 'mil': 2.54e-05, 'm': 1.0}\n for i in self.stack:\n i.thickness = i.thickness * units[i.units]\n return\n\n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2 * np.pi * n * frequency * (1 + 0.5j * tan) / 300000000.0\n else:\n k = 2 * np.pi * n * frequency / 300000000.0\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid='ignore')\n delta = k * d\n sp.seterr(**olderr)\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp) ** 2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0.0,\n theta_f=0.0):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if polarization == 's':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n elif polarization == 'p':\n return np.abs(net_t_amp ** 2) * (n_f / n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. 
Defaults to float.\n \"\"\"\n array = np.empty((2, 2), dtype=dtype)\n array[0, 0] = A11\n array[0, 1] = A12\n array[1, 0] = A21\n array[1, 1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name + '.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return (n_1 - n_2) / (n_1 + n_2)\n elif polarization == 'p':\n return (n_1 - n_2) / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if layer.type == 'Layer' or layer.type == 'Substrate':\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2 * n_1 / (n_1 + n_2)\n elif polarization == 'p':\n return 2 * n_1 / (n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. 
Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data) / 2\n return T\n\n def add_layer(self, material, thickness=5.0, units='mil', type='layer',\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n self.source.dielectric = mats.Electrical.props[self.source.name\n ][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.source.losstangent = mats.Electrical.props[self.source\n .name][1]\n except:\n self.source.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n self.terminator.dielectric = mats.Electrical.props[self.\n terminator.name][0]\n except:\n raise KeyError(\"I don't know that material!\")\n try:\n self.terminator.losstangent = mats.Electrical.props[self.\n terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print(\"\\nI don't know this loss tangent. Setting loss to 0!\")\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR'\n )\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric,\n loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n \"\"\"\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if stack_position == -1:\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return\n\n def display_sim_parameters(self):\n \"\"\"Display all the simulation parameters in one place.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def clear_structure(self):\n \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n self.structure = []\n return\n\n def remove_layer(self, layer_pos):\n \"\"\"Remove the specified layer from the AR coating stack.\n\n Arguments\n ---------\n layer_pos : int\n The list index of the layer to remove from the AR coating stack\n \"\"\"\n self.stack.pop(layer_pos)\n return\n\n def run_sim(self):\n \"\"\"Take the attributes of the ``Builder()`` object and execute the\n simulation at each frequency in ``Builder().freq_sweep``. Save the\n output to a columnized, tab-separated text file.\n\n Returns\n -------\n transmission : array\n A three-element array. The first element is a list of\n frequencies, the second elements is a list of the\n transmissions at each frequency, and the third is a list of\n the reflections at each frequency.\n \"\"\"\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = (\n 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude')\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1 - t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results\n\n def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units=\n 'ghz'):\n \"\"\"Set the frequency range over which the simulation will run.\n \n Arguments\n ---------\n lower_bound : float\n The low end of the frequency range, given in GHz.\n upper_bound : float\n The high end of the frequency range, given in GHz.\n reolution : float, optional\n The interval at which to sample the frequency range, given in GHz.\n Defaults to 1 GHz.\n units : str\n The units of frequency. Must be one of:\n Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n \"\"\"\n convert = {'Hz': 1.0, 'hz': 1.0, 'KHz': 1000.0, 'khz': 1000.0,\n 'MHz': 1000000.0, 'mhz': 1000000.0, 'GHz': 1000000000.0, 'ghz':\n 1000000000.0}\n low = lower_bound * convert[units]\n high = upper_bound * convert[units]\n samples = (high - low) / resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return\n\n def show_materials(self):\n \"\"\"List the materials with known properties. The listed material names \n are keys in the materials properties dictionary. 
\n \"\"\"\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n return\n\n def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n \"\"\"Run the model simulation for a single frequency.\n\n Arguments\n ---------\n frequency : float\n The frequency at which to evaluate the model (in Hz).\n polarization : string, optional\n The polarization of the source wave. Must be one of: 's', \n 'p', or 'u'. Default is 's'.\n \n ### NOTE ###\n I've chosen 's' polarization as the default because this \n simulator only handles normal incidence waves, and and at \n normal incidence 's' and 'p' are equivalent.\n theta_0 : float, optional\n The angle of incidence at the first interface.\n\n Returns\n -------\n result : dict\n dict = {\n 'T' : array; the total transmission through the model.\n 'R' : array; the total reflection through the model.\n }\n \"\"\"\n n = self._sort_ns()\n d = self._sort_ds()\n tan = self._sort_tans()\n k = self._find_ks(n, frequency, tan)\n delta = self._find_k_offsets(k, d)\n r, t = self._calc_R_T_amp(polarization, n, delta)\n T = self._get_T(polarization, t, n[0], n[-1])\n R = self._get_R(r)\n result = {'T': T, 'R': R}\n return result\n\n def snell(self, indices, theta_0):\n \"\"\"Caclulate the Snell angles for the entire model.\n\n Arguments\n ---------\n indices : list\n The list of indices of refraction for all elements in the model,\n ordered from source to terminator.\n theta_0 : float\n The angle of incidence at the first interface.\n \"\"\"\n return sp.arcsin(np.real_if_close(n_list[0] * np.sin(th_0) / n_list))\n\n\nclass MCMC:\n \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n methods to set up MCMC simulations and visualize the results.\n \"\"\"\n\n def __init__(self):\n self.name = 'blah'\n self.priors = []\n\n def __repr__(self):\n return '{} (MCMC object)'.format(self.name)\n\n def add_prior(self, layer_number, prior_type, low_bound, hi_bound,\n units='mil'):\n \"\"\"Add a prior to a part of the model in order to constrain the total\n simulation space. Can only place constraints on thickness and dielectric\n for now.\n\n Arguments\n ---------\n layer_number : int\n The position of the layer in the AR coating stack. Indexed from 1, so\n incident `vacuum` is 0 and first AR coating layer is 1.\n prior_type : string\n Flags the prior as either a cut to dielectric constant or thickness.\n One of 'thickness', 't', 'dielectric', or 'd'.\n low_bound : float\n The lower boundary of the range.\n hi_bound : float\n The higher boundary of the range.\n units : string, optional\n The units of the lower and upper bounds. Only applies to 'thickness'\n cuts because dielectric constants are unitless. Defaults to `mils`.\n \"\"\"\n prior = {'layer_number': layer_number, 'prior_type': prior_type,\n 'low_bound': low_bound, 'hi_bound': hi_bound, 'units': units}\n self.priors.append(prior)\n return\n\n def lnlikelihood(self):\n return\n\n def lnprior(self):\n \"\"\"Define the known prior attributes of the model in order to constrain\n the simulation space.\n \"\"\"\n return\n\n def lnprobability(self):\n \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n \"\"\"\n return\n\n def sort_priors(self):\n \"\"\"Sort the contents of ``self.prior`` by layer number\n \n Returns\n -------\n sorted_priors : list\n A list of priors sorted by layer number. 
If a layer has both\n thickness and dielectric priors, the thickness dielectric is first\n and the dielectric is second.\n \"\"\"\n return\n",
"step-5": "\"\"\"\nSimulator contains the tools needed to set up a multilayer antireflection\ncoating simulation.\n\nBased on transfer matrix method outlined in Hou, H.S. 1974.\n\"\"\"\n\n# Author: Andrew Nadolski (with lots of help from previous work by Colin Merkel,\n# Steve Byrnes, and Aritoki Suzuki)\n# Filename: simulator.py\n\n\nimport glob\nimport os\nimport pprint\nimport time\nimport materials as mats\nimport numpy as np\nimport scipy as sp\n\n\nclass Layer:\n \"\"\"A layer in the AR coating.\n\n Attributes\n ----------\n name : string\n The name of the material comprising the layer. Default is 'Generic layer'\n thickness : float\n The thickness of the layer material. Default is 5 mil.\n type : string\n The type of layer. Default is `Layer`, which is an element of the AR\n coating. Other acceptable types are `Source` and `Terminator`.\n dielectric : float\n The dielectric constant of the layer material. Default is 1.\n losstangent : float\n The loss tangent of the material. Default is 0.\n \"\"\"\n def __init__(self):\n self.name = 'Generic layer'\n self.thickness = 5.\n self.type = 'Layer'\n self.units = 'mil'\n self.dielectric = 1.\n self.losstangent = 0.\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (AR layer)'.format(self.name)\n\n def display_layer_parameters(self):\n \"\"\"Display the attributes of the layer.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def get_index(self):\n \"\"\"Return the refractive index of the layer.\"\"\"\n return (np.sqrt(self.dielectric))\n\n def ideal_thickness(self, opt_freq=160e9):\n \"\"\"Return the ideal quarter wavelength thickness of the AR coating layer\n at a given optimization frequency.\n \n Arguments\n ---------\n opt_freq : float, optional\n The optimization frequency (in Hz) for the layers thickness. Defaults \n to 160 GHz.\n \"\"\"\n return (1/np.sqrt(self.dielectric)*3e8/(4*opt_freq))\n\n\nclass SourceLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer from which the simulated wave \n emanates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the source layer. Defaults to ``numpy.inf`` since the model\n doesn't care about the thickness of source layer. The thickness of the\n source layer should not be changed under normal operations.\n type : string\n The type of layer. Default is `Source`, which is an element of the model,\n but not the coating. Other acceptable types are `Layer` and `Terminator`.\n \"\"\"\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Source'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (source layer)'.format(self.name)\n\n\nclass SubstrateLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer to which the AR coating is \n attached.\n\n Attributes\n ----------\n thickness : float\n The thickness of the substrate layer. 
Defaults to 250 mils, which is \n the typical thickness of a sample puck used in the Berkeley FTS setup.\n This may be changed as is necessary, but the units must (eventually) be\n converted to meters before being fed to the simulator.\n type : string\n The type of layer\n \"\"\"\n def __init__(self):\n Layer.__init__(self)\n self.thickness = 250.\n self.type = 'Substrate'\n \n def __repr__(self):\n return '{} (substrate)'.format(self.name)\n\n\nclass TerminatorLayer(Layer):\n \"\"\"A special case of ``Layer``; represents the layer upon which the simulated wave \n terminates.\n\n Attributes\n ----------\n thickness : float\n The thickness of the terminating layer. Defaults to ``numpy.inf`` since\n the model doesn't care about the thickness of the terminating layer. \n The thickness of the terminating layer should not be changed under \n normal operations.\n type : string\n The type of layer. Default is `Terminator`, which is an element of the model,\n but not the coating. Other acceptable types are `Source` and `Layer`.\n \"\"\"\n def __init__(self):\n Layer.__init__(self)\n self.thickness = np.inf\n self.type = 'Terminator'\n\n def __repr__(self):\n \"\"\"Return a nice string formatted representation of the layer.\"\"\"\n return '{} (terminator layer)'.format(self.name)\n\n\nclass Builder:\n \"\"\"The main body of the simulator code.\n\n Attributes\n ----------\n bands : list\n A list of n tuples, with each tuple composed of a lower and upper limit\n for a frequency band in units of hertz. Default is the SPT-3G bands.\n freq_sweep : array\n The range of frequencies to be simulated. Defaults to 0. Set a frequency\n sweep by calling ``set_freq_sweep()``.\n optimization_frequency : float\n The frequency (in Hz) at which to calculate the ideal thickness for a given\n material. Defaults to 160e9 Hz (160 GHz).\n save_name : string\n The name under which the results of the simulation are saved. Defaults to\n 'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid\n overwriting previous simulation results.\n save_path : string\n The path to which the simulation results will be saved. Defaults to the \n current working directory.\n source : object\n ``Layer`` object ``SourceLayer`` that defines where the wave emanates from.\n Default is `None`.\n stack : list\n The user-defined layers incorporated in the simulation EXCEPT the source\n and terminator layers. Default is empty list.\n structure : list\n The layers incorporated in the simulation INCLUDING the source and\n terminator layers. Default is empty list. The list is populated \n by creating layers and calling ``_interconnect()``.\n terminator : object\n ``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.\n Defaults is `None`.\n \"\"\"\n def __init__(self):\n self.bands = [(81.7e9, 107.5e9),(128.6e9, 167.2e9),(196.9e9, 249.2e9)]\n self.freq_sweep = 0.\n self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.time()))\n self.optimization_frequency = 160e9 # given in Hz, i.e. 160 GHz\n self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(time.time()))\n self.save_path = '.'\n self.source = None\n self.stack = []\n self.structure = []\n self.terminator = None\n\n def _calc_R_T_amp(self, polarization, n, delta):\n \"\"\"Calculate the reflected and transmitted amplitudes\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
Must be one of: 's', 'p', or 'u'.\n n : array\n An array of refractive indices, ordered from source to terminator\n delta : array\n An array of wavevector offsets\n \n Returns\n -------\n (r, t) : tuple\n A tuple where 'r' is the reflected amplitude, and 't' is the\n transmitted amplitude\n \"\"\"\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n# # debugging statement\n# print(\"\\nr_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,r_amp[i][j]))\n# # debugging statement\n# print(\"\\nt_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,t_amp[i][j]))\n\n for i in range(len(self.structure)-1):\n t_amp[i,i+1] = self._t_at_interface(polarization, n[i], n[i+1])\n r_amp[i,i+1] = self._r_at_interface(polarization, n[i], n[i+1])\n# # debugging statement\n# print(\"\\nmod r_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,r_amp[i][j]))\n# # debugging statement\n# print(\"\\nmod t_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,t_amp[i][j]))\n\n M = np.zeros((len(self.structure),2,2),dtype=complex)\n# # debugging statement\n# print(\"\\nThe 'M' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"M{}{}{} ---> {}\".format(i,j,k,M[i][j][k]))\n\n m_r_amp = np.zeros((len(self.structure),2,2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure),2,2), dtype=complex)\n for i in range(1,len(self.structure)-1):\n m_t_amp[i] = self._make_2x2(np.exp(-1j*delta[i]), 0., 0., np.exp(1j*delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1., r_amp[i,i+1], r_amp[i,i+1], 1., dtype=complex)\n\n# # debugging statement\n# print(\"\\nThe temporary 'm_r_amp' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_r_amp{}{}{} ---> {}\".format(i,j,k,m_r_amp[i][j][k]))\n\n# # debugging statement\n# print(\"\\nThe temporary 'm_t_amp' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_t_amp{}{}{} ---> {}\".format(i,j,k,m_t_amp[i][j][k]))\n\n m_temp = np.dot(m_t_amp, m_r_amp)\n\n# # debugging statement\n# print(\"\\nThe 'm_temp' matrix is:\")\n# for i in m_temp:\n# print i\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_temp{}{}{} ---> {}\".format(i,j,k,m_temp[i][j][k]))\n\n for i in range(1,len(self.structure)-1):\n M[i] = 1/t_amp[i,i+1] * np.dot(self._make_2x2(np.exp(-1j*delta[i]),\n 0., 0., np.exp(1j*delta[i]),\n dtype=complex),\n self._make_2x2(1., r_amp[i,i+1], \\\n r_amp[i,i+1], 1., \\\n dtype=complex))\n# # debugging statement\n# print(\"\\nThe modified 'M' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"mod M{}{}{} ---> {}\".format(i,j,k,M[i][j][k]))\n\n M_prime = self._make_2x2(1., 0., 0., 1., dtype=complex)\n\n# # debugging statement\n# print(\"\\nThe first modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"1st mod M_prime{}{} ---> {}\".format(i,j,M_prime[i][j]))\n\n for i in range(1, len(self.structure)-1):\n# print(\"\\n'M_prime' #{} is:\\n{}\".format(i,M_prime))\n M_prime = np.dot(M_prime, 
M[i])\n\n# # debugging statement\n# print(\"\\nThe second modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"2nd mod M_prime{}{} ---> {}\".format(i,j,M_prime[i][j]))\n\n# print(\"\\nr_amp01 is ---> {}\".format(r_amp[0,1]))\n# print(\"t_amp01 is ---> {}\".format(t_amp[0,1]))\n\n mod_M_prime = self._make_2x2(1.,r_amp[0,1], r_amp[0,1], 1., dtype=complex)/t_amp[0,1]\n\n# # debugging statement\n# print(\"\\nThe third modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"3rd mod M_prime{}{} ---> {}\".format(i, j, mod_M_prime[i][j]))\n\n M_prime = np.dot(self._make_2x2(1., r_amp[0,1], r_amp[0,1], 1., \\\n dtype=complex)/t_amp[0,1], M_prime)\n\n# # debugging statement\n# print(\"\\nThe 'M_final' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"M_final{}{} ---> {}\".format(i, j, M_prime[i][j]))\n\n t = 1/M_prime[0,0]\n r = M_prime[0,1]/M_prime[0,0]\n\n# # debugging statement\n# print(\"\\n't' ---> {}\".format(t))\n# print(\"'r' ---> {}\".format(r))\n\n return (r, t)\n\n def _d_converter(self):\n \"\"\"Check the units of all elements in the connected ar coating\n stack. Convert the lengths of the layers to meters if they are\n not already in meters.\n \"\"\"\n units = {'um':1e-6, 'mm':1e-3, 'inch':2.54e-2, 'in':2.54e-2,\\\n 'micron':1e-6, 'mil':2.54e-5, 'm':1.0}\n for i in self.stack:\n i.thickness = i.thickness*units[i.units]\n return\n \n def _find_ks(self, n, frequency, tan, lossy=True):\n \"\"\"Calculate the wavenumbers.\n\n Arguments\n ---------\n n : array\n An array of refractive indices, ordered from source to\n terminator\n frequency : float\n The frequency at which to calculate the wavevector, k\n tan : array\n An array of loss tangents, ordered from vacuum to substrate\n lossy : boolean, optional\n If `True` the wavevector will be found for a lossy material.\n If `False` the wavevector will be found for lossless material.\n Default is `True`.\n Returns\n -------\n k : complex\n The complex wavenumber, k\n \"\"\"\n if lossy:\n k = 2*np.pi*n*frequency*(1+0.5j*tan)/3e8 # New expression for loss (as of 9/13/16), this one is more physical (i.e. subtractive)\n# k = 2*np.pi*n*frequency*(1-0.5j*tan)/3e8 # Original expression for loss (pre 9/13/16), but it is incorrectly ADDITIVE\n else:\n k = 2*np.pi*n*frequency/3e8\n return k\n\n def _find_k_offsets(self, k, d):\n \"\"\"Calculate the wavenumber offset, delta.\n\n Arguments\n ---------\n k : array\n The wavevector\n d : array\n An array of thicknesses, ordered from source to terminator\n\n Returns\n -------\n delta : array\n The wavenumber offset\n \"\"\"\n olderr = sp.seterr(invalid= 'ignore') # turn off 'invalid multiplication' error;\n # it's just the 'inf' boundaries\n delta = k * d\n sp.seterr(**olderr) # turn the error back on\n return delta\n\n def _get_R(self, net_r_amp):\n \"\"\"Return fraction of reflected power.\n\n Arguments\n ---------\n net_r_amp : float\n The net reflection amplitude after calculating the transfer matrix.\n \"\"\"\n return np.abs(net_r_amp)**2\n\n def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0., theta_f=0.):\n \"\"\"Return the fraction of transmitted power.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
One of: 's' or 'p'.\n net_t_amp : float\n The net transmission amplitude after calculating the transfer matrix.\n n_i : float\n The index of refraction of material 'i'.\n n_f : float\n The index of refraction of material 'f'.\n theta_i : float, optional\n The angle of incidence at interface 'i'. Default is 0.\n theta_f : float, optional\n The angle of incidence at interface 'f'. Default is 0.\n \"\"\"\n if (polarization=='s'):\n return np.abs(net_t_amp**2) * (n_f/n_i)\n elif (polarization=='p'):\n return np.abs(net_t_amp**2) * (n_f/n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _get_bandpass_stats(self):\n mean = []\n for band in self.bands:\n pass\n pass\n\n def _interconnect(self):\n \"\"\"Connect all the AR coating layer objects, ensuring that the source\n and terminator layers come first and last, respectively.\n \"\"\"\n self.clear_structure()\n self.structure.append(self.source)\n for i in range(len(self.stack)):\n self.structure.append(self.stack[i])\n self.structure.append(self.terminator)\n return\n\n def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n \"\"\"Return a 2x2 array quickly.\n\n Arguments\n ---------\n A11 : float\n Array element [0,0].\n A12 : float\n Array element [0,1].\n A21 : float\n Array element [1,0].\n A22 : float\n Array element [1,1].\n dtype : dtype, optional\n The datatype of the array. Defaults to float.\n \"\"\"\n array = np.empty((2,2), dtype=dtype)\n array[0,0] = A11\n array[0,1] = A12\n array[1,0] = A21\n array[1,1] = A22\n return array\n\n def _make_log(self):\n pass\n\n def _make_save_path(self, save_path, save_name):\n \"\"\"Assemble the file name and path to the results file.\n \n Returns\n -------\n path : string\n The full path to the save destination for the simulation results\n \"\"\"\n if save_name.endswith('.txt'):\n path = os.path.join(save_path, save_name)\n else:\n self.save_name = save_name+'.txt'\n path = os.path.join(save_path, save_name)\n return path\n\n def _r_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the reflected amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. 
Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n reflected amplitude : float\n The amplitude of the reflected power\n \"\"\"\n if polarization == 's':\n return ((n_1-n_2)/(n_1+n_2))\n elif polarization == 'p':\n return ((n_1-n_2)/(n_1+n_2))\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _sort_ns(self):\n \"\"\"Organize the refractive indices of the layers in the simulation.\n\n Returns\n -------\n n : array\n The ordered list of indices of refraction, from source to terminator\n \"\"\"\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n\n\n def _sort_ds(self):\n \"\"\"Organize the layers' thicknesses in the simulation.\n\n Returns\n -------\n d : array\n The ordered list of thicknesses, from source to terminator\n \"\"\"\n d = []\n for layer in self.structure:\n if (layer.type == 'Layer' or layer.type == 'Substrate'):\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d\n\n def _sort_tans(self):\n \"\"\"Organize the loss tangents of the layers in the simulation.\n\n Returns\n -------\n tan : array\n The ordered list of loss tangents, from source to terminator\n \"\"\"\n tan = []\n for layer in self.structure:\n tan.append(layer.losstangent)\n tan = np.asarray(tan)\n return tan\n\n def _t_at_interface(self, polarization, n_1, n_2):\n \"\"\"Calculate the transmission amplitude at an interface.\n\n Arguments\n ---------\n polarization : string\n The polarization of the source wave. Must be one of: 's' or 'p'.\n n_1 : float\n The index of refraction of the first material.\n n_2 : float\n The index of refraction of the second material.\n\n Returns\n -------\n transmitted_amplitude : float\n The amplitude of the transmitted power\n \"\"\"\n if polarization == 's':\n return 2*n_1/(n_1 + n_2)\n elif polarization == 'p':\n return 2*n_1/(n_1 + n_2)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")\n\n def _unpolarized_simulation(self, frequency, theta_0=0):\n \"\"\"Handle the special case of unpolarized light by running the model\n for both 's' and 'p' polarizations and computing the mean of the two\n results.\n\n Arguments\n ---------\n frequency : float\n The frequency (in Hz) at which to evaluate the model.\n theta_0 : float, optional\n The angle of incidence at the initial interface. Default is 0.\n \"\"\"\n s_data = self.simulate(frequency, 's', theta_0)\n p_data = self.simulate(frequency, 'p', theta_0)\n T = (s_data + p_data)/2\n return T\n \n def add_layer(self, material, thickness=5.0, units='mil', type='layer', \\\n stack_position=-1):\n \"\"\"Create a layer from the set of pre-programmed materials and add it\n to the AR coating stack\n\n Arguments\n ---------\n material : string\n A key in the dictionary of materials found in materials.py.\n You can view these materials by calling\n 'show_materials()'.\n thickness : float, optional\n The thickness of the AR coating layer material. Assumed to\n be given in 'mil' (i.e. thousandths of an inch) unless\n otherwise stated. Default is 5.\n units : string, optional\n The units of length for the AR coating layer. Default is 'mil'.\n Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n type : string, optional\n The layer type. Default is 'layer', which corresponds to\n an AR layer. 
Other options are 'source' or 'terminator', which\n correspond to source and terminator layers, respectively.\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) of the stack.\n \"\"\"\n\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n# layer.dielectric = mats.Electrical.DIELECTRIC[layer.name]\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# layer.losstangent = mats.Electrical.LOSS_TAN[layer.name]\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print('\\nI don\\'t know this loss tangent. Setting loss to 0!')\n if (stack_position == -1):\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n# self.source.dielectric = mats.Electrical.DIELECTRIC[self.source.name]\n self.source.dielectric = mats.Electrical.props[self.source.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# self.source.losstangent = mats.Electrical.LOSS_TAN[self.source.name]\n self.source.losstangent = mats.Electrical.props[self.source.name][1]\n except:\n self.source.losstangent = 0\n print('\\nI don\\'t know this loss tangent. Setting loss to 0!')\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n# self.terminator.dielectric = mats.Electrical.DIELECTRIC[self.terminator.name]\n self.terminator.dielectric = mats.Electrical.props[self.terminator.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# self.terminator.losstangent = mats.Electrical.LOSS_TAN[self.terminator.name]\n self.terminator.losstangent = mats.Electrical.props[self.terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print('\\nI don\\'t know this loss tangent. Setting loss to 0!')\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR')\n return\n\n def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1):\n \"\"\"Add a layer with custom properties to the AR stack.\n\n Arguments\n ---------\n material : string\n The name of the layer\n thickness : float\n The thickness of the layer\n units : string\n The units of length for the AR coating layer. Must be one of:\n { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }\n dielectric : float\n The dielectric constant of the AR coating layer\n loss_tangent : float\n The loss tangent of the AR coating layer\n stack_position : int, optional\n The position of the layer in the AR coating stack, indexed\n from 0. Default is -1 (i.e., layer is automatically added\n to the end (bottom?) 
of the stack.\n \"\"\"\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if (stack_position == -1):\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return\n\n def display_sim_parameters(self):\n \"\"\"Display all the simulation parameters in one place.\"\"\"\n pprint.pprint(vars(self))\n return\n\n def clear_structure(self):\n \"\"\"Remove all elements from the current AR ``structure``.\"\"\"\n self.structure = []\n return\n\n def remove_layer(self, layer_pos):\n \"\"\"Remove the specified layer from the AR coating stack.\n\n Arguments\n ---------\n layer_pos : int\n The list index of the layer to remove from the AR coating stack\n \"\"\"\n self.stack.pop(layer_pos)\n return\n\n def run_sim(self):\n \"\"\"Take the attributes of the ``Builder()`` object and execute the\n simulation at each frequency in ``Builder().freq_sweep``. Save the\n output to a columnized, tab-separated text file.\n\n Returns\n -------\n transmission : array\n A three-element array. The first element is a list of\n frequencies, the second elements is a list of the\n transmissions at each frequency, and the third is a list of\n the reflections at each frequency.\n \"\"\"\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude'\n# log_name = self._make_save_path(self.save_path, self.log_name)\n# log = self._make_log()\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n# with open(log_name, 'wb') as f:\n# for line in log:\n# f.writelines(line)\n# f.write('\\n')\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1-t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results\n\n def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units='ghz'):\n \"\"\"Set the frequency range over which the simulation will run.\n \n Arguments\n ---------\n lower_bound : float\n The low end of the frequency range, given in GHz.\n upper_bound : float\n The high end of the frequency range, given in GHz.\n reolution : float, optional\n The interval at which to sample the frequency range, given in GHz.\n Defaults to 1 GHz.\n units : str\n The units of frequency. 
Must be one of:\n Hz, hz, KHz, khz, MHz, mhz, GHz, ghz\n \"\"\"\n convert = {'Hz':1.0, 'hz':1.0, 'KHz':1e3, 'khz':1e3, 'MHz':1e6,\n 'mhz':1e6, 'GHz':1e9, 'ghz':1e9}\n low = lower_bound*convert[units]\n high = upper_bound*convert[units]\n samples = (high-low)/resolution\n self.freq_sweep = np.linspace(low, high, samples)\n return\n\n# def set_source_layer(self, material):\n# \"\"\"Change the source layer.\n\n# Arguments\n# ---------\n# material : string\n# A key in the dielectrics dictionary.\n# \"\"\"\n# self.source = SourceLayer(material)\n# return\n\n# def set_terminator_layer(self, material):\n# \"\"\"Change the terminator layer.\n\n# Arguments\n# ---------\n# material : string\n# A key in the dielectrics dictionary.\n# \"\"\"\n# self.terminator = TerminatorLayer(material)\n# return\n\n def show_materials(self):\n \"\"\"List the materials with known properties. The listed material names \n are keys in the materials properties dictionary. \n \"\"\"\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n# pprint.pprint(mats.Electrical.DIELECTRIC)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n# pprint.pprint(mats.Electrical.LOSS_TAN)\n return\n\n def sim_single_freq(self, frequency, polarization='s', theta_0=0):\n \"\"\"Run the model simulation for a single frequency.\n\n Arguments\n ---------\n frequency : float\n The frequency at which to evaluate the model (in Hz).\n polarization : string, optional\n The polarization of the source wave. Must be one of: 's', \n 'p', or 'u'. Default is 's'.\n \n ### NOTE ###\n I've chosen 's' polarization as the default because this \n simulator only handles normal incidence waves, and and at \n normal incidence 's' and 'p' are equivalent.\n theta_0 : float, optional\n The angle of incidence at the first interface.\n\n Returns\n -------\n result : dict\n dict = {\n 'T' : array; the total transmission through the model.\n 'R' : array; the total reflection through the model.\n }\n \"\"\"\n # check the desired polarization\n# if polarization == 'u':\n# return self._unpolarized_simulation(frequency)\n n = self._sort_ns() # get all refractive indices\n d = self._sort_ds() # get all thicknesses\n tan = self._sort_tans() # get all loss tans\n k = self._find_ks(n, frequency, tan) # find all wavevectors, k\n delta = self._find_k_offsets(k, d) # calculate all offsets\n r, t = self._calc_R_T_amp(polarization, n, delta) # get trans, ref amps\n T = self._get_T(polarization, t, n[0], n[-1]) # find net trans, ref power\n R = self._get_R(r)\n result = {'T':T, 'R':R}\n return result\n\n def snell(self, indices, theta_0):\n \"\"\"Caclulate the Snell angles for the entire model.\n\n Arguments\n ---------\n indices : list\n The list of indices of refraction for all elements in the model,\n ordered from source to terminator.\n theta_0 : float\n The angle of incidence at the first interface.\n \"\"\"\n return sp.arcsin(np.real_if_close(n_list[0]*np.sin(th_0) / n_list))\n\nclass MCMC:\n \"\"\"Contains the methods specific to ``emcee``, the MCMC Hammer, and helper\n methods to set up MCMC simulations and visualize the results.\n \"\"\"\n def __init__(self):\n self.name = 'blah'\n self.priors = []\n\n def __repr__(self):\n return '{} (MCMC object)'.format(self.name)\n\n def add_prior(self, layer_number, prior_type, low_bound, hi_bound, units='mil'):\n \"\"\"Add a prior to a part of the model in order to constrain the total\n simulation space. 
Can only place constraints on thickness and dielectric\n for now.\n\n Arguments\n ---------\n layer_number : int\n The position of the layer in the AR coating stack. Indexed from 1, so\n incident `vacuum` is 0 and first AR coating layer is 1.\n prior_type : string\n Flags the prior as either a cut to dielectric constant or thickness.\n One of 'thickness', 't', 'dielectric', or 'd'.\n low_bound : float\n The lower boundary of the range.\n hi_bound : float\n The higher boundary of the range.\n units : string, optional\n The units of the lower and upper bounds. Only applies to 'thickness'\n cuts because dielectric constants are unitless. Defaults to `mils`.\n \"\"\"\n prior = {'layer_number':layer_number, 'prior_type':prior_type, \\\n 'low_bound':low_bound, 'hi_bound':hi_bound, 'units':units}\n self.priors.append(prior)\n return\n\n def lnlikelihood(self):\n return\n\n def lnprior(self):\n \"\"\"Define the known prior attributes of the model in order to constrain\n the simulation space.\n \"\"\"\n \n return\n\n def lnprobability(self):\n \"\"\"The logspace sum of ``lnprior`` and ``lnlikelihood``.\n \"\"\"\n return\n\n def sort_priors(self):\n \"\"\"Sort the contents of ``self.prior`` by layer number\n \n Returns\n -------\n sorted_priors : list\n A list of priors sorted by layer number. If a layer has both\n thickness and dielectric priors, the thickness dielectric is first\n and the dielectric is second.\n \"\"\"\n return\n",
"step-ids": [
45,
51,
53,
54,
60
]
}
|
[
45,
51,
53,
54,
60
] |
import requests
import json
r = requests.get('http://pythonspot.com/')
jsondata = str(r.headers).replace("'", '"')
print(jsondata)
#headerObj = json.loads(jsondata)
#ERROR >> json.decoder.JSONDecodeError: Expecting ',' delimiter: line 1 column 556 (char 555)
#print(headerObj)["server"]
#print(headerObj)['content-length']
#print(headerObj)['content-encoding']
#print(headerObj)['content-type']
#print(headerObj)['date']
#print(headerObj)['x-powered-by']
## I could not solve the problem.
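
# The replace() trick above is fragile: it breaks as soon as a header value
# itself contains a quote character, which is what raises the JSONDecodeError.
# A sketch of a more robust route (names below are illustrative, not from the
# original): r.headers is already dict-like, so serialize it with json.dumps().
fixed_json = json.dumps(dict(r.headers))
header_obj = json.loads(fixed_json)
# Also note the subscript belongs inside print(), e.g. print(header_obj['Server']),
# not print(header_obj)['server']. Header key casing may vary by server.
print(header_obj.get('Content-Type'))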
|
normal
|
{
"blob_id": "7404dd324d54bb072e56985716bbae746b4dd219",
"index": 1395,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(jsondata)\n",
"step-3": "<mask token>\nr = requests.get('http://pythonspot.com/')\njsondata = str(r.headers).replace(\"'\", '\"')\nprint(jsondata)\n",
"step-4": "import requests\nimport json\nr = requests.get('http://pythonspot.com/')\njsondata = str(r.headers).replace(\"'\", '\"')\nprint(jsondata)\n",
"step-5": "import requests\nimport json\nr = requests.get('http://pythonspot.com/')\n\njsondata = str(r.headers).replace(\"'\", '\"')\nprint(jsondata)\n#headerObj = json.loads(jsondata)\n#ERROR >> json.decoder.JSONDecodeError: Expecting ',' delimiter: line 1 column 556 (char 555)\n\n#print(headerObj)[\"server\"]\n#print(headerObj)['content-length']\n#print(headerObj)['content-encoding']\n#print(headerObj)['content-type']\n#print(headerObj)['date']\n#print(headerObj)['x-powered-by']\n\n## I could not the problem.",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def fibonacci(n):
'''returns the nth number of the Fibonacci
    sequence, where the first position is indexed at 0.
    n must be an integer greater than or equal to 0'''
#these are the first two numbers in the sequence.
fib = [0,1]
    #If the user enters a number less than 2 then just get that number from the list.
if n <= 1:
#return list item at n
return fib[n]
else:
        #The first two positions are already defined, so iterate the sequence only n-1 times to reach that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = fib[0] + fib[1]
#shift all the numbers in the list one position to the left.
fib = [fib[1], nextnum]
        #The last number in the list is the position the user asked for, so return it.
return fib[1]
def lucas(n):
'''returns the nth number of the Lucas
    sequence, where the first position is indexed at 0.
    n must be an integer greater than or equal to 0'''
#these are the first two numbers in the Lucas sequence.
luke = [2,1]
    #If the user enters a number less than 2 then just get that number from the list.
if n <= 1:
#return list item at n
return luke[n]
else:
#The first two position are already defined so only calculate to the sequence n-1 times to get that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = luke[0] + luke[1]
#shift all the numbers in the list one position to the left.
luke = [luke[1], nextnum]
        #The last number in the list is the position the user asked for, so return it.
return luke[1]
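
# An aside (not in the original): Lucas and Fibonacci numbers are related by
# L(n) = F(n-1) + F(n+1) for n >= 1, e.g. lucas(5) == fibonacci(4) + fibonacci(6) == 3 + 8 == 11.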
def sum_series(n, x=0, y=1):
'''sum_series returns the nth number of the Fibonacci, the Lucas sequence
or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers
are optional.
Argument n as an integer is required.
    (n, 0, 1) returns the Fibonacci sequence at position n.
    (n, 2, 1) returns the Lucas sequence at position n.
    (n, 3, 2) returns the Foo sequence at position n.
    Any other combo (including no optional parameters) returns the Fibonacci sequence at position n.'''
###Fibonacci sequence calculator....
#these are the first two numbers in the sequence.
fib = [0,1]
    #If the user enters a number less than 2 then just get that number from the list.
if n <= 1:
#return list item at n
fibnum = fib[n]
else:
        #The first two positions are already defined, so iterate the sequence only n-1 times to reach that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = fib[0] + fib[1]
#shift all the numbers in the list one position to the left.
fib = [fib[1], nextnum]
        #The last number in the list is the position the user asked for, so return it.
fibnum = fib[1]
###Lucas sequence calculator...
#these are the first two numbers in the Lucas sequence.
luke = [2,1]
    #If the user enters a number less than 2 then just get that number from the list.
if n <= 1:
#return list item at n
lukenum = luke[n]
else:
        #The first two positions are already defined, so iterate the sequence only n-1 times to reach that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = luke[0] + luke[1]
#shift all the numbers in the list one position to the left.
luke = [luke[1], nextnum]
        #The last number in the list is the position the user asked for, so return it.
lukenum = luke[1]
###Foo sequence
#these are the first two numbers in the foo sequence.
foo = [3,2]
    #If the user enters a number less than 2 then just get that number from the list.
if n <= 1:
#return list item at n
foonum = foo[n]
else:
        #The first two positions are already defined, so iterate the sequence only n-1 times to reach that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = foo[0] + foo[1]
#shift all the numbers in the list one position to the left.
foo = [foo[1], nextnum]
        #The last number in the list is the position the user asked for, so return it.
foonum = foo[1]
if x == 0 and y == 1:
return fibnum
if x == 2 and y == 1:
return lukenum
    if x == 3 and y == 2:
return foonum
else:
return fibnum
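
# A quick sanity check (hypothetical usage, not part of the original script):
if __name__ == '__main__':
    assert fibonacci(10) == 55
    assert lucas(10) == 123
    assert sum_series(10) == 55 # defaults give the Fibonacci sequence
    assert sum_series(10, 2, 1) == 123 # Lucas
    assert sum_series(10, 3, 2) == 212 # Foo: 3, 2, 5, 7, 12, 19, ...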
|
normal
|
{
"blob_id": "ca75e23d91eef8a5c5b78c0ea7c903b80640af25",
"index": 7957,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-3": "<mask token>\n\n\ndef lucas(n):\n \"\"\"returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0\"\"\"\n luke = [2, 1]\n if n <= 1:\n return luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n return luke[1]\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-4": "def fibonacci(n):\n \"\"\"returns the nth number of the Fibonacci\n sequence. where the first position is indexed at 0.\n n must be an iteger greater than or equal to 0\"\"\"\n fib = [0, 1]\n if n <= 1:\n return fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n return fib[1]\n\n\ndef lucas(n):\n \"\"\"returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0\"\"\"\n luke = [2, 1]\n if n <= 1:\n return luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n return luke[1]\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-5": "def fibonacci(n):\n '''returns the nth number of the Fibonacci\n sequence. where the first position is indexed at 0.\n n must be an iteger greater than or equal to 0'''\n #these are the first two numbers in the sequence.\n fib = [0,1]\n #If the users enters a number less than 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n return fib[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = fib[0] + fib[1]\n #shift all the numbers in the list one position to the left.\n fib = [fib[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n return fib[1]\n \ndef lucas(n):\n '''returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0'''\n #these are the first two numbers in the Lucas sequence.\n luke = [2,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n return luke[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = luke[0] + luke[1]\n #shift all the numbers in the list one position to the left.\n luke = [luke[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n return luke[1]\n \n\n\ndef sum_series(n, x = 0, y = 1):\n\n '''sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.'''\n \n ###Fibonacci sequence calculator....\n #these are the first two numbers in the sequence.\n fib = [0,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n fibnum = fib[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = fib[0] + fib[1]\n #shift all the numbers in the list one position to the left.\n fib = [fib[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n fibnum = fib[1] \n ###Lucas sequence calculator...\n #these are the first two numbers in the Lucas sequence.\n luke = [2,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n lukenum = luke[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = luke[0] + luke[1]\n #shift all the numbers in the list one position to the left.\n luke = [luke[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. 
\n lukenum = luke[1] \n \n ###Foo sequence\n #these are the first two numbers in the foo sequence.\n foo = [3,2]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n foonum = foo[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = foo[0] + foo[1]\n #shift all the numbers in the list one position to the left.\n foo = [foo[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n foonum = foo[1] \n \n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x==3 and y ==2:\n return foonum\n else:\n return fibnum",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def times2(x):
return x * 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(df.head())
<|reserved_special_token_0|>
print('========================')
print(newdf)
def times2(x):
return x * 2
print('========================')
print(df['col1'].apply(times2))
print('========================')
print(df.sort_values(by='col2'))
print('========================')
print(df)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df = pd.DataFrame({'col1': [1, 2, 3, 4], 'col2': [444, 555, 666, 444],
'col3': ['abc', 'def', 'ghi', 'xyz']})
print(df.head())
newdf = df[(df['col1'] > 0) & (df['col2'] == 444)]
print('========================')
print(newdf)
def times2(x):
return x * 2
print('========================')
print(df['col1'].apply(times2))
print('========================')
print(df.sort_values(by='col2'))
print('========================')
print(df)
<|reserved_special_token_1|>
import pandas as pd
df = pd.DataFrame({'col1': [1, 2, 3, 4], 'col2': [444, 555, 666, 444],
'col3': ['abc', 'def', 'ghi', 'xyz']})
print(df.head())
newdf = df[(df['col1'] > 0) & (df['col2'] == 444)]
print('========================')
print(newdf)
def times2(x):
return x * 2
print('========================')
print(df['col1'].apply(times2))
print('========================')
print(df.sort_values(by='col2'))
print('========================')
print(df)
<|reserved_special_token_1|>
import pandas as pd
df = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})
print(df.head())
#print(df['col2'].unique())
#print(df['col1'] > 2)
newdf = df[(df['col1']>0) & (df['col2'] == 444)]
print("========================")
print(newdf)
def times2(x):
return x*2
print("========================")
print(df['col1'].apply(times2))
print("========================")
print(df.sort_values(by='col2'))
print("========================")
print(df)
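
# apply() also accepts a lambda, so the named helper above is optional;
# an equivalent sketch using the same df:
print(df['col1'].apply(lambda x: x * 2))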
|
flexible
|
{
"blob_id": "422a4945ebf453d3e09e9e7e76dd32b30488680e",
"index": 3011,
"step-1": "<mask token>\n\n\ndef times2(x):\n return x * 2\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(df.head())\n<mask token>\nprint('========================')\nprint(newdf)\n\n\ndef times2(x):\n return x * 2\n\n\nprint('========================')\nprint(df['col1'].apply(times2))\nprint('========================')\nprint(df.sort_values(by='col2'))\nprint('========================')\nprint(df)\n",
"step-3": "<mask token>\ndf = pd.DataFrame({'col1': [1, 2, 3, 4], 'col2': [444, 555, 666, 444],\n 'col3': ['abc', 'def', 'ghi', 'xyz']})\nprint(df.head())\nnewdf = df[(df['col1'] > 0) & (df['col2'] == 444)]\nprint('========================')\nprint(newdf)\n\n\ndef times2(x):\n return x * 2\n\n\nprint('========================')\nprint(df['col1'].apply(times2))\nprint('========================')\nprint(df.sort_values(by='col2'))\nprint('========================')\nprint(df)\n",
"step-4": "import pandas as pd\ndf = pd.DataFrame({'col1': [1, 2, 3, 4], 'col2': [444, 555, 666, 444],\n 'col3': ['abc', 'def', 'ghi', 'xyz']})\nprint(df.head())\nnewdf = df[(df['col1'] > 0) & (df['col2'] == 444)]\nprint('========================')\nprint(newdf)\n\n\ndef times2(x):\n return x * 2\n\n\nprint('========================')\nprint(df['col1'].apply(times2))\nprint('========================')\nprint(df.sort_values(by='col2'))\nprint('========================')\nprint(df)\n",
"step-5": "import pandas as pd\ndf = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})\nprint(df.head())\n#print(df['col2'].unique())\n#print(df['col1'] > 2)\nnewdf = df[(df['col1']>0) & (df['col2'] == 444)]\nprint(\"========================\")\nprint(newdf)\n\ndef times2(x):\n return x*2\n\nprint(\"========================\")\nprint(df['col1'].apply(times2))\n\nprint(\"========================\")\nprint(df.sort_values(by='col2'))\nprint(\"========================\")\nprint(df)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def min_hacks(d, p):
shots = [0]
damage = 0
for c in p:
if c == 'S':
shots[-1] += 1
damage += 2 ** (len(shots) - 1)
else:
shots.append(0)
hacks = 0
while damage > d:
hacked = False
for i in range(len(shots) - 1, 0, -1):
if shots[i] > 0:
shots[i] -= 1
shots[i - 1] += 1
damage -= 2 ** (i - 1)
hacks += 1
hacked = True
break
if not hacked:
return -1
return hacks
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def min_hacks(d, p):
shots = [0]
damage = 0
for c in p:
if c == 'S':
shots[-1] += 1
damage += 2 ** (len(shots) - 1)
else:
shots.append(0)
hacks = 0
while damage > d:
hacked = False
for i in range(len(shots) - 1, 0, -1):
if shots[i] > 0:
shots[i] -= 1
shots[i - 1] += 1
damage -= 2 ** (i - 1)
hacks += 1
hacked = True
break
if not hacked:
return -1
return hacks
<|reserved_special_token_0|>
for i in range(1, num_cases + 1):
current_case = input().split()
d = int(current_case[0])
p = current_case[1]
solution = min_hacks(d, p)
if solution < 0:
solution_string = 'IMPOSSIBLE'
else:
solution_string = str(solution)
print('Case #{:d}: {:s}'.format(i, solution_string))
<|reserved_special_token_1|>
def min_hacks(d, p):
shots = [0]
damage = 0
for c in p:
if c == 'S':
shots[-1] += 1
damage += 2 ** (len(shots) - 1)
else:
shots.append(0)
hacks = 0
while damage > d:
hacked = False
for i in range(len(shots) - 1, 0, -1):
if shots[i] > 0:
shots[i] -= 1
shots[i - 1] += 1
damage -= 2 ** (i - 1)
hacks += 1
hacked = True
break
if not hacked:
return -1
return hacks
num_cases = int(input())
for i in range(1, num_cases + 1):
current_case = input().split()
d = int(current_case[0])
p = current_case[1]
solution = min_hacks(d, p)
if solution < 0:
solution_string = 'IMPOSSIBLE'
else:
solution_string = str(solution)
print('Case #{:d}: {:s}'.format(i, solution_string))
<|reserved_special_token_1|>
# Return min number of hacks (swap of adjacent instructions)
# in p so that total damage <= d.
# If impossible, return -1
def min_hacks(d, p):
# list containing number of shoot commands per
    # damage level. Each element represents a
# damage level; 1, 2, 4, 8, ... and so on.
shots = [0]
damage = 0
for c in p:
if c == "S":
shots[-1] += 1
# we can also calculate damage here.
damage += 2 ** (len(shots) - 1)
else:
shots.append(0)
# each hack represents moving 1 shot down 1 element
# in the shots list. So keep doing this until
# damage is <= d.
hacks = 0
while damage > d:
# move 1 shot from highest element possible down 1 element.
hacked = False
for i in range(len(shots)-1, 0, -1):
if shots[i] > 0:
shots[i] -= 1
shots[i-1] += 1
damage -= 2 ** (i - 1) # damage = damage - 2**i + 2**(i-1)
hacks += 1
hacked = True
break
if not hacked:
# impossible to get damage <= d!
return -1
return hacks
num_cases = int(input())
for i in range(1, num_cases+1):
current_case = input().split()
d = int(current_case[0])
p = current_case[1]
solution = min_hacks(d, p)
if solution < 0:
solution_string = "IMPOSSIBLE"
else:
solution_string = str(solution)
print("Case #{:d}: {:s}".format(i, solution_string))
|
flexible
|
{
"blob_id": "607700faebc2018327d66939419cc24a563c3900",
"index": 6515,
"step-1": "<mask token>\n",
"step-2": "def min_hacks(d, p):\n shots = [0]\n damage = 0\n for c in p:\n if c == 'S':\n shots[-1] += 1\n damage += 2 ** (len(shots) - 1)\n else:\n shots.append(0)\n hacks = 0\n while damage > d:\n hacked = False\n for i in range(len(shots) - 1, 0, -1):\n if shots[i] > 0:\n shots[i] -= 1\n shots[i - 1] += 1\n damage -= 2 ** (i - 1)\n hacks += 1\n hacked = True\n break\n if not hacked:\n return -1\n return hacks\n\n\n<mask token>\n",
"step-3": "def min_hacks(d, p):\n shots = [0]\n damage = 0\n for c in p:\n if c == 'S':\n shots[-1] += 1\n damage += 2 ** (len(shots) - 1)\n else:\n shots.append(0)\n hacks = 0\n while damage > d:\n hacked = False\n for i in range(len(shots) - 1, 0, -1):\n if shots[i] > 0:\n shots[i] -= 1\n shots[i - 1] += 1\n damage -= 2 ** (i - 1)\n hacks += 1\n hacked = True\n break\n if not hacked:\n return -1\n return hacks\n\n\n<mask token>\nfor i in range(1, num_cases + 1):\n current_case = input().split()\n d = int(current_case[0])\n p = current_case[1]\n solution = min_hacks(d, p)\n if solution < 0:\n solution_string = 'IMPOSSIBLE'\n else:\n solution_string = str(solution)\n print('Case #{:d}: {:s}'.format(i, solution_string))\n",
"step-4": "def min_hacks(d, p):\n shots = [0]\n damage = 0\n for c in p:\n if c == 'S':\n shots[-1] += 1\n damage += 2 ** (len(shots) - 1)\n else:\n shots.append(0)\n hacks = 0\n while damage > d:\n hacked = False\n for i in range(len(shots) - 1, 0, -1):\n if shots[i] > 0:\n shots[i] -= 1\n shots[i - 1] += 1\n damage -= 2 ** (i - 1)\n hacks += 1\n hacked = True\n break\n if not hacked:\n return -1\n return hacks\n\n\nnum_cases = int(input())\nfor i in range(1, num_cases + 1):\n current_case = input().split()\n d = int(current_case[0])\n p = current_case[1]\n solution = min_hacks(d, p)\n if solution < 0:\n solution_string = 'IMPOSSIBLE'\n else:\n solution_string = str(solution)\n print('Case #{:d}: {:s}'.format(i, solution_string))\n",
"step-5": "# Return min number of hacks (swap of adjacent instructions)\n# in p so that total damage <= d.\n# If impossible, return -1\ndef min_hacks(d, p):\n\n # list containing number of shoot commands per\n # damage level. Each element is represents a\n # damage level; 1, 2, 4, 8, ... and so on.\n shots = [0]\n damage = 0\n for c in p:\n if c == \"S\":\n shots[-1] += 1\n # we can also calculate damage here.\n damage += 2 ** (len(shots) - 1)\n else:\n shots.append(0)\n\n # each hack represents moving 1 shot down 1 element\n # in the shots list. So keep doing this until\n # damage is <= d.\n hacks = 0\n while damage > d:\n # move 1 shot from highest element possible down 1 element.\n hacked = False\n for i in range(len(shots)-1, 0, -1):\n if shots[i] > 0:\n shots[i] -= 1\n shots[i-1] += 1\n damage -= 2 ** (i - 1) # damage = damage - 2**i + 2**(i-1)\n hacks += 1\n hacked = True\n break\n\n if not hacked:\n # impossible to get damage <= d!\n return -1\n\n return hacks\n\nnum_cases = int(input())\nfor i in range(1, num_cases+1):\n current_case = input().split()\n d = int(current_case[0])\n p = current_case[1]\n solution = min_hacks(d, p)\n if solution < 0:\n solution_string = \"IMPOSSIBLE\"\n else:\n solution_string = str(solution)\n print(\"Case #{:d}: {:s}\".format(i, solution_string))\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Resource:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Resource:
def __init__(self, row: tuple):
self.video_path = row[0]
self.pic_path = row[1]
<|reserved_special_token_1|>
from connect import Connect
class Resource:
def __init__(self, row: tuple):
self.video_path = row[0]
self.pic_path = row[1]
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from connect import Connect
class Resource:
def __init__(self, row: tuple):
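        # row is expected to be a (video_path, pic_path) pair, e.g. one fetched DB row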
self.video_path = row[0]
self.pic_path = row[1]
|
flexible
|
{
"blob_id": "65aa27addaec6014fe5fd66df2c0d3632231a314",
"index": 3124,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Resource:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Resource:\n\n def __init__(self, row: tuple):\n self.video_path = row[0]\n self.pic_path = row[1]\n",
"step-4": "from connect import Connect\n\n\nclass Resource:\n\n def __init__(self, row: tuple):\n self.video_path = row[0]\n self.pic_path = row[1]\n",
"step-5": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nfrom connect import Connect\n\n\nclass Resource:\n def __init__(self, row: tuple):\n self.video_path = row[0]\n self.pic_path = row[1]\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import unittest
import sys
from tests.jep_pipe import jep_pipe
from tests.jep_pipe import build_java_process_cmd
import jep
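# These tests launch Java processes that embed Python through Jep, so they are
# skipped on Windows and when Jep was built without numpy support.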
@unittest.skipIf(sys.platform.startswith("win"), "subprocess complications on Windows")
class TestSharedModules(unittest.TestCase):
def setUp(self):
pass
def test_shared_modules(self):
jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')
def test_numpy_prod_succeeds(self):
jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')
def test_numpy_array_to_string(self):
jep_pipe(build_java_process_cmd(
'jep.test.numpy.TestNumpyArrayToString'))
|
normal
|
{
"blob_id": "39bc90f34cccebe9a8b1475e396caa1c14f6b2df",
"index": 9004,
"step-1": "<mask token>\n\n\n@unittest.skipIf(sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\n@unittest.skipIf(sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n <mask token>\n",
"step-3": "<mask token>\n\n\n@unittest.skipIf(sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-4": "import unittest\nimport sys\nfrom tests.jep_pipe import jep_pipe\nfrom tests.jep_pipe import build_java_process_cmd\nimport jep\n\n\n@unittest.skipIf(sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-5": "import unittest\nimport sys\nfrom tests.jep_pipe import jep_pipe\nfrom tests.jep_pipe import build_java_process_cmd\nimport jep\n\n\n@unittest.skipIf(sys.platform.startswith(\"win\"), \"subprocess complications on Windows\")\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
# DATE 2018-08-21
# AUTHOR = tongzz
#
import MySQLdb
from Elements.LoginElements import *
import datetime
import sys
class Tradepasswd():
def __init__(self):
self.db_config={
'host': '172.28.38.59',
'usr': 'mysqladmin',
'passwd': '123465',
'port': '3306',
'db': 'hdb'
}
def tradePasswd(self):
try:
conn = MySQLdb.connect(host=self.db_config['host'],user=self.db_config['usr'],passwd=self.db_config['passwd'],db=self.db_config['db'])
conn.autocommit(True)
curr = conn.cursor()
curr.execute("SET NAMES utf8")
curr.execute("USE %s"% self.db_config['db'])
# print u"******************** 操作数据库对象成功 ********************"
# return conn,curr
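            # NOTE: interpolating username straight into the SQL string is injection-prone; a parameterized query would be safer.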
tradepasswd_sql = "UPDATE member set trade_pwd = NULL where uname = " + username + ";"
curr.execute(tradepasswd_sql)
# curr.fetchone()
print u"恢复交易密码成功"
curr.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d:%s"%(e.args[0],e.args[1])
return tradepasswd_sql
|
normal
|
{
"blob_id": "ed66e8028d653cf6b7ea4703fef5a658665c48db",
"index": 1034,
"step-1": "# -*- coding: utf-8 -*-\r\n# DATE 2018-08-21\r\n# AUTHER = tongzz\r\n#\r\n\r\nimport MySQLdb\r\nfrom Elements.LoginElements import *\r\nimport datetime\r\nimport sys\r\nclass Tradepasswd():\r\n def __init__(self):\r\n self.db_config={\r\n 'host': '172.28.38.59',\r\n 'usr': 'mysqladmin',\r\n 'passwd': '123465',\r\n 'port': '3306',\r\n 'db': 'hdb'\r\n }\r\n def tradePasswd(self):\r\n try:\r\n conn = MySQLdb.connect(host=self.db_config['host'],user=self.db_config['usr'],passwd=self.db_config['passwd'],db=self.db_config['db'])\r\n conn.autocommit(True)\r\n curr = conn.cursor()\r\n curr.execute(\"SET NAMES utf8\")\r\n curr.execute(\"USE %s\"% self.db_config['db'])\r\n # print u\"******************** 操作数据库对象成功 ********************\"\r\n # return conn,curr\r\n tradepasswd_sql = \"UPDATE member set trade_pwd = NULL where uname = \" + username + \";\"\r\n curr.execute(tradepasswd_sql)\r\n # curr.fetchone()\r\n print u\"恢复交易密码成功\"\r\n curr.close()\r\n conn.close()\r\n except MySQLdb.Error,e:\r\n print \"Mysql Error %d:%s\"%(e.args[0],e.args[1])\r\n return tradepasswd_sql\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def home(request):
return render(request, 'home.html')
<|reserved_special_token_0|>
def docs(request):
return render(request, 'docs.html')
<|reserved_special_token_0|>
def publications(request):
return render(request, 'publications.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def home(request):
return render(request, 'home.html')
<|reserved_special_token_0|>
def docs(request):
return render(request, 'docs.html')
<|reserved_special_token_0|>
def publications(request):
return render(request, 'publications.html')
def access(request):
return render(request, 'access.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def home(request):
return render(request, 'home.html')
<|reserved_special_token_0|>
def docs(request):
return render(request, 'docs.html')
def gallery(request, page=None):
if page:
return render(request, 'gallery_' + str(page) + '.html')
return render(request, 'gallery.html')
def publications(request):
return render(request, 'publications.html')
def access(request):
return render(request, 'access.html')
<|reserved_special_token_1|>
from django.http import HttpResponse
from django.shortcuts import render
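# Simple template-rendering views; gallery() can dispatch to a numbered gallery_<page>.html.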
def home(request):
return render(request, 'home.html')
def people(request):
return render(request, 'people.html')
def docs(request):
return render(request, 'docs.html')
def gallery(request, page=None):
if page:
return render(request, 'gallery_' + str(page) + '.html')
return render(request, 'gallery.html')
def publications(request):
return render(request, 'publications.html')
def access(request):
return render(request, 'access.html')
|
flexible
|
{
"blob_id": "f7a493ab8e9845d0e9da33a0ee45d7c3ef66deb5",
"index": 7507,
"step-1": "<mask token>\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n<mask token>\n\n\ndef docs(request):\n return render(request, 'docs.html')\n\n\n<mask token>\n\n\ndef publications(request):\n return render(request, 'publications.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n<mask token>\n\n\ndef docs(request):\n return render(request, 'docs.html')\n\n\n<mask token>\n\n\ndef publications(request):\n return render(request, 'publications.html')\n\n\ndef access(request):\n return render(request, 'access.html')\n",
"step-3": "<mask token>\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n<mask token>\n\n\ndef docs(request):\n return render(request, 'docs.html')\n\n\ndef gallery(request, page=None):\n if page:\n return render(request, 'gallery_' + str(page) + '.html')\n return render(request, 'gallery.html')\n\n\ndef publications(request):\n return render(request, 'publications.html')\n\n\ndef access(request):\n return render(request, 'access.html')\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import render\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\ndef people(request):\n return render(request, 'people.html')\n\n\ndef docs(request):\n return render(request, 'docs.html')\n\n\ndef gallery(request, page=None):\n if page:\n return render(request, 'gallery_' + str(page) + '.html')\n return render(request, 'gallery.html')\n\n\ndef publications(request):\n return render(request, 'publications.html')\n\n\ndef access(request):\n return render(request, 'access.html')\n",
"step-5": null,
"step-ids": [
3,
4,
5,
7
]
}
|
[
3,
4,
5,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
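# Convenience re-exports: gather each feature's router in one module (the
# router objects themselves are defined in the imported modules).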
from Get2Gether.api_routes.schedule import schedule_router
from Get2Gether.api_routes.auth import auth_router
from Get2Gether.api_routes.event import event_router
|
flexible
|
{
"blob_id": "cd9d10a3ee3956762d88e76a951023dd77023942",
"index": 6411,
"step-1": "<mask token>\n",
"step-2": "from Get2Gether.api_routes.schedule import schedule_router\nfrom Get2Gether.api_routes.auth import auth_router\nfrom Get2Gether.api_routes.event import event_router\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from MultisizerReader import MultiSizerReader
import os
import matplotlib.pyplot as plt
#Get all spreadsheet files in folder and create multisizer files for each
folder = "./Data_Organised/DilutionTestingLowOD"
allFiles = os.listdir(folder)
multiSizerFiles = [allFiles[i] for i in range(len(allFiles)) if allFiles[i].endswith(".XLS")]
data = []
for files in multiSizerFiles:
data.append(MultiSizerReader(path=os.path.join(folder,files)))
#split files into YD133 and YD133 + PWR20
ODs = []
labels = []
dilutions =[]
for d in data:
OD = d.name.split("_")[4] + "." + d.name.split("_")[5]
if d.name.split("_")[2] == "5":
dilutions.append("$10^5$")
labels.append("$10^5$ OD: {}".format(OD))
if d.name.split("_")[2] == "7":
dilutions.append("$10^7$")
labels.append("$10^7$ OD: {}".format(OD))
ODs.append(float(OD))
fig, ax = plt.subplots(nrows=1,ncols=2,figsize=(14,9))
combinedData,combinedTypes,combinedLabels = MultiSizerReader.sumByGroup(data,ODs,labels)
MultiSizerReader.plotData(combinedData,combinedTypes,labels=combinedLabels,logAxis=False,legend=True,title="OD ~ 0.05",logNormalFits=False,xLims=(0.4,4),colorScale=False,smoothing=5,showStats=False,ax=ax[0],text=False,cbarLabel="$\mathbf{OD_{600}}$")
#Get all spreadsheet files in folder and create multisizer files for each
folder = "./Data_Organised/DilutionTestingHighOD"
allFiles = os.listdir(folder)
multiSizerFiles = [allFiles[i] for i in range(len(allFiles)) if allFiles[i].endswith(".XLS")]
data = []
for files in multiSizerFiles:
data.append(MultiSizerReader(path=os.path.join(folder,files)))
#split files into YD133 and YD133 + PWR20
ODs = []
labels = []
dilutions =[]
for d in data:
OD = d.name.split("_")[4] + "." + d.name.split("_")[5]
if d.name.split("_")[2] == "5":
dilutions.append("$10^5$")
labels.append("$10^5$ OD: {}".format(OD))
if d.name.split("_")[2] == "7":
dilutions.append("$10^7$")
labels.append("$10^7$ OD: {}".format(OD))
ODs.append(float(OD))
combinedData,combinedTypes,combinedLabels = MultiSizerReader.sumByGroup(data,ODs,labels)
MultiSizerReader.plotData(combinedData,combinedTypes,labels=combinedLabels,logAxis=False,legend=True,title="OD ~ 0.2",logNormalFits=False,xLims=(0.4,4),colorScale=False,smoothing=5,showStats=False,ax=ax[1],text=False,cbarLabel="$\mathbf{OD_{600}}$")
ax[0].text(0.03, 0.93 , "A", transform=ax[0].transAxes, size=35, weight='bold',color="k")
ax[1].text(0.03, 0.93 , "B", transform=ax[1].transAxes, size=35, weight='bold',color="k")
ax[0].legend(fontsize="xx-large")
ax[1].legend(fontsize="xx-large")
fig.tight_layout()
plt.show()
|
normal
|
{
"blob_id": "2f0aa1f294f34a4f3ffb47c15ab74fc792765f10",
"index": 9195,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor files in multiSizerFiles:\n data.append(MultiSizerReader(path=os.path.join(folder, files)))\n<mask token>\nfor d in data:\n OD = d.name.split('_')[4] + '.' + d.name.split('_')[5]\n if d.name.split('_')[2] == '5':\n dilutions.append('$10^5$')\n labels.append('$10^5$ OD: {}'.format(OD))\n if d.name.split('_')[2] == '7':\n dilutions.append('$10^7$')\n labels.append('$10^7$ OD: {}'.format(OD))\n ODs.append(float(OD))\n<mask token>\nMultiSizerReader.plotData(combinedData, combinedTypes, labels=\n combinedLabels, logAxis=False, legend=True, title='OD ~ 0.05',\n logNormalFits=False, xLims=(0.4, 4), colorScale=False, smoothing=5,\n showStats=False, ax=ax[0], text=False, cbarLabel='$\\\\mathbf{OD_{600}}$')\n<mask token>\nfor files in multiSizerFiles:\n data.append(MultiSizerReader(path=os.path.join(folder, files)))\n<mask token>\nfor d in data:\n OD = d.name.split('_')[4] + '.' + d.name.split('_')[5]\n if d.name.split('_')[2] == '5':\n dilutions.append('$10^5$')\n labels.append('$10^5$ OD: {}'.format(OD))\n if d.name.split('_')[2] == '7':\n dilutions.append('$10^7$')\n labels.append('$10^7$ OD: {}'.format(OD))\n ODs.append(float(OD))\n<mask token>\nMultiSizerReader.plotData(combinedData, combinedTypes, labels=\n combinedLabels, logAxis=False, legend=True, title='OD ~ 0.2',\n logNormalFits=False, xLims=(0.4, 4), colorScale=False, smoothing=5,\n showStats=False, ax=ax[1], text=False, cbarLabel='$\\\\mathbf{OD_{600}}$')\nax[0].text(0.03, 0.93, 'A', transform=ax[0].transAxes, size=35, weight=\n 'bold', color='k')\nax[1].text(0.03, 0.93, 'B', transform=ax[1].transAxes, size=35, weight=\n 'bold', color='k')\nax[0].legend(fontsize='xx-large')\nax[1].legend(fontsize='xx-large')\nfig.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\nfolder = './Data_Organised/DilutionTestingLowOD'\nallFiles = os.listdir(folder)\nmultiSizerFiles = [allFiles[i] for i in range(len(allFiles)) if allFiles[i]\n .endswith('.XLS')]\ndata = []\nfor files in multiSizerFiles:\n data.append(MultiSizerReader(path=os.path.join(folder, files)))\nODs = []\nlabels = []\ndilutions = []\nfor d in data:\n OD = d.name.split('_')[4] + '.' + d.name.split('_')[5]\n if d.name.split('_')[2] == '5':\n dilutions.append('$10^5$')\n labels.append('$10^5$ OD: {}'.format(OD))\n if d.name.split('_')[2] == '7':\n dilutions.append('$10^7$')\n labels.append('$10^7$ OD: {}'.format(OD))\n ODs.append(float(OD))\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=(14, 9))\ncombinedData, combinedTypes, combinedLabels = MultiSizerReader.sumByGroup(data,\n ODs, labels)\nMultiSizerReader.plotData(combinedData, combinedTypes, labels=\n combinedLabels, logAxis=False, legend=True, title='OD ~ 0.05',\n logNormalFits=False, xLims=(0.4, 4), colorScale=False, smoothing=5,\n showStats=False, ax=ax[0], text=False, cbarLabel='$\\\\mathbf{OD_{600}}$')\nfolder = './Data_Organised/DilutionTestingHighOD'\nallFiles = os.listdir(folder)\nmultiSizerFiles = [allFiles[i] for i in range(len(allFiles)) if allFiles[i]\n .endswith('.XLS')]\ndata = []\nfor files in multiSizerFiles:\n data.append(MultiSizerReader(path=os.path.join(folder, files)))\nODs = []\nlabels = []\ndilutions = []\nfor d in data:\n OD = d.name.split('_')[4] + '.' + d.name.split('_')[5]\n if d.name.split('_')[2] == '5':\n dilutions.append('$10^5$')\n labels.append('$10^5$ OD: {}'.format(OD))\n if d.name.split('_')[2] == '7':\n dilutions.append('$10^7$')\n labels.append('$10^7$ OD: {}'.format(OD))\n ODs.append(float(OD))\ncombinedData, combinedTypes, combinedLabels = MultiSizerReader.sumByGroup(data,\n ODs, labels)\nMultiSizerReader.plotData(combinedData, combinedTypes, labels=\n combinedLabels, logAxis=False, legend=True, title='OD ~ 0.2',\n logNormalFits=False, xLims=(0.4, 4), colorScale=False, smoothing=5,\n showStats=False, ax=ax[1], text=False, cbarLabel='$\\\\mathbf{OD_{600}}$')\nax[0].text(0.03, 0.93, 'A', transform=ax[0].transAxes, size=35, weight=\n 'bold', color='k')\nax[1].text(0.03, 0.93, 'B', transform=ax[1].transAxes, size=35, weight=\n 'bold', color='k')\nax[0].legend(fontsize='xx-large')\nax[1].legend(fontsize='xx-large')\nfig.tight_layout()\nplt.show()\n",
"step-4": "from MultisizerReader import MultiSizerReader\nimport os\nimport matplotlib.pyplot as plt\nfolder = './Data_Organised/DilutionTestingLowOD'\nallFiles = os.listdir(folder)\nmultiSizerFiles = [allFiles[i] for i in range(len(allFiles)) if allFiles[i]\n .endswith('.XLS')]\ndata = []\nfor files in multiSizerFiles:\n data.append(MultiSizerReader(path=os.path.join(folder, files)))\nODs = []\nlabels = []\ndilutions = []\nfor d in data:\n OD = d.name.split('_')[4] + '.' + d.name.split('_')[5]\n if d.name.split('_')[2] == '5':\n dilutions.append('$10^5$')\n labels.append('$10^5$ OD: {}'.format(OD))\n if d.name.split('_')[2] == '7':\n dilutions.append('$10^7$')\n labels.append('$10^7$ OD: {}'.format(OD))\n ODs.append(float(OD))\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=(14, 9))\ncombinedData, combinedTypes, combinedLabels = MultiSizerReader.sumByGroup(data,\n ODs, labels)\nMultiSizerReader.plotData(combinedData, combinedTypes, labels=\n combinedLabels, logAxis=False, legend=True, title='OD ~ 0.05',\n logNormalFits=False, xLims=(0.4, 4), colorScale=False, smoothing=5,\n showStats=False, ax=ax[0], text=False, cbarLabel='$\\\\mathbf{OD_{600}}$')\nfolder = './Data_Organised/DilutionTestingHighOD'\nallFiles = os.listdir(folder)\nmultiSizerFiles = [allFiles[i] for i in range(len(allFiles)) if allFiles[i]\n .endswith('.XLS')]\ndata = []\nfor files in multiSizerFiles:\n data.append(MultiSizerReader(path=os.path.join(folder, files)))\nODs = []\nlabels = []\ndilutions = []\nfor d in data:\n OD = d.name.split('_')[4] + '.' + d.name.split('_')[5]\n if d.name.split('_')[2] == '5':\n dilutions.append('$10^5$')\n labels.append('$10^5$ OD: {}'.format(OD))\n if d.name.split('_')[2] == '7':\n dilutions.append('$10^7$')\n labels.append('$10^7$ OD: {}'.format(OD))\n ODs.append(float(OD))\ncombinedData, combinedTypes, combinedLabels = MultiSizerReader.sumByGroup(data,\n ODs, labels)\nMultiSizerReader.plotData(combinedData, combinedTypes, labels=\n combinedLabels, logAxis=False, legend=True, title='OD ~ 0.2',\n logNormalFits=False, xLims=(0.4, 4), colorScale=False, smoothing=5,\n showStats=False, ax=ax[1], text=False, cbarLabel='$\\\\mathbf{OD_{600}}$')\nax[0].text(0.03, 0.93, 'A', transform=ax[0].transAxes, size=35, weight=\n 'bold', color='k')\nax[1].text(0.03, 0.93, 'B', transform=ax[1].transAxes, size=35, weight=\n 'bold', color='k')\nax[0].legend(fontsize='xx-large')\nax[1].legend(fontsize='xx-large')\nfig.tight_layout()\nplt.show()\n",
"step-5": "from MultisizerReader import MultiSizerReader\nimport os\nimport matplotlib.pyplot as plt\n\n#Get all spread sheet files in fodler and create multisizer files for each\nfolder = \"./Data_Organised/DilutionTestingLowOD\"\nallFiles = os.listdir(folder)\nmultiSizerFiles = [allFiles[i] for i in range(len(allFiles)) if allFiles[i].endswith(\".XLS\")]\ndata = []\nfor files in multiSizerFiles:\n data.append(MultiSizerReader(path=os.path.join(folder,files)))\n\n#split files into YD133 and YD133 + PWR20\nODs = []\nlabels = []\ndilutions =[]\nfor d in data:\n OD = d.name.split(\"_\")[4] + \".\" + d.name.split(\"_\")[5]\n if d.name.split(\"_\")[2] == \"5\":\n dilutions.append(\"$10^5$\")\n labels.append(\"$10^5$ OD: {}\".format(OD))\n if d.name.split(\"_\")[2] == \"7\":\n dilutions.append(\"$10^7$\")\n labels.append(\"$10^7$ OD: {}\".format(OD))\n ODs.append(float(OD))\n\nfig, ax = plt.subplots(nrows=1,ncols=2,figsize=(14,9))\n\ncombinedData,combinedTypes,combinedLabels = MultiSizerReader.sumByGroup(data,ODs,labels)\nMultiSizerReader.plotData(combinedData,combinedTypes,labels=combinedLabels,logAxis=False,legend=True,title=\"OD ~ 0.05\",logNormalFits=False,xLims=(0.4,4),colorScale=False,smoothing=5,showStats=False,ax=ax[0],text=False,cbarLabel=\"$\\mathbf{OD_{600}}$\")\n\n#Get all spread sheet files in fodler and create multisizer files for each\nfolder = \"./Data_Organised/DilutionTestingHighOD\"\nallFiles = os.listdir(folder)\nmultiSizerFiles = [allFiles[i] for i in range(len(allFiles)) if allFiles[i].endswith(\".XLS\")]\ndata = []\nfor files in multiSizerFiles:\n data.append(MultiSizerReader(path=os.path.join(folder,files)))\n\n#split files into YD133 and YD133 + PWR20\nODs = []\nlabels = []\ndilutions =[]\nfor d in data:\n OD = d.name.split(\"_\")[4] + \".\" + d.name.split(\"_\")[5]\n if d.name.split(\"_\")[2] == \"5\":\n dilutions.append(\"$10^5$\")\n labels.append(\"$10^5$ OD: {}\".format(OD))\n if d.name.split(\"_\")[2] == \"7\":\n dilutions.append(\"$10^7$\")\n labels.append(\"$10^7$ OD: {}\".format(OD))\n ODs.append(float(OD))\n\n\n\n\ncombinedData,combinedTypes,combinedLabels = MultiSizerReader.sumByGroup(data,ODs,labels)\nMultiSizerReader.plotData(combinedData,combinedTypes,labels=combinedLabels,logAxis=False,legend=True,title=\"OD ~ 0.2\",logNormalFits=False,xLims=(0.4,4),colorScale=False,smoothing=5,showStats=False,ax=ax[1],text=False,cbarLabel=\"$\\mathbf{OD_{600}}$\")\n\n\n\n\n\n\n\n\n\nax[0].text(0.03, 0.93 , \"A\", transform=ax[0].transAxes, size=35, weight='bold',color=\"k\")\nax[1].text(0.03, 0.93 , \"B\", transform=ax[1].transAxes, size=35, weight='bold',color=\"k\")\nax[0].legend(fontsize=\"xx-large\")\nax[1].legend(fontsize=\"xx-large\")\nfig.tight_layout()\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from whoosh.fields import TEXT, ID, Schema
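# stored=True keeps the raw field value in the index so it is returned with
# search hits; sortable=True adds a column cache so results can be sorted on it.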
bw2_schema = Schema(
name=TEXT(stored=True, sortable=True),
comment=TEXT(stored=True),
product=TEXT(stored=True, sortable=True),
categories=TEXT(stored=True),
location=TEXT(stored=True, sortable=True),
database=TEXT(stored=True),
code=ID(unique=True, stored=True),
)
|
normal
|
{
"blob_id": "07aafcb3db9c57ad09a29a827d72744ef0d22247",
"index": 3319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbw2_schema = Schema(name=TEXT(stored=True, sortable=True), comment=TEXT(\n stored=True), product=TEXT(stored=True, sortable=True), categories=TEXT\n (stored=True), location=TEXT(stored=True, sortable=True), database=TEXT\n (stored=True), code=ID(unique=True, stored=True))\n",
"step-3": "from __future__ import print_function, unicode_literals\nfrom eight import *\nfrom whoosh.fields import TEXT, ID, Schema\nbw2_schema = Schema(name=TEXT(stored=True, sortable=True), comment=TEXT(\n stored=True), product=TEXT(stored=True, sortable=True), categories=TEXT\n (stored=True), location=TEXT(stored=True, sortable=True), database=TEXT\n (stored=True), code=ID(unique=True, stored=True))\n",
"step-4": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals\nfrom eight import *\n\nfrom whoosh.fields import TEXT, ID, Schema\n\nbw2_schema = Schema(\n name=TEXT(stored=True, sortable=True),\n comment=TEXT(stored=True),\n product=TEXT(stored=True, sortable=True),\n categories=TEXT(stored=True),\n location=TEXT(stored=True, sortable=True),\n database=TEXT(stored=True),\n code=ID(unique=True, stored=True),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
x = int(input("Enter number:"))
y = x/2
print(y)

# the source ended mid-statement at "for i in"; a minimal completion, assuming
# the intent was to loop up to the halved value:
for i in range(int(y)):
    print(i)
|
normal
|
{
"blob_id": "79c6b7c3d23248f249b55af1d097a66a78a2c22f",
"index": 9164,
"step-1": "x = int(input(\"Enter number:\"))\ny = x/2\nprint(y)\n\nfor i in \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
A customised logger for this project for logging to the file and console
Created on 29/07/2022
@author: PNimbhore
"""
# imports
import os
import logging
class Logger:
"""
A custom logger which will take care
of logging to console and file.
"""
def __init__(self, filepath):
"""
Constructor
:param filepath:
"""
self.filepath = filepath
self.logger = logging.getLogger('util')
self.logger.setLevel(logging.DEBUG)
self._formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# file handler
file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')
file_handller.setLevel(logging.DEBUG)
file_handller.setFormatter(self._formatter)
self.logger.addHandler(file_handller)
# console handler
con_handler = logging.StreamHandler()
con_handler.setLevel(logging.ERROR)
con_handler.setFormatter(self._formatter)
self.logger.addHandler(con_handler)
log_file = "slb_config.log"
logger = Logger(log_file).logger
|
normal
|
{
"blob_id": "45d57f8392b89776f9349c32b4bb2fa71a4aaa83",
"index": 8610,
"step-1": "<mask token>\n\n\nclass Logger:\n <mask token>\n\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Logger:\n \"\"\"\n A custom logger which will take care\n of logging to console and file.\n \"\"\"\n\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Logger:\n \"\"\"\n A custom logger which will take care\n of logging to console and file.\n \"\"\"\n\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\nlog_file = 'slb_config.log'\nlogger = Logger(log_file).logger\n",
"step-4": "<mask token>\nimport os\nimport logging\n\n\nclass Logger:\n \"\"\"\n A custom logger which will take care\n of logging to console and file.\n \"\"\"\n\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\nlog_file = 'slb_config.log'\nlogger = Logger(log_file).logger\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nA customised logger for this project for logging to the file and console\nCreated on 29/07/2022\n@author: PNimbhore\n\"\"\"\n# imports\nimport os\nimport logging\n\n\nclass Logger:\n \"\"\"\n A custom logger which will take care\n of logging to console and file.\n \"\"\"\n def __init__(self, filepath):\n \"\"\"\n Constructor\n :param filepath:\n \"\"\"\n self.filepath = filepath\n self.logger = logging.getLogger('util')\n self.logger.setLevel(logging.DEBUG)\n self._formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # file handler\n file_handller = logging.FileHandler(os.path.join(self.filepath), 'a')\n file_handller.setLevel(logging.DEBUG)\n file_handller.setFormatter(self._formatter)\n self.logger.addHandler(file_handller)\n # console handler\n con_handler = logging.StreamHandler()\n con_handler.setLevel(logging.ERROR)\n con_handler.setFormatter(self._formatter)\n self.logger.addHandler(con_handler)\n\n\nlog_file = \"slb_config.log\"\nlogger = Logger(log_file).logger\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for line in f:
line = line.rstrip('\n')
line = line.replace('[', '')
splitted = line.split(']')
stringTime = splitted[0]
stringTask = splitted[1]
datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')
lineTuple = datetimeTime, stringTask
infos.append(lineTuple)
<|reserved_special_token_0|>
for dataPoint in infosSorted:
splitted = dataPoint[1].split(' ')
if splitted[1] == 'Guard':
guard = splitted[2].replace('#', '')
if splitted[1] == 'falls':
sleeping = True
sleepingTimeStart = dataPoint[0]
if splitted[1] == 'wakes':
sleeping = False
sleepingTimeStop = dataPoint[0]
sleepingTime = sleepingTimeStop - sleepingTimeStart
for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):
sleepingMinutes[int(guard)][x] += 1
<|reserved_special_token_0|>
for x in sleepingMinutes:
summa = sum(x)
minuutti = x.index(max(x))
if maxVartija < summa:
maxVartija = vartija
maxMinuutti = minuutti
maxMinuutit = summa
vartija += 1
print('Guard #' + str(maxVartija) + ' slept the most, a total of ' +
    str(maxMinuutit) + ' minutes, and most on minute ' + str(maxMinuutti))
print('So the answer is ' + str(maxVartija * maxMinuutti))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
f = open('input_task.txt', 'r')
width = 60
height = 5000
sleepingMinutes = [[(0) for x in range(width)] for y in range(height)]
infos = []
for line in f:
line = line.rstrip('\n')
line = line.replace('[', '')
splitted = line.split(']')
stringTime = splitted[0]
stringTask = splitted[1]
datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')
lineTuple = datetimeTime, stringTask
infos.append(lineTuple)
infosSorted = sorted(infos, key=lambda time: time[0])
sleeping = False
for dataPoint in infosSorted:
splitted = dataPoint[1].split(' ')
if splitted[1] == 'Guard':
guard = splitted[2].replace('#', '')
if splitted[1] == 'falls':
sleeping = True
sleepingTimeStart = dataPoint[0]
if splitted[1] == 'wakes':
sleeping = False
sleepingTimeStop = dataPoint[0]
sleepingTime = sleepingTimeStop - sleepingTimeStart
for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):
sleepingMinutes[int(guard)][x] += 1
maxVartija = 0
maxMinuutti = 0
maxMinuutit = 0
vartija = 0
for x in sleepingMinutes:
summa = sum(x)
minuutti = x.index(max(x))
if maxVartija < summa:
maxVartija = vartija
maxMinuutti = minuutti
maxMinuutit = summa
vartija += 1
print('Guard #' + str(maxVartija) + ' slept the most, a total of ' +
    str(maxMinuutit) + ' minutes, and most on minute ' + str(maxMinuutti))
print('So the answer is ' + str(maxVartija * maxMinuutti))
<|reserved_special_token_1|>
import os
import sys
import string
from array import *
from datetime import datetime
f = open('input_task.txt', 'r')
width = 60
height = 5000
sleepingMinutes = [[(0) for x in range(width)] for y in range(height)]
infos = []
for line in f:
line = line.rstrip('\n')
line = line.replace('[', '')
splitted = line.split(']')
stringTime = splitted[0]
stringTask = splitted[1]
datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')
lineTuple = datetimeTime, stringTask
infos.append(lineTuple)
infosSorted = sorted(infos, key=lambda time: time[0])
sleeping = False
for dataPoint in infosSorted:
splitted = dataPoint[1].split(' ')
if splitted[1] == 'Guard':
guard = splitted[2].replace('#', '')
if splitted[1] == 'falls':
sleeping = True
sleepingTimeStart = dataPoint[0]
if splitted[1] == 'wakes':
sleeping = False
sleepingTimeStop = dataPoint[0]
sleepingTime = sleepingTimeStop - sleepingTimeStart
for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):
sleepingMinutes[int(guard)][x] += 1
maxVartija = 0
maxMinuutti = 0
maxMinuutit = 0
vartija = 0
for x in sleepingMinutes:
summa = sum(x)
minuutti = x.index(max(x))
if maxVartija < summa:
maxVartija = vartija
maxMinuutti = minuutti
maxMinuutit = summa
vartija += 1
print('Guard #' + str(maxVartija) + ' slept the most, a total of ' +
    str(maxMinuutit) + ' minutes, and most on minute ' + str(maxMinuutti))
print('So the answer is ' + str(maxVartija * maxMinuutti))
<|reserved_special_token_1|>
import os
import sys
import string
from array import *
from datetime import datetime
#f = open('input_test.txt', 'r')
f = open('input_task.txt', 'r')
width = 60
height = 5000
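# 60 per-minute counters per guard; guard ids index the rows (so ids are assumed < 5000)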
sleepingMinutes = [[0 for x in range(width)] for y in range(height)]
infos = []
# Change lines to tuples and store to array for sorting
for line in f:
line = line.rstrip('\n')
line = line.replace('[','')
splitted = line.split(']')
stringTime = splitted[0]
stringTask = splitted[1]
datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')
lineTuple = (datetimeTime, stringTask)
infos.append(lineTuple)
#print(datetimeTime.minute)
# sort the info we have
infosSorted = sorted(infos, key=lambda time: time[0])
#print(infos)
#print(infosSorted)
sleeping = False
for dataPoint in infosSorted:
splitted = dataPoint[1].split(' ')
#print(splitted)
if splitted[1] == 'Guard':
        #print('Guard changed, now on duty: ' + splitted[2])
guard = splitted[2].replace('#','')
if splitted[1] == 'falls':
sleeping = True
sleepingTimeStart = dataPoint[0]
        #print('guard ' + guard + ' fell asleep at ' + str(sleepingTimeStart))
if splitted[1] == 'wakes':
sleeping = False
sleepingTimeStop = dataPoint[0]
sleepingTime = sleepingTimeStop - sleepingTimeStart
        #print('guard ' + guard + ' woke up at ' + str(sleepingTimeStop) + ' after sleeping ' + str(sleepingTime))
for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):
sleepingMinutes[int(guard)][x] += 1
maxVartija = 0
maxMinuutti = 0
maxMinuutit = 0
vartija = 0
for x in sleepingMinutes:
summa = sum(x)
minuutti = x.index(max(x))
#print(x)
    #print('total ' + str(summa) + ' slept the most on minute ' + str(maxMinuutti))
    # keep the guard with the highest sleep total seen so far
    if maxMinuutit < summa:
maxVartija = vartija
maxMinuutti = minuutti
maxMinuutit = summa
vartija += 1
print('Guard #' + str(maxVartija) + ' slept the most, a total of ' + str(maxMinuutit) + ' minutes, and most on minute ' + str(maxMinuutti))
print('So the answer is ' + str(maxVartija*maxMinuutti))
|
flexible
|
{
"blob_id": "293533d07b530be9e8f97f1720619bf6c3113cca",
"index": 9447,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in f:\n line = line.rstrip('\\n')\n line = line.replace('[', '')\n splitted = line.split(']')\n stringTime = splitted[0]\n stringTask = splitted[1]\n datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')\n lineTuple = datetimeTime, stringTask\n infos.append(lineTuple)\n<mask token>\nfor dataPoint in infosSorted:\n splitted = dataPoint[1].split(' ')\n if splitted[1] == 'Guard':\n guard = splitted[2].replace('#', '')\n if splitted[1] == 'falls':\n sleeping = True\n sleepingTimeStart = dataPoint[0]\n if splitted[1] == 'wakes':\n sleeping = False\n sleepingTimeStop = dataPoint[0]\n sleepingTime = sleepingTimeStop - sleepingTimeStart\n for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):\n sleepingMinutes[int(guard)][x] += 1\n<mask token>\nfor x in sleepingMinutes:\n summa = sum(x)\n minuutti = x.index(max(x))\n if maxVartija < summa:\n maxVartija = vartija\n maxMinuutti = minuutti\n maxMinuutit = summa\n vartija += 1\nprint('Eniten nukkui vartija #' + str(maxVartija) + ' nukkuen yhteensä ' +\n str(maxMinuutit) + ' minuuttia ja eniten minuutilla ' + str(maxMinuutti))\nprint('Vastaus on siis ' + str(maxVartija * maxMinuutti))\n",
"step-3": "<mask token>\nf = open('input_task.txt', 'r')\nwidth = 60\nheight = 5000\nsleepingMinutes = [[(0) for x in range(width)] for y in range(height)]\ninfos = []\nfor line in f:\n line = line.rstrip('\\n')\n line = line.replace('[', '')\n splitted = line.split(']')\n stringTime = splitted[0]\n stringTask = splitted[1]\n datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')\n lineTuple = datetimeTime, stringTask\n infos.append(lineTuple)\ninfosSorted = sorted(infos, key=lambda time: time[0])\nsleeping = False\nfor dataPoint in infosSorted:\n splitted = dataPoint[1].split(' ')\n if splitted[1] == 'Guard':\n guard = splitted[2].replace('#', '')\n if splitted[1] == 'falls':\n sleeping = True\n sleepingTimeStart = dataPoint[0]\n if splitted[1] == 'wakes':\n sleeping = False\n sleepingTimeStop = dataPoint[0]\n sleepingTime = sleepingTimeStop - sleepingTimeStart\n for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):\n sleepingMinutes[int(guard)][x] += 1\nmaxVartija = 0\nmaxMinuutti = 0\nmaxMinuutit = 0\nvartija = 0\nfor x in sleepingMinutes:\n summa = sum(x)\n minuutti = x.index(max(x))\n if maxVartija < summa:\n maxVartija = vartija\n maxMinuutti = minuutti\n maxMinuutit = summa\n vartija += 1\nprint('Eniten nukkui vartija #' + str(maxVartija) + ' nukkuen yhteensä ' +\n str(maxMinuutit) + ' minuuttia ja eniten minuutilla ' + str(maxMinuutti))\nprint('Vastaus on siis ' + str(maxVartija * maxMinuutti))\n",
"step-4": "import os\nimport sys\nimport string\nfrom array import *\nfrom datetime import datetime\nf = open('input_task.txt', 'r')\nwidth = 60\nheight = 5000\nsleepingMinutes = [[(0) for x in range(width)] for y in range(height)]\ninfos = []\nfor line in f:\n line = line.rstrip('\\n')\n line = line.replace('[', '')\n splitted = line.split(']')\n stringTime = splitted[0]\n stringTask = splitted[1]\n datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')\n lineTuple = datetimeTime, stringTask\n infos.append(lineTuple)\ninfosSorted = sorted(infos, key=lambda time: time[0])\nsleeping = False\nfor dataPoint in infosSorted:\n splitted = dataPoint[1].split(' ')\n if splitted[1] == 'Guard':\n guard = splitted[2].replace('#', '')\n if splitted[1] == 'falls':\n sleeping = True\n sleepingTimeStart = dataPoint[0]\n if splitted[1] == 'wakes':\n sleeping = False\n sleepingTimeStop = dataPoint[0]\n sleepingTime = sleepingTimeStop - sleepingTimeStart\n for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):\n sleepingMinutes[int(guard)][x] += 1\nmaxVartija = 0\nmaxMinuutti = 0\nmaxMinuutit = 0\nvartija = 0\nfor x in sleepingMinutes:\n summa = sum(x)\n minuutti = x.index(max(x))\n if maxVartija < summa:\n maxVartija = vartija\n maxMinuutti = minuutti\n maxMinuutit = summa\n vartija += 1\nprint('Eniten nukkui vartija #' + str(maxVartija) + ' nukkuen yhteensä ' +\n str(maxMinuutit) + ' minuuttia ja eniten minuutilla ' + str(maxMinuutti))\nprint('Vastaus on siis ' + str(maxVartija * maxMinuutti))\n",
"step-5": "import os\nimport sys\nimport string\nfrom array import *\nfrom datetime import datetime\n\n#f = open('input_test.txt', 'r')\nf = open('input_task.txt', 'r')\n\nwidth = 60\nheight = 5000\nsleepingMinutes = [[0 for x in range(width)] for y in range(height)]\n\ninfos = []\n\n# Change lines to tuples and store to array for sorting\nfor line in f:\n line = line.rstrip('\\n')\n line = line.replace('[','')\n splitted = line.split(']')\n stringTime = splitted[0]\n stringTask = splitted[1]\n datetimeTime = datetime.strptime(stringTime, '%Y-%m-%d %H:%M')\n lineTuple = (datetimeTime, stringTask)\n infos.append(lineTuple)\n #print(datetimeTime.minute)\n\n# sort the info we have\ninfosSorted = sorted(infos, key=lambda time: time[0])\n#print(infos)\n#print(infosSorted)\n\nsleeping = False\n\nfor dataPoint in infosSorted:\n splitted = dataPoint[1].split(' ')\n #print(splitted)\n if splitted[1] == 'Guard':\n #print('Vartija vaihtui, vuorossa: ' + splitted[2])\n guard = splitted[2].replace('#','')\n if splitted[1] == 'falls':\n sleeping = True\n sleepingTimeStart = dataPoint[0]\n #print('vartija ' + guard + ' nukahti hetkellä ' + str(sleepingTimeStart))\n if splitted[1] == 'wakes':\n sleeping = False\n sleepingTimeStop = dataPoint[0]\n sleepingTime = sleepingTimeStop - sleepingTimeStart\n #print('vartija ' + guard + ' heräsi hetkellä ' + str(sleepingTimeStop) + ' nukkuen ' + str(sleepingTime))\n for x in range(sleepingTimeStart.minute, sleepingTimeStop.minute):\n sleepingMinutes[int(guard)][x] += 1\n\nmaxVartija = 0\nmaxMinuutti = 0\nmaxMinuutit = 0\nvartija = 0\n\nfor x in sleepingMinutes:\n summa = sum(x)\n minuutti = x.index(max(x))\n #print(x)\n #print('yhteensä ' + str(summa) + ' nukkui eniten minuutilla ' + str(maxMinuutti))\n if maxVartija < summa:\n maxVartija = vartija\n maxMinuutti = minuutti\n maxMinuutit = summa\n vartija += 1\n\nprint('Eniten nukkui vartija #' + str(maxVartija) + ' nukkuen yhteensä ' + str(maxMinuutit) + ' minuuttia ja eniten minuutilla ' + str(maxMinuutti))\nprint('Vastaus on siis ' + str(maxVartija*maxMinuutti))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestFabric(unittest.TestCase):
def setUp(self):
env.test_home = os.path.join(env.localroot, 'deploy', 'test')
user_config = yaml.load(open(os.path.join(env.localroot, 'deploy',
'test', 'machines_user.yml')))
env.update(user_config['default'])
execute(planck)
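        # Patch deploy.fab's remote helpers so issued commands are recorded in self.commands (local still runs).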
sys.modules['deploy.fab'].run = lambda command: self.commands.append(
command)
def mock_local(command, original=sys.modules['deploy.fab'].local):
self.commands.append(command)
original(command)
sys.modules['deploy.fab'].local = mock_local
sys.modules['deploy.fab'
].put = lambda source, target: self.commands.append('put ' +
source + ' ' + target)
sys.modules['deploy.fab'
].rsync_project = lambda **args: self.commands.append('rsync ' +
args['local_dir'] + ' ' + args['remote_dir'])
def mock_profile(profile, original=sys.modules['deploy.fab'].generate):
self.commands.append('generate %g %g %g' % (profile.VoxelSize,
profile.Steps, profile.Cycles))
original(profile)
sys.modules['deploy.fab'].generate = mock_profile
self.commands = []
env.build_number = 'abcd1234'
def assertCommandCount(self, should_be):
self.assertEqual(len(self.commands), should_be)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_with_job(self):
with settings(results_path='banana', local_results='pineapple'):
with_job('foo')
self.assertEqual(env.job_results, 'banana/foo')
self.assertEqual(env.job_results_local, 'pineapple/foo')
def test_with_template_job(self):
with settings(results_path='banana', foo='fish', bar='swim',
job_name_template='${foo}_${bar}'):
with_template_job()
self.assertEqual(env.job_results, 'banana/fish_swim')
def test_hemelb(self):
execute(hemelb, 'cylinder', cores=5)
self.assertEqual(env.name, 'cylinder_abcd1234_planck_5_10_10')
self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)
self.assertCommandRegexp('rsync .*config_files/cylinder', 1)
self.assertCommandRegexp(
'put .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 2)
#!/usr/bin/env python
#
# Copyright (C) University College London, 2007-2012, all rights reserved.
#
# This file is part of HemeLB and is CONFIDENTIAL. You may not work
# with, install, use, duplicate, modify, redistribute or share this
# file, or any part thereof, other than as allowed by any agreement
# specifically made by you with University College London.
#
# encoding: utf-8
"""
test_machine_environment.py
Created by James Hetherington on 2012-01-19.
Copyright (c) 2012 UCL. All rights reserved.
"""
import unittest
import sys
import copy
import textwrap
from ..fab import *
class TestFabric(unittest.TestCase):
    def setUp(self):
        # Update the user config with the testing example.
        env.test_home = os.path.join(env.localroot, 'deploy', 'test')
        user_config = yaml.load(open(os.path.join(env.localroot, 'deploy', 'test', 'machines_user.yml')))
        env.update(user_config['default'])
        execute(planck)  # The default machine target is assumed to be planck.
        # Monkeypatch the fabric commands to do nothing, but record what they would have done.
        sys.modules['deploy.fab'].run = lambda command: self.commands.append(command)
        def mock_local(command, original=sys.modules['deploy.fab'].local):
            self.commands.append(command)
            original(command)
        sys.modules['deploy.fab'].local = mock_local
        sys.modules['deploy.fab'].put = lambda source, target: self.commands.append("put " + source + " " + target)
        sys.modules['deploy.fab'].rsync_project = lambda **args: self.commands.append("rsync " + args['local_dir'] + " " + args['remote_dir'])
        def mock_profile(profile, original=sys.modules['deploy.fab'].generate):
            self.commands.append("generate %g %g %g" % (profile.VoxelSize, profile.Steps, profile.Cycles))
            original(profile)
        sys.modules['deploy.fab'].generate = mock_profile
        self.commands = []
        env.build_number = 'abcd1234'
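    # setUp replaces every fabric primitive that would touch the network
    # (run, put, rsync_project) with a recorder, so each test can assert on
    # the exact command sequence a task would have issued; local() is
    # recorded but still executed, and generate() records before delegating
    # to the real profile generator.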
    def assertCommandCount(self, should_be):
        self.assertEqual(len(self.commands), should_be)
    def assertCommand(self, should_be, index=-1):
        self.assertEqual(self.commands[index], should_be)
    def assertCommandRegexp(self, should_be, index=-1):
        self.assertRegexpMatches(self.commands[index], should_be)
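    # These helpers assert against the recorded command log: assertCommand and
    # assertCommandRegexp index into self.commands, with the default index=-1
    # checking the most recently recorded command.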
    def test_machine_alias(self):
        self.assertEqual(env.remote, "planck.chem.ucl.ac.uk")
        execute(julian)
        self.assertEqual(env.remote, "julian.chem.ucl.ac.uk")
        execute(hector)
        self.assertEqual(env.remote, "login.hector.ac.uk")
    def test_clean(self):
        execute(clean)
        self.assertCommand('make clean')
    def test_with_job(self):
        with settings(results_path="banana", local_results='pineapple'):
            with_job('foo')
        self.assertEqual(env.job_results, "banana/foo")
        self.assertEqual(env.job_results_local, "pineapple/foo")
    def test_with_template_job(self):
        with settings(results_path='banana', foo='fish', bar='swim', job_name_template="${foo}_${bar}"):
            with_template_job()
        self.assertEqual(env.job_results, "banana/fish_swim")
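    # job_name_template is expanded with ${}-style placeholders drawn from the
    # fabric env (presumably string.Template-style substitution inside
    # with_template_job), so foo='fish' and bar='swim' yield the job name
    # fish_swim.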
    def test_hemelb(self):
        execute(hemelb, 'cylinder', cores=5)
        self.assertEqual(env.name, "cylinder_abcd1234_planck_5_10_10")
        self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)
        self.assertCommandRegexp('rsync .*config_files/cylinder', 1)
        self.assertCommandRegexp("put .*scripts/cylinder_abcd1234_planck_5_10_10.sh", 2)
        self.assertCommandRegexp("mkdir -p .*results/cylinder_abcd1234_planck_5_10_10", 3)
        self.assertCommandRegexp("cp .*scripts/cylinder_abcd1234_planck_5_10_10.sh .*results/cylinder_abcd1234_planck_5_10_10", 4)
        self.assertCommandRegexp("cp .*CMakeCache.txt .*results/cylinder_abcd1234_planck_5_10_10", 5)
        self.assertCommandRegexp("put .*env.yml", 6)
        self.assertCommandRegexp(r"chmod u\+x .*scripts/cylinder_abcd1234_planck_5_10_10.sh", 7)  # raw string: \+ is a regex escape, not a Python one
        self.assertCommandRegexp(".*scripts/cylinder_abcd1234_planck_5_10_10.sh", 8)
        self.assertCommandCount(9)
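    # The nine recorded commands trace the job lifecycle: sync the config
    # files, stage the job script, create the results directory, snapshot the
    # script, CMakeCache.txt and env.yml alongside the results, make the
    # script executable, and finally submit it.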
    def test_hemelbs(self):
        execute(hemelbs, 'cylinder', cores='[1:6:1]')
        self.assertCommandRegexp('rsync .*config_files/cylinder', 1)
        self.assertCommandRegexp("cylinder_abcd1234_planck_5_10_10.sh")
        self.assertCommandCount(9 * 5)
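    # cores='[1:6:1]' expands to five core counts (1 through 5; the upper
    # bound is apparently exclusive), and each job repeats the nine commands
    # checked in test_hemelb, hence 9 * 5.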
    def test_create_config(self):
        execute(create_config, 'cylinder', VoxelSize=0.1)
        self.assertEqual(env.config, "cylinder_0_1_1000_3")
        self.assertCommandRegexp("mkdir -p .*/configs/cylinder_0_1_1000_3", 0)
        self.assertCommand("generate 0.1 1000 3", 1)
        self.assertCommandCount(2)
    def test_create_configs(self):
        execute(create_configs, 'cylinder', VoxelSize='[0.1:0.21:0.01]')
        self.assertEqual(env.config, "cylinder_0_2_1000_3")
        self.assertCommandRegexp("mkdir -p .*/configs/cylinder_0_1_1000_3", 0)
        self.assertCommand("generate 0.1 1000 3", 1)
        self.assertCommandCount(2 * 11)
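    # VoxelSize='[0.1:0.21:0.01]' sweeps eleven voxel sizes (0.10 to 0.20
    # inclusive in steps of 0.01), and each configuration costs two commands
    # (mkdir plus generate), hence 2 * 11; env.config is left naming the last
    # configuration generated, cylinder_0_2_1000_3.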
    def test_hemelb_profile(self):
        execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]', cores='[1:6:1]')
        self.assertEqual(env.name, "cylinder_0_2_1000_3_abcd1234_planck_5_10_10")
        self.assertCommandRegexp("mkdir -p .*/configs/cylinder_0_1_1000_3", 0)
        self.assertCommand("generate 0.1 1000 3", 1)
        self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 2)
        self.assertCommandRegexp('rsync .*config_files/cylinder', 3)
        self.assertCommandRegexp("put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh", 4)
        self.assertCommandRegexp("mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10", 5)
        self.assertCommandRegexp("cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10", 6)
        self.assertCommandRegexp("cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10", 7)
        self.assertCommandRegexp("put .*env.yml", 8)
        self.assertCommandRegexp(r"chmod u\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh", 9)
        self.assertCommandRegexp(".*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh", 10)
        self.assertCommandCount(2 * 11 + 9 * 11 * 5)
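    # Total: 2 commands per configuration for the 11-point voxel-size sweep,
    # plus the 9 job commands for each of the 11 configurations at each of
    # the 5 core counts.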
    def test_hemelb_profile_no_config_generation(self):
        execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]', cores='[1:6:1]', create_configs="False")
        self.assertEqual(env.name, "cylinder_0_2_1000_3_abcd1234_planck_5_10_10")
        self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)
        self.assertCommandRegexp('rsync .*config_files/cylinder', 1)
        self.assertCommandRegexp("put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh", 2)
        self.assertCommandRegexp("mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10", 3)
        self.assertCommandRegexp("cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10", 4)
        self.assertCommandRegexp("cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10", 5)
        self.assertCommandRegexp("put .*env.yml", 6)
        self.assertCommandRegexp(r"chmod u\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh", 7)
        self.assertCommandRegexp(".*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh", 8)
        self.assertCommandCount(9 * 11 * 5)
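    # With create_configs="False" the 2 * 11 generation commands disappear
    # and only the 9 * 11 * 5 job commands remain; the flag is the string
    # "False" rather than a boolean, as fab command-line arguments arrive as
    # strings.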
    def test_configure_default(self):
        execute(configure)
        target = {
            'CMAKE_BUILD_TYPE': "Release",
            'CMAKE_CXX_FLAGS_RELEASE': "-O4",
            'CMAKE_INSTALL_PREFIX': env.install_path,
            'CPPUNIT_PATCH_LDL': True,
            "HEMELB_DEPENDENCIES_INSTALL_PATH": env.install_path,
            "HEMELB_SUBPROJECT_MAKE_JOBS": 1
        }
        self.assertEqual(env.total_cmake_options, target)
        # Can't just assert on a string here, as the order of the dict is not defined.
        for key, value in target.iteritems():
            self.assertRegexpMatches(env.cmake_flags, "-D%s=%s" % (key, value))
    def test_configure_debug(self):
        execute(configure, 'debug')
        self.assertEqual(env.total_cmake_options,
                         {
                             'CMAKE_BUILD_TYPE': "Debug",
                             'HEMELB_OPTIMISATION': "",
                             'HEMELB_LOG_LEVEL': "debug",
                             'CPPUNIT_PATCH_LDL': True,
                             'CMAKE_INSTALL_PREFIX': env.install_path,
                             "HEMELB_DEPENDENCIES_INSTALL_PATH": env.install_path,
                             "HEMELB_SUBPROJECT_MAKE_JOBS": 1
                         })
    def test_script_template(self):
        script = script_templates('dummy_ge_header', 'dummy_jobscript', commands=['extra'])
        content = open(script).read()
        self.assertEqual(content, "user: test_user\n\nrun bananas\n\nextra")
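
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original suite): the command-recorder
# monkeypatching used in TestFabric.setUp(), reduced to a self-contained,
# runnable example. The names "shell" and "deploy_task" are hypothetical
# stand-ins for deploy.fab's run() and a task that calls it; only the pattern
# is the point -- swap a side-effecting callable for a closure that logs its
# arguments, then assert on the log. Unlike setUp() above, this version
# restores the original callable in tearDown(). Relies on the unittest import
# at the top of this module.
# ---------------------------------------------------------------------------
def shell(command):  # stand-in for fabric's run(); never executed under test
    raise RuntimeError("would touch a remote machine: " + command)

def deploy_task():  # a toy task that issues two commands
    shell("make clean")
    shell("make install")

class TestCommandRecorder(unittest.TestCase):
    def setUp(self):
        self.commands = []
        global shell
        # Rebind the module-level callable to a recorder, keeping the original.
        self._original_shell, shell = shell, self.commands.append
    def tearDown(self):
        global shell
        shell = self._original_shell  # restore the real implementation
    def test_deploy_records_commands(self):
        deploy_task()
        self.assertEqual(self.commands, ["make clean", "make install"])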
"step-ids": [
12,
14,
16,
19,
20
]
}
|
[
12,
14,
16,
19,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
capture.release()
cv2.destroyAllWindows()
<|reserved_special_token_0|>
if len(faces) >= 1:
sys.stdout.write('1')
else:
sys.stdout.write('0')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
face_cascade = cv2.CascadeClassifier(
'./haar_cascades/haarcascade_frontalface_default.xml')
eyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')
capture = cv2.VideoCapture(0)
_, image = capture.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
capture.release()
cv2.destroyAllWindows()
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) >= 1:
sys.stdout.write('1')
else:
sys.stdout.write('0')
<|reserved_special_token_1|>
import cv2
import sys
face_cascade = cv2.CascadeClassifier(
'./haar_cascades/haarcascade_frontalface_default.xml')
eyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')
capture = cv2.VideoCapture(0)
_, image = capture.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
capture.release()
cv2.destroyAllWindows()
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) >= 1:
sys.stdout.write('1')
else:
sys.stdout.write('0')
<|reserved_special_token_1|>
import cv2
import sys
# Load the Haar cascades
face_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_frontalface_default.xml')
eyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')
capture = cv2.VideoCapture(0)
_, image = capture.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
capture.release()
cv2.destroyAllWindows()
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) >= 1:
sys.stdout.write("1")
else:
sys.stdout.write("0")
|
flexible
|
{
"blob_id": "4d707e23f66e8b6bea05a5901d3d8e459247c6c1",
"index": 3840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncapture.release()\ncv2.destroyAllWindows()\n<mask token>\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-3": "<mask token>\nface_cascade = cv2.CascadeClassifier(\n './haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncapture.release()\ncv2.destroyAllWindows()\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-4": "import cv2\nimport sys\nface_cascade = cv2.CascadeClassifier(\n './haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncapture.release()\ncv2.destroyAllWindows()\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-5": "import cv2\nimport sys\n\n# Load the Haar cascades\nface_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\n\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\ncapture.release()\ncv2.destroyAllWindows()\n\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write(\"1\")\nelse:\n sys.stdout.write(\"0\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
n,k = map(int,raw_input().split())
nums = list(map(int,raw_input().split()))
if k==1:
print min(nums)
elif k==2:
print max(nums[0],nums[-1])
else:
print max(nums)
|
normal
|
{
"blob_id": "041a5bf205c1b3b3029623aa93835e99104464b2",
"index": 2361,
"step-1": "n,k = map(int,raw_input().split())\nnums = list(map(int,raw_input().split()))\nif k==1:\n print min(nums)\nelif k==2:\n print max(nums[0],nums[-1])\nelse:\n print max(nums)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Euler(BaseEuler):
def solve(self):
fp = path.join(getcwd(), 'euler/resources/names.txt')
with open(fp, 'r') as f:
names = sorted([name for name in f.read().replace('"', '').
split(',')])
return sum([get_name_score(names, name) for name in names])
<|reserved_special_token_0|>
@property
def problem(self):
return """
Project Euler Problem 22:
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file
containing over five-thousand first names, begin by sorting it into
alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a
name score.
For example, when the list is sorted into alphabetical order, COLIN, which
is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,
COLIN would obtain a score of 938 * 53 = 49714.
What is the total of all the name scores in the file?
"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Euler(BaseEuler):
def solve(self):
fp = path.join(getcwd(), 'euler/resources/names.txt')
with open(fp, 'r') as f:
names = sorted([name for name in f.read().replace('"', '').
split(',')])
return sum([get_name_score(names, name) for name in names])
@property
def answer(self):
return ('The total of all the name scores in the file is: %d' %
self.solve())
@property
def problem(self):
return """
Project Euler Problem 22:
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file
containing over five-thousand first names, begin by sorting it into
alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a
name score.
For example, when the list is sorted into alphabetical order, COLIN, which
is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,
COLIN would obtain a score of 938 * 53 = 49714.
What is the total of all the name scores in the file?
"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_name_score(l, name):
idx = l.index(name) + 1
val = sum([(ord(c) - 64) for c in name])
return idx * val
class Euler(BaseEuler):
def solve(self):
fp = path.join(getcwd(), 'euler/resources/names.txt')
with open(fp, 'r') as f:
names = sorted([name for name in f.read().replace('"', '').
split(',')])
return sum([get_name_score(names, name) for name in names])
@property
def answer(self):
return ('The total of all the name scores in the file is: %d' %
self.solve())
@property
def problem(self):
return """
Project Euler Problem 22:
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file
containing over five-thousand first names, begin by sorting it into
alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a
name score.
For example, when the list is sorted into alphabetical order, COLIN, which
is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,
COLIN would obtain a score of 938 * 53 = 49714.
What is the total of all the name scores in the file?
"""
<|reserved_special_token_1|>
from euler.baseeuler import BaseEuler
from os import path, getcwd
def get_name_score(l, name):
idx = l.index(name) + 1
val = sum([(ord(c) - 64) for c in name])
return idx * val
class Euler(BaseEuler):
def solve(self):
fp = path.join(getcwd(), 'euler/resources/names.txt')
with open(fp, 'r') as f:
names = sorted([name for name in f.read().replace('"', '').
split(',')])
return sum([get_name_score(names, name) for name in names])
@property
def answer(self):
return ('The total of all the name scores in the file is: %d' %
self.solve())
@property
def problem(self):
return """
Project Euler Problem 22:
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file
containing over five-thousand first names, begin by sorting it into
alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a
name score.
For example, when the list is sorted into alphabetical order, COLIN, which
is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,
COLIN would obtain a score of 938 * 53 = 49714.
What is the total of all the name scores in the file?
"""
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from euler.baseeuler import BaseEuler
from os import path, getcwd
def get_name_score(l, name):
idx = l.index(name) + 1
val = sum([(ord(c) - 64) for c in name])
return idx * val
class Euler(BaseEuler):
def solve(self):
fp = path.join(getcwd(), 'euler/resources/names.txt')
with open(fp, 'r') as f:
names = sorted([name for name
in f.read().replace('"', '').split(',')])
return sum([get_name_score(names, name) for name in names])
@property
def answer(self):
return ('The total of all the name scores in the file is: %d'
% self.solve())
@property
def problem(self):
return '''
Project Euler Problem 22:
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file
containing over five-thousand first names, begin by sorting it into
alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a
name score.
For example, when the list is sorted into alphabetical order, COLIN, which
is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,
COLIN would obtain a score of 938 * 53 = 49714.
What is the total of all the name scores in the file?
'''
|
flexible
|
{
"blob_id": "40d08bfa3286aa30b612ed83b5e9c7a29e9de809",
"index": 6540,
"step-1": "<mask token>\n\n\nclass Euler(BaseEuler):\n\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name in f.read().replace('\"', '').\n split(',')])\n return sum([get_name_score(names, name) for name in names])\n <mask token>\n\n @property\n def problem(self):\n return \"\"\"\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n\"\"\"\n",
"step-2": "<mask token>\n\n\nclass Euler(BaseEuler):\n\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name in f.read().replace('\"', '').\n split(',')])\n return sum([get_name_score(names, name) for name in names])\n\n @property\n def answer(self):\n return ('The total of all the name scores in the file is: %d' %\n self.solve())\n\n @property\n def problem(self):\n return \"\"\"\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n\"\"\"\n",
"step-3": "<mask token>\n\n\ndef get_name_score(l, name):\n idx = l.index(name) + 1\n val = sum([(ord(c) - 64) for c in name])\n return idx * val\n\n\nclass Euler(BaseEuler):\n\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name in f.read().replace('\"', '').\n split(',')])\n return sum([get_name_score(names, name) for name in names])\n\n @property\n def answer(self):\n return ('The total of all the name scores in the file is: %d' %\n self.solve())\n\n @property\n def problem(self):\n return \"\"\"\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n\"\"\"\n",
"step-4": "from euler.baseeuler import BaseEuler\nfrom os import path, getcwd\n\n\ndef get_name_score(l, name):\n idx = l.index(name) + 1\n val = sum([(ord(c) - 64) for c in name])\n return idx * val\n\n\nclass Euler(BaseEuler):\n\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name in f.read().replace('\"', '').\n split(',')])\n return sum([get_name_score(names, name) for name in names])\n\n @property\n def answer(self):\n return ('The total of all the name scores in the file is: %d' %\n self.solve())\n\n @property\n def problem(self):\n return \"\"\"\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n\"\"\"\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom euler.baseeuler import BaseEuler\nfrom os import path, getcwd\n\n\ndef get_name_score(l, name):\n idx = l.index(name) + 1\n val = sum([(ord(c) - 64) for c in name])\n return idx * val\n\n\nclass Euler(BaseEuler):\n def solve(self):\n fp = path.join(getcwd(), 'euler/resources/names.txt')\n with open(fp, 'r') as f:\n names = sorted([name for name\n in f.read().replace('\"', '').split(',')])\n\n return sum([get_name_score(names, name) for name in names])\n\n @property\n def answer(self):\n return ('The total of all the name scores in the file is: %d'\n % self.solve())\n\n @property\n def problem(self):\n return '''\nProject Euler Problem 22:\n\n Using names.txt (right click and 'Save Link/Target As...'), a 46K text file\n containing over five-thousand first names, begin by sorting it into\n alphabetical order. Then working out the alphabetical value for each name,\n multiply this value by its alphabetical position in the list to obtain a\n name score.\n\n For example, when the list is sorted into alphabetical order, COLIN, which\n is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So,\n COLIN would obtain a score of 938 * 53 = 49714.\n\n What is the total of all the name scores in the file?\n'''\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Example 15-5. Using a BookDict, but not quite as intended
>>> from books import BookDict
>>> pp = BookDict(title='Programming Pearls',
... authors='Jon Bentley',
... isbn='0201657880',
... pagecount=256)
>>> pp
{'title': 'Programming Pearls', 'authors': 'Jon Bentley', 'isbn': '0201657880', 'pagecount': 256}
>>> type(pp)
<class 'dict'>
>>> pp.title
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'title'
>>> pp['title']
'Programming Pearls'
>>> BookDict.__annotations__
{'isbn': <class 'str'>, 'title': <class 'str'>, 'authors': typing.List[str], 'pagecount': <class 'int'>}
|
normal
|
{
"blob_id": "ab9d8e36518c4d42f1e29fbc5552078a5a338508",
"index": 7010,
"step-1": "# Example 15-5. Using a BookDict, but not quite as intended\n\n>>> from books import BookDict\n>>> pp = BookDict(title='Programming Pearls',\n... authors='Jon Bentley',\n... isbn='0201657880',\n... pagecount=256)\n>>> pp\n{'title': 'Programming Pearls', 'authors': 'Jon Bentley', 'isbn': '0201657880', 'pagecount': 256}\n>>> type(pp)\n<class 'dict'>\n>>> pp.title\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nAttributeError: 'dict' object has no attribute 'title'\n>>> pp['title']\n'Programming Pearls'\n>>> BookDict.__annotations__\n{'isbn': <class 'str'>, 'title': <class 'str'>, 'authors': typing.List[str], 'pagecount': <class 'int'>}\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
a = int(input(
'Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): '))
if a != 0:
linhas = int(input('Informe o número de linhas da matriz: '))
colunas = int(input('Informe o número de colunas da matriz: '))
else:
linhas = np.random.randint(1, 6)
colunas = np.random.randint(1, 6)
lista = np.floor(16 * np.random.random((linhas, colunas)))
primeiro = lista[0, 0]
quantVezes = 0
media = 0
for i in range(linhas):
for j in range(colunas):
if lista[i][j] == primeiro:
quantVezes += 1
media += lista[i][j] / (linhas * colunas)
menorDiferenca = lista[0, 0]
somaPar = 0
for i in range(linhas):
for j in range(colunas):
if np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media):
menorDiferenca = lista[i][j]
if lista[i][j] % 2 == 0:
somaPar += lista[i][j]
u, c = np.unique(lista, return_counts=True)
quantRepetido = linhas * colunas - len(u)
print(lista)
print(f'O maior valor da lista é: {np.amax(lista)}')
print(f'A soma dos elementos é: {np.sum(lista)}')
print(
f'A quantidade de vezes que o primeiro elemento aparece: {quantVezes}')
print(f'A média é: {media}')
print(f'O Valor mais próximo da média é: {menorDiferenca}')
print(f'A soma dos valores múltiplos de 2 é: {somaPar}')
print(f'A quantidade de números repetidos é: {quantRepetido}')
print(f'Lista sem números repetidos: {u}')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
a = int(input(
'Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): '))
if a != 0:
linhas = int(input('Informe o número de linhas da matriz: '))
colunas = int(input('Informe o número de colunas da matriz: '))
else:
linhas = np.random.randint(1, 6)
colunas = np.random.randint(1, 6)
lista = np.floor(16 * np.random.random((linhas, colunas)))
primeiro = lista[0, 0]
quantVezes = 0
media = 0
for i in range(linhas):
for j in range(colunas):
if lista[i][j] == primeiro:
quantVezes += 1
media += lista[i][j] / (linhas * colunas)
menorDiferenca = lista[0, 0]
somaPar = 0
for i in range(linhas):
for j in range(colunas):
if np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media):
menorDiferenca = lista[i][j]
if lista[i][j] % 2 == 0:
somaPar += lista[i][j]
u, c = np.unique(lista, return_counts=True)
quantRepetido = linhas * colunas - len(u)
print(lista)
print(f'O maior valor da lista é: {np.amax(lista)}')
print(f'A soma dos elementos é: {np.sum(lista)}')
print(
f'A quantidade de vezes que o primeiro elemento aparece: {quantVezes}')
print(f'A média é: {media}')
print(f'O Valor mais próximo da média é: {menorDiferenca}')
print(f'A soma dos valores múltiplos de 2 é: {somaPar}')
print(f'A quantidade de números repetidos é: {quantRepetido}')
print(f'Lista sem números repetidos: {u}')
main()
<|reserved_special_token_1|>
import numpy as np
def main():
a = int(input(
'Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): '))
if a != 0:
linhas = int(input('Informe o número de linhas da matriz: '))
colunas = int(input('Informe o número de colunas da matriz: '))
else:
linhas = np.random.randint(1, 6)
colunas = np.random.randint(1, 6)
lista = np.floor(16 * np.random.random((linhas, colunas)))
primeiro = lista[0, 0]
quantVezes = 0
media = 0
for i in range(linhas):
for j in range(colunas):
if lista[i][j] == primeiro:
quantVezes += 1
media += lista[i][j] / (linhas * colunas)
menorDiferenca = lista[0, 0]
somaPar = 0
for i in range(linhas):
for j in range(colunas):
if np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media):
menorDiferenca = lista[i][j]
if lista[i][j] % 2 == 0:
somaPar += lista[i][j]
u, c = np.unique(lista, return_counts=True)
quantRepetido = linhas * colunas - len(u)
print(lista)
print(f'O maior valor da lista é: {np.amax(lista)}')
print(f'A soma dos elementos é: {np.sum(lista)}')
print(
f'A quantidade de vezes que o primeiro elemento aparece: {quantVezes}')
print(f'A média é: {media}')
print(f'O Valor mais próximo da média é: {menorDiferenca}')
print(f'A soma dos valores múltiplos de 2 é: {somaPar}')
print(f'A quantidade de números repetidos é: {quantRepetido}')
print(f'Lista sem números repetidos: {u}')
main()
<|reserved_special_token_1|>
import numpy as np
# UM TRABALHO FEITO PELA GRANDE DUPLA PEQUENA Mag e Rud
def main():
a = int(input("Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): "))
if(a != 0):
linhas = int(input("Informe o número de linhas da matriz: "))
colunas = int(input("Informe o número de colunas da matriz: "))
else:
linhas = np.random.randint(1, 6)
colunas = np.random.randint(1, 6)
lista = np.floor(16 * np.random.random((linhas, colunas)))
primeiro = lista[0, 0]
quantVezes = 0
media = 0
for i in range(linhas):
for j in range(colunas):
if(lista[i][j] == primeiro):
quantVezes += 1
media += lista[i][j] / (linhas * colunas)
menorDiferenca = lista[0, 0]
somaPar = 0
for i in range(linhas):
for j in range(colunas):
if(np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media)):
menorDiferenca = lista[i][j]
if(lista[i][j] % 2 == 0):
somaPar += lista[i][j]
u, c = np.unique(lista, return_counts = True)
quantRepetido = linhas * colunas - len(u)
print(lista)
print(f"O maior valor da lista é: {np.amax(lista)}")
print(f"A soma dos elementos é: {np.sum(lista)}")
print(f"A quantidade de vezes que o primeiro elemento aparece: {quantVezes}")
print(f"A média é: {media}")
print(f"O Valor mais próximo da média é: {menorDiferenca}")
print(f"A soma dos valores múltiplos de 2 é: {somaPar}")
print(f"A quantidade de números repetidos é: {quantRepetido}")
print(f"Lista sem números repetidos: {u}")
main()
|
flexible
|
{
"blob_id": "c80ecb97c8863b724169715b766024ce824b9225",
"index": 5572,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n a = int(input(\n 'Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): '))\n if a != 0:\n linhas = int(input('Informe o número de linhas da matriz: '))\n colunas = int(input('Informe o número de colunas da matriz: '))\n else:\n linhas = np.random.randint(1, 6)\n colunas = np.random.randint(1, 6)\n lista = np.floor(16 * np.random.random((linhas, colunas)))\n primeiro = lista[0, 0]\n quantVezes = 0\n media = 0\n for i in range(linhas):\n for j in range(colunas):\n if lista[i][j] == primeiro:\n quantVezes += 1\n media += lista[i][j] / (linhas * colunas)\n menorDiferenca = lista[0, 0]\n somaPar = 0\n for i in range(linhas):\n for j in range(colunas):\n if np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media):\n menorDiferenca = lista[i][j]\n if lista[i][j] % 2 == 0:\n somaPar += lista[i][j]\n u, c = np.unique(lista, return_counts=True)\n quantRepetido = linhas * colunas - len(u)\n print(lista)\n print(f'O maior valor da lista é: {np.amax(lista)}')\n print(f'A soma dos elementos é: {np.sum(lista)}')\n print(\n f'A quantidade de vezes que o primeiro elemento aparece: {quantVezes}')\n print(f'A média é: {media}')\n print(f'O Valor mais próximo da média é: {menorDiferenca}')\n print(f'A soma dos valores múltiplos de 2 é: {somaPar}')\n print(f'A quantidade de números repetidos é: {quantRepetido}')\n print(f'Lista sem números repetidos: {u}')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n a = int(input(\n 'Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): '))\n if a != 0:\n linhas = int(input('Informe o número de linhas da matriz: '))\n colunas = int(input('Informe o número de colunas da matriz: '))\n else:\n linhas = np.random.randint(1, 6)\n colunas = np.random.randint(1, 6)\n lista = np.floor(16 * np.random.random((linhas, colunas)))\n primeiro = lista[0, 0]\n quantVezes = 0\n media = 0\n for i in range(linhas):\n for j in range(colunas):\n if lista[i][j] == primeiro:\n quantVezes += 1\n media += lista[i][j] / (linhas * colunas)\n menorDiferenca = lista[0, 0]\n somaPar = 0\n for i in range(linhas):\n for j in range(colunas):\n if np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media):\n menorDiferenca = lista[i][j]\n if lista[i][j] % 2 == 0:\n somaPar += lista[i][j]\n u, c = np.unique(lista, return_counts=True)\n quantRepetido = linhas * colunas - len(u)\n print(lista)\n print(f'O maior valor da lista é: {np.amax(lista)}')\n print(f'A soma dos elementos é: {np.sum(lista)}')\n print(\n f'A quantidade de vezes que o primeiro elemento aparece: {quantVezes}')\n print(f'A média é: {media}')\n print(f'O Valor mais próximo da média é: {menorDiferenca}')\n print(f'A soma dos valores múltiplos de 2 é: {somaPar}')\n print(f'A quantidade de números repetidos é: {quantRepetido}')\n print(f'Lista sem números repetidos: {u}')\n\n\nmain()\n",
"step-4": "import numpy as np\n\n\ndef main():\n a = int(input(\n 'Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): '))\n if a != 0:\n linhas = int(input('Informe o número de linhas da matriz: '))\n colunas = int(input('Informe o número de colunas da matriz: '))\n else:\n linhas = np.random.randint(1, 6)\n colunas = np.random.randint(1, 6)\n lista = np.floor(16 * np.random.random((linhas, colunas)))\n primeiro = lista[0, 0]\n quantVezes = 0\n media = 0\n for i in range(linhas):\n for j in range(colunas):\n if lista[i][j] == primeiro:\n quantVezes += 1\n media += lista[i][j] / (linhas * colunas)\n menorDiferenca = lista[0, 0]\n somaPar = 0\n for i in range(linhas):\n for j in range(colunas):\n if np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media):\n menorDiferenca = lista[i][j]\n if lista[i][j] % 2 == 0:\n somaPar += lista[i][j]\n u, c = np.unique(lista, return_counts=True)\n quantRepetido = linhas * colunas - len(u)\n print(lista)\n print(f'O maior valor da lista é: {np.amax(lista)}')\n print(f'A soma dos elementos é: {np.sum(lista)}')\n print(\n f'A quantidade de vezes que o primeiro elemento aparece: {quantVezes}')\n print(f'A média é: {media}')\n print(f'O Valor mais próximo da média é: {menorDiferenca}')\n print(f'A soma dos valores múltiplos de 2 é: {somaPar}')\n print(f'A quantidade de números repetidos é: {quantRepetido}')\n print(f'Lista sem números repetidos: {u}')\n\n\nmain()\n",
"step-5": "import numpy as np\n\n# UM TRABALHO FEITO PELA GRANDE DUPLA PEQUENA Mag e Rud\n\ndef main():\n a = int(input(\"Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): \"))\n if(a != 0):\n linhas = int(input(\"Informe o número de linhas da matriz: \"))\n colunas = int(input(\"Informe o número de colunas da matriz: \"))\n else:\n linhas = np.random.randint(1, 6)\n colunas = np.random.randint(1, 6)\n lista = np.floor(16 * np.random.random((linhas, colunas)))\n primeiro = lista[0, 0]\n quantVezes = 0\n media = 0\n for i in range(linhas):\n for j in range(colunas):\n if(lista[i][j] == primeiro):\n quantVezes += 1\n media += lista[i][j] / (linhas * colunas)\n menorDiferenca = lista[0, 0]\n somaPar = 0\n for i in range(linhas):\n for j in range(colunas):\n if(np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media)):\n menorDiferenca = lista[i][j]\n if(lista[i][j] % 2 == 0):\n somaPar += lista[i][j]\n u, c = np.unique(lista, return_counts = True)\n quantRepetido = linhas * colunas - len(u)\n \n print(lista)\n print(f\"O maior valor da lista é: {np.amax(lista)}\")\n print(f\"A soma dos elementos é: {np.sum(lista)}\")\n print(f\"A quantidade de vezes que o primeiro elemento aparece: {quantVezes}\")\n print(f\"A média é: {media}\")\n print(f\"O Valor mais próximo da média é: {menorDiferenca}\")\n print(f\"A soma dos valores múltiplos de 2 é: {somaPar}\")\n print(f\"A quantidade de números repetidos é: {quantRepetido}\")\n print(f\"Lista sem números repetidos: {u}\") \n \n \n \n \nmain()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(studentMarks[3])
studentMarks.append(95)
for i in studentMarks:
print(i)
<|reserved_special_token_0|>
while i < len(studentMarks):
print(studentMarks[i])
i += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
studentMarks = arr.array('i', [2, 30, 45, 50, 90])
print(studentMarks[3])
studentMarks.append(95)
for i in studentMarks:
print(i)
i = 0
while i < len(studentMarks):
print(studentMarks[i])
i += 1
<|reserved_special_token_1|>
import array as arr
studentMarks = arr.array('i', [2, 30, 45, 50, 90])
print(studentMarks[3])
studentMarks.append(95)
for i in studentMarks:
print(i)
i = 0
while i < len(studentMarks):
print(studentMarks[i])
i += 1
<|reserved_special_token_1|>
import array as arr
# from array import * # To remove use of 'arr' every time.
studentMarks = arr.array('i', [2,30,45,50,90]) # i represents datatype of array which is int here.
# accessing array
print(studentMarks[3])
studentMarks.append(95)
# using for loop
for i in studentMarks:
print(i)
# using while loop
i = 0
while i < len(studentMarks):
print(studentMarks[i])
i += 1
|
flexible
|
{
"blob_id": "d442d5c7afd32dd149bb47fc9c4355409c53dab8",
"index": 6719,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(studentMarks[3])\nstudentMarks.append(95)\nfor i in studentMarks:\n print(i)\n<mask token>\nwhile i < len(studentMarks):\n print(studentMarks[i])\n i += 1\n",
"step-3": "<mask token>\nstudentMarks = arr.array('i', [2, 30, 45, 50, 90])\nprint(studentMarks[3])\nstudentMarks.append(95)\nfor i in studentMarks:\n print(i)\ni = 0\nwhile i < len(studentMarks):\n print(studentMarks[i])\n i += 1\n",
"step-4": "import array as arr\nstudentMarks = arr.array('i', [2, 30, 45, 50, 90])\nprint(studentMarks[3])\nstudentMarks.append(95)\nfor i in studentMarks:\n print(i)\ni = 0\nwhile i < len(studentMarks):\n print(studentMarks[i])\n i += 1\n",
"step-5": "import array as arr\r\n# from array import * # To remove use of 'arr' everytime.\r\n\r\nstudentMarks = arr.array('i', [2,30,45,50,90]) # i represnts datatype of array which is int here.\r\n\r\n# accessing array\r\nprint(studentMarks[3])\r\n\r\nstudentMarks.append(95)\r\n\r\n# using for loop\r\nfor i in studentMarks:\r\n print(i)\r\n\r\n# using while loop\r\ni = 0\r\nwhile i < len(studentMarks):\r\n print(studentMarks[i]) \r\n i += 1\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def drawing_plt():
thisImg = os.listdir(caltech_dir)
row = 4
cols = int(math.ceil(len(thisImg) / 4))
fig = plt.figure()
i = 1
for image in glob.glob('C:/cnnTest/*.jpg'):
img = cv2.imread(image)
subplot = fig.add_subplot(row, cols, i)
subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
subplot.set_title(thisImg[i - 1])
subplot.axis('off')
i += 1
print('\t', '전체 이미지 리스트 ')
plt.show()
def get_Image(str):
imgPath = 'C:/cnnTest/'
image = cv2.imread(imgPath + str)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.show()
def get_DB_Nutrition(str):
db = pymysql.connect(host='localhost', user='yeha', password='', db=
'nutrition')
cur = db.cursor()
sql = (
"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s"
)
cur.execute(sql, str)
data = cur.fetchall()
df = pd.Series(data[0], data[1])
print(df)
db.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def drawing_plt():
thisImg = os.listdir(caltech_dir)
row = 4
cols = int(math.ceil(len(thisImg) / 4))
fig = plt.figure()
i = 1
for image in glob.glob('C:/cnnTest/*.jpg'):
img = cv2.imread(image)
subplot = fig.add_subplot(row, cols, i)
subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
subplot.set_title(thisImg[i - 1])
subplot.axis('off')
i += 1
print('\t', '전체 이미지 리스트 ')
plt.show()
def get_Image(str):
imgPath = 'C:/cnnTest/'
image = cv2.imread(imgPath + str)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.show()
def get_DB_Nutrition(str):
db = pymysql.connect(host='localhost', user='yeha', password='', db=
'nutrition')
cur = db.cursor()
sql = (
"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s"
)
cur.execute(sql, str)
data = cur.fetchall()
df = pd.Series(data[0], data[1])
print(df)
db.close()
<|reserved_special_token_0|>
for i in range(len(files)):
files[i] = caltech_dir + '/' + files[i]
for f in files:
img = Image.open(f)
img = img.convert('RGB')
img = img.resize((image_w, image_h))
data = np.asarray(img)
X.append(data)
<|reserved_special_token_0|>
np.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})
print('프로그램을 실행합니다..')
print('\n')
<|reserved_special_token_0|>
for i in prediction:
pre_ans = i.argmax()
pre_ans_str = ''
if pre_ans == 0:
pre_ans_str = '연어회'
elif pre_ans == 1:
pre_ans_str = '쌀국수'
elif pre_ans == 2:
pre_ans_str = '샌드위치'
else:
pre_ans_str = '새우튀김'
if i[0] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[1] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[2] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[3] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
cnt += 1
drawing_plt()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def drawing_plt():
thisImg = os.listdir(caltech_dir)
row = 4
cols = int(math.ceil(len(thisImg) / 4))
fig = plt.figure()
i = 1
for image in glob.glob('C:/cnnTest/*.jpg'):
img = cv2.imread(image)
subplot = fig.add_subplot(row, cols, i)
subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
subplot.set_title(thisImg[i - 1])
subplot.axis('off')
i += 1
print('\t', '전체 이미지 리스트 ')
plt.show()
def get_Image(str):
imgPath = 'C:/cnnTest/'
image = cv2.imread(imgPath + str)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.show()
def get_DB_Nutrition(str):
db = pymysql.connect(host='localhost', user='yeha', password='', db=
'nutrition')
cur = db.cursor()
sql = (
"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s"
)
cur.execute(sql, str)
data = cur.fetchall()
df = pd.Series(data[0], data[1])
print(df)
db.close()
caltech_dir = 'C:/cnnTest'
image_w = 128
image_h = 128
pixels = image_h * image_w * 3
X = []
files = os.listdir(caltech_dir)
for i in range(len(files)):
files[i] = caltech_dir + '/' + files[i]
for f in files:
img = Image.open(f)
img = img.convert('RGB')
img = img.resize((image_w, image_h))
data = np.asarray(img)
X.append(data)
X = np.array(X)
<|reserved_special_token_0|>
model = load_model('C:/image/train/model/multi_img_classification.model')
prediction = model.predict(X)
np.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})
print('프로그램을 실행합니다..')
print('\n')
thisImg = os.listdir(caltech_dir)
cnt = 0
for i in prediction:
pre_ans = i.argmax()
pre_ans_str = ''
if pre_ans == 0:
pre_ans_str = '연어회'
elif pre_ans == 1:
pre_ans_str = '쌀국수'
elif pre_ans == 2:
pre_ans_str = '샌드위치'
else:
pre_ans_str = '새우튀김'
if i[0] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[1] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[2] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[3] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
cnt += 1
drawing_plt()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from PIL import Image
import os, glob, numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import cv2
import pymysql
import MySQLdb as mysql
<|reserved_special_token_0|>
def drawing_plt():
thisImg = os.listdir(caltech_dir)
row = 4
cols = int(math.ceil(len(thisImg) / 4))
fig = plt.figure()
i = 1
for image in glob.glob('C:/cnnTest/*.jpg'):
img = cv2.imread(image)
subplot = fig.add_subplot(row, cols, i)
subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
subplot.set_title(thisImg[i - 1])
subplot.axis('off')
i += 1
print('\t', '전체 이미지 리스트 ')
plt.show()
def get_Image(str):
imgPath = 'C:/cnnTest/'
image = cv2.imread(imgPath + str)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.show()
def get_DB_Nutrition(str):
db = pymysql.connect(host='localhost', user='yeha', password='', db=
'nutrition')
cur = db.cursor()
sql = (
"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s"
)
cur.execute(sql, str)
data = cur.fetchall()
df = pd.Series(data[0], data[1])
print(df)
db.close()
caltech_dir = 'C:/cnnTest'
image_w = 128
image_h = 128
pixels = image_h * image_w * 3
X = []
files = os.listdir(caltech_dir)
for i in range(len(files)):
files[i] = caltech_dir + '/' + files[i]
for f in files:
img = Image.open(f)
img = img.convert('RGB')
img = img.resize((image_w, image_h))
data = np.asarray(img)
X.append(data)
X = np.array(X)
from keras.models import load_model
model = load_model('C:/image/train/model/multi_img_classification.model')
prediction = model.predict(X)
np.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})
print('프로그램을 실행합니다..')
print('\n')
thisImg = os.listdir(caltech_dir)
cnt = 0
for i in prediction:
pre_ans = i.argmax()
pre_ans_str = ''
if pre_ans == 0:
pre_ans_str = '연어회'
elif pre_ans == 1:
pre_ans_str = '쌀국수'
elif pre_ans == 2:
pre_ans_str = '샌드위치'
else:
pre_ans_str = '새우튀김'
if i[0] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[1] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[2] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
if i[3] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')
get_DB_Nutrition(pre_ans_str)
cnt += 1
drawing_plt()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 20:29:49 2019
@author: kzx789
"""
from PIL import Image
import os, glob, numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import cv2
import pymysql
import MySQLdb as mysql
"""
#csv를 읽어서 영양정보 출력
def get_Nutrition(str) :
nutrition = pd.read_csv('C:/식품영양정보/영양정보.csv')
print(nutrition[nutrition['음식명'] == str])
"""
#사용된 전체 이미지 출력
def drawing_plt():
thisImg = os.listdir(caltech_dir)
row = 4
cols = int(math.ceil(len(thisImg)/4)) #반올림
fig = plt.figure()
i = 1
for image in glob.glob("C:/cnnTest/*.jpg"): #glob를 사용해서 Test로 사용된 파일 가져오기
img = cv2.imread(image)
subplot = fig.add_subplot(row, cols, i)
subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) #기본컬러
subplot.set_title(thisImg[i-1]) #타이틀 붙이기
subplot.axis("off")
i += 1
print('\t',"전체 이미지 리스트 ")
plt.show()
#조건에 맞는 개별 이미지 출력
def get_Image(str):
imgPath = 'C:/cnnTest/'
image = cv2.imread(imgPath+str)
image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.show()
#데이터베이스에서 영양소 정보 가지고 오기
def get_DB_Nutrition(str):
db = pymysql.connect(host="localhost", user = "yeha", password="", db="nutrition")
cur = db.cursor() #Connection에서 Cursor생성
sql = "SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s"
cur.execute(sql,(str))
data = cur.fetchall() #정보 전부 가져오기
df = pd.Series(data[0],data[1])
print(df)
db.close()
caltech_dir = "C:/cnnTest"
#테스트할 데이터들을 128*128로 지정
image_w = 128
image_h = 128
pixels = image_h * image_w * 3 #픽셀 지정
X = []
#filenames = []
files = os.listdir(caltech_dir) #하위 디렉터리 파일 리스트 구하기
#print(files) #이미지 목록 확인
for i in range(len(files)):
files[i]=caltech_dir+'/'+ files[i]
#print(files)
for f in files:
img = Image.open(f)
img = img.convert("RGB")
img = img.resize((image_w, image_h))
data = np.asarray(img)
# filenames.append(f)
X.append(data)
X = np.array(X)
#print(X)
#모델 불러오기
from keras.models import load_model
model = load_model("C:/image/train/model/multi_img_classification.model")
prediction = model.predict(X)
#print(prediction)
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
print('프로그램을 실행합니다..')
print('\n')
thisImg = os.listdir(caltech_dir)
cnt = 0
for i in prediction:
pre_ans = i.argmax() # 예측 레이블//가장 큰 번째 수
#print(i)
#print(pre_ans)
pre_ans_str = ''
if pre_ans == 0: pre_ans_str = "연어회"
elif pre_ans == 1: pre_ans_str = "쌀국수"
elif pre_ans == 2: pre_ans_str = "샌드위치"
else: pre_ans_str = "새우튀김"
if i[0] >= 0.8 :
get_Image(thisImg[cnt])
print(thisImg[cnt]+" 이미지는 "+pre_ans_str+"(으)로 추정됩니다.")
#get_Nutrition(pre_ans_str)
get_DB_Nutrition(pre_ans_str)
if i[1] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt]+" 이미지는 "+pre_ans_str+"(으)로 추정됩니다.")
#get_Nutrition(pre_ans_str)
get_DB_Nutrition(pre_ans_str)
if i[2] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt]+" 이미지는 "+pre_ans_str+"(으)로 추정됩니다.")
#get_Nutrition(pre_ans_str)
get_DB_Nutrition(pre_ans_str)
if i[3] >= 0.8:
get_Image(thisImg[cnt])
print(thisImg[cnt]+" 이미지는 "+pre_ans_str+"(으)로 추정됩니다.")
#get_Nutrition(pre_ans_str)
get_DB_Nutrition(pre_ans_str)
cnt += 1
drawing_plt()
|
flexible
|
{
"blob_id": "1255a9df2fbe11d92991f3f0f7054b92cb017628",
"index": 2941,
"step-1": "<mask token>\n\n\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg) / 4))\n fig = plt.figure()\n i = 1\n for image in glob.glob('C:/cnnTest/*.jpg'):\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n subplot.set_title(thisImg[i - 1])\n subplot.axis('off')\n i += 1\n print('\\t', '전체 이미지 리스트 ')\n plt.show()\n\n\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath + str)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host='localhost', user='yeha', password='', db=\n 'nutrition')\n cur = db.cursor()\n sql = (\n \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n )\n cur.execute(sql, str)\n data = cur.fetchall()\n df = pd.Series(data[0], data[1])\n print(df)\n db.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg) / 4))\n fig = plt.figure()\n i = 1\n for image in glob.glob('C:/cnnTest/*.jpg'):\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n subplot.set_title(thisImg[i - 1])\n subplot.axis('off')\n i += 1\n print('\\t', '전체 이미지 리스트 ')\n plt.show()\n\n\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath + str)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host='localhost', user='yeha', password='', db=\n 'nutrition')\n cur = db.cursor()\n sql = (\n \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n )\n cur.execute(sql, str)\n data = cur.fetchall()\n df = pd.Series(data[0], data[1])\n print(df)\n db.close()\n\n\n<mask token>\nfor i in range(len(files)):\n files[i] = caltech_dir + '/' + files[i]\nfor f in files:\n img = Image.open(f)\n img = img.convert('RGB')\n img = img.resize((image_w, image_h))\n data = np.asarray(img)\n X.append(data)\n<mask token>\nnp.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})\nprint('프로그램을 실행합니다..')\nprint('\\n')\n<mask token>\nfor i in prediction:\n pre_ans = i.argmax()\n pre_ans_str = ''\n if pre_ans == 0:\n pre_ans_str = '연어회'\n elif pre_ans == 1:\n pre_ans_str = '쌀국수'\n elif pre_ans == 2:\n pre_ans_str = '샌드위치'\n else:\n pre_ans_str = '새우튀김'\n if i[0] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[1] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[2] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[3] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n cnt += 1\ndrawing_plt()\n",
"step-3": "<mask token>\n\n\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg) / 4))\n fig = plt.figure()\n i = 1\n for image in glob.glob('C:/cnnTest/*.jpg'):\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n subplot.set_title(thisImg[i - 1])\n subplot.axis('off')\n i += 1\n print('\\t', '전체 이미지 리스트 ')\n plt.show()\n\n\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath + str)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host='localhost', user='yeha', password='', db=\n 'nutrition')\n cur = db.cursor()\n sql = (\n \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n )\n cur.execute(sql, str)\n data = cur.fetchall()\n df = pd.Series(data[0], data[1])\n print(df)\n db.close()\n\n\ncaltech_dir = 'C:/cnnTest'\nimage_w = 128\nimage_h = 128\npixels = image_h * image_w * 3\nX = []\nfiles = os.listdir(caltech_dir)\nfor i in range(len(files)):\n files[i] = caltech_dir + '/' + files[i]\nfor f in files:\n img = Image.open(f)\n img = img.convert('RGB')\n img = img.resize((image_w, image_h))\n data = np.asarray(img)\n X.append(data)\nX = np.array(X)\n<mask token>\nmodel = load_model('C:/image/train/model/multi_img_classification.model')\nprediction = model.predict(X)\nnp.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})\nprint('프로그램을 실행합니다..')\nprint('\\n')\nthisImg = os.listdir(caltech_dir)\ncnt = 0\nfor i in prediction:\n pre_ans = i.argmax()\n pre_ans_str = ''\n if pre_ans == 0:\n pre_ans_str = '연어회'\n elif pre_ans == 1:\n pre_ans_str = '쌀국수'\n elif pre_ans == 2:\n pre_ans_str = '샌드위치'\n else:\n pre_ans_str = '새우튀김'\n if i[0] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[1] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[2] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[3] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n cnt += 1\ndrawing_plt()\n",
"step-4": "<mask token>\nfrom PIL import Image\nimport os, glob, numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport cv2\nimport pymysql\nimport MySQLdb as mysql\n<mask token>\n\n\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg) / 4))\n fig = plt.figure()\n i = 1\n for image in glob.glob('C:/cnnTest/*.jpg'):\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n subplot.set_title(thisImg[i - 1])\n subplot.axis('off')\n i += 1\n print('\\t', '전체 이미지 리스트 ')\n plt.show()\n\n\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath + str)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host='localhost', user='yeha', password='', db=\n 'nutrition')\n cur = db.cursor()\n sql = (\n \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n )\n cur.execute(sql, str)\n data = cur.fetchall()\n df = pd.Series(data[0], data[1])\n print(df)\n db.close()\n\n\ncaltech_dir = 'C:/cnnTest'\nimage_w = 128\nimage_h = 128\npixels = image_h * image_w * 3\nX = []\nfiles = os.listdir(caltech_dir)\nfor i in range(len(files)):\n files[i] = caltech_dir + '/' + files[i]\nfor f in files:\n img = Image.open(f)\n img = img.convert('RGB')\n img = img.resize((image_w, image_h))\n data = np.asarray(img)\n X.append(data)\nX = np.array(X)\nfrom keras.models import load_model\nmodel = load_model('C:/image/train/model/multi_img_classification.model')\nprediction = model.predict(X)\nnp.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})\nprint('프로그램을 실행합니다..')\nprint('\\n')\nthisImg = os.listdir(caltech_dir)\ncnt = 0\nfor i in prediction:\n pre_ans = i.argmax()\n pre_ans_str = ''\n if pre_ans == 0:\n pre_ans_str = '연어회'\n elif pre_ans == 1:\n pre_ans_str = '쌀국수'\n elif pre_ans == 2:\n pre_ans_str = '샌드위치'\n else:\n pre_ans_str = '새우튀김'\n if i[0] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[1] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[2] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[3] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n cnt += 1\ndrawing_plt()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 12 20:29:49 2019\n\n@author: kzx789\n\"\"\"\n\nfrom PIL import Image\nimport os, glob, numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport cv2\nimport pymysql\nimport MySQLdb as mysql\n\n\"\"\"\n#csv를 읽어서 영양정보 출력\ndef get_Nutrition(str) :\n nutrition = pd.read_csv('C:/식품영양정보/영양정보.csv') \n print(nutrition[nutrition['음식명'] == str])\n\"\"\" \n#사용된 전체 이미지 출력\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg)/4)) #반올림\n fig = plt.figure()\n i = 1\n \n for image in glob.glob(\"C:/cnnTest/*.jpg\"): #glob를 사용해서 Test로 사용된 파일 가져오기\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) #기본컬러\n subplot.set_title(thisImg[i-1]) #타이틀 붙이기\n subplot.axis(\"off\") \n i += 1\n print('\\t',\"전체 이미지 리스트 \")\n plt.show()\n\n#조건에 맞는 개별 이미지 출력\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath+str)\n image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n#데이터베이스에서 영양소 정보 가지고 오기\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host=\"localhost\", user = \"yeha\", password=\"\", db=\"nutrition\")\n cur = db.cursor() #Connection에서 Cursor생성\n sql = \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n cur.execute(sql,(str))\n data = cur.fetchall() #정보 전부 가져오기\n df = pd.Series(data[0],data[1])\n print(df)\n db.close()\n\n\ncaltech_dir = \"C:/cnnTest\"\n\n#테스트할 데이터들을 128*128로 지정\nimage_w = 128\nimage_h = 128\npixels = image_h * image_w * 3 #픽셀 지정\n\nX = []\n#filenames = []\n\nfiles = os.listdir(caltech_dir) #하위 디렉터리 파일 리스트 구하기\n\n#print(files) #이미지 목록 확인 \n\nfor i in range(len(files)):\n files[i]=caltech_dir+'/'+ files[i]\n#print(files) \n\nfor f in files:\n img = Image.open(f)\n img = img.convert(\"RGB\")\n img = img.resize((image_w, image_h))\n data = np.asarray(img)\n # filenames.append(f)\n X.append(data)\n\nX = np.array(X)\n#print(X)\n\n#모델 불러오기\nfrom keras.models import load_model\n\nmodel = load_model(\"C:/image/train/model/multi_img_classification.model\")\nprediction = model.predict(X)\n#print(prediction)\n\nnp.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\n\n\nprint('프로그램을 실행합니다..')\nprint('\\n')\nthisImg = os.listdir(caltech_dir)\ncnt = 0\n\nfor i in prediction:\n pre_ans = i.argmax() # 예측 레이블//가장 큰 번째 수\n #print(i)\n #print(pre_ans)\n pre_ans_str = ''\n if pre_ans == 0: pre_ans_str = \"연어회\"\n elif pre_ans == 1: pre_ans_str = \"쌀국수\"\n elif pre_ans == 2: pre_ans_str = \"샌드위치\"\n else: pre_ans_str = \"새우튀김\"\n\n if i[0] >= 0.8 : \n get_Image(thisImg[cnt])\n print(thisImg[cnt]+\" 이미지는 \"+pre_ans_str+\"(으)로 추정됩니다.\")\n #get_Nutrition(pre_ans_str) \n get_DB_Nutrition(pre_ans_str)\n\n if i[1] >= 0.8: \n get_Image(thisImg[cnt])\n print(thisImg[cnt]+\" 이미지는 \"+pre_ans_str+\"(으)로 추정됩니다.\")\n #get_Nutrition(pre_ans_str) \n get_DB_Nutrition(pre_ans_str)\n\n\n if i[2] >= 0.8: \n get_Image(thisImg[cnt])\n print(thisImg[cnt]+\" 이미지는 \"+pre_ans_str+\"(으)로 추정됩니다.\")\n #get_Nutrition(pre_ans_str) \n get_DB_Nutrition(pre_ans_str)\n\n if i[3] >= 0.8: \n get_Image(thisImg[cnt])\n print(thisImg[cnt]+\" 이미지는 \"+pre_ans_str+\"(으)로 추정됩니다.\")\n #get_Nutrition(pre_ans_str) \n get_DB_Nutrition(pre_ans_str)\n cnt += 1\n \ndrawing_plt()\n\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while i == 0:
try:
print("Let's divide some numbers!")
a1 = input('Enter numerator: ')
b1 = input('Enter denominator: ')
a = int(a1)
b = int(b1)
        print(a1 + ' divided by ' + b1 + ' equals: ' + str(a / b))
i += 1
except ZeroDivisionError:
print('Cannot divide by 0')
except ValueError:
print('Invalid input, not a number')
<|reserved_special_token_1|>
i = 0
while i == 0:
try:
print("Let's divide some numbers!")
a1 = input('Enter numerator: ')
b1 = input('Enter denominator: ')
a = int(a1)
b = int(b1)
        print(a1 + ' divided by ' + b1 + ' equals: ' + str(a / b))
i += 1
except ZeroDivisionError:
print('Cannot divide by 0')
except ValueError:
print('Invalid input, not a number')
<|reserved_special_token_1|>
#!/usr/local/bin/python
i = 0
while i == 0:
try:
print("Let's divide some numbers!")
a1 = input("Enter numerator: ")
b1 = input("Enter denominator: ")
a = int(a1)
b = int(b1)
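        # int() raises ValueError on non-numeric input, and a / b raises ZeroDivisionError when b is 0; both are handled below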
print(a1 + " divied by " + b1 + " equals: " + str(a/b))
i += 1
except ZeroDivisionError:
print("Cannot divide by 0")
except ValueError:
print("Invalid input, not a number")
|
flexible
|
{
"blob_id": "dcc1b0decf2fca6309dbb60faebd3f0a6944cd7d",
"index": 9130,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i == 0:\n try:\n print(\"Let's divide some numbers!\")\n a1 = input('Enter numerator: ')\n b1 = input('Enter denominator: ')\n a = int(a1)\n b = int(b1)\n print(a1 + ' divied by ' + b1 + ' equals: ' + str(a / b))\n i += 1\n except ZeroDivisionError:\n print('Cannot divide by 0')\n except ValueError:\n print('Invalid input, not a number')\n",
"step-3": "i = 0\nwhile i == 0:\n try:\n print(\"Let's divide some numbers!\")\n a1 = input('Enter numerator: ')\n b1 = input('Enter denominator: ')\n a = int(a1)\n b = int(b1)\n print(a1 + ' divied by ' + b1 + ' equals: ' + str(a / b))\n i += 1\n except ZeroDivisionError:\n print('Cannot divide by 0')\n except ValueError:\n print('Invalid input, not a number')\n",
"step-4": "#!/usr/local/bin/python\n\ni = 0\nwhile i == 0:\n\n try:\n print(\"Let's divide some numbers!\")\n a1 = input(\"Enter numerator: \")\n b1 = input(\"Enter denominator: \")\n a = int(a1)\n b = int(b1)\n \n print(a1 + \" divied by \" + b1 + \" equals: \" + str(a/b))\n i += 1\n \n except ZeroDivisionError:\n print(\"Cannot divide by 0\")\n except ValueError:\n print(\"Invalid input, not a number\")\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""empty message
Revision ID: 6374505f9e6e
Revises: 9dc91bb7d2ba
Create Date: 2016-11-14 10:55:08.418923
"""
# revision identifiers, used by Alembic.
revision = '6374505f9e6e'
down_revision = '9dc91bb7d2ba'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.types as ty
def upgrade():
### commands auto generated by Alembic - please adjust! ###
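    # presumably widening run.polarion_id and moving the long free-text result columns to TEXT so longer values fit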
op.alter_column('run', 'polarion_id', type_=ty.String(65535))
op.alter_column('auto_result', 'skip', type_=ty.Text())
op.alter_column('auto_result', 'failure', type_=ty.Text())
op.alter_column('auto_result', 'comment', type_=ty.Text())
op.alter_column('manual_result', 'comment', type_=ty.Text())
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('run', 'polarion_id', type_=ty.String(1024))
op.alter_column('auto_result', 'skip', type_=ty.String(65535))
op.alter_column('auto_result', 'failure', type_=ty.String(65535))
op.alter_column('auto_result', 'comment', type_=ty.String(65535))
op.alter_column('manual_result', 'comment', type_=ty.String(65535))
### end Alembic commands ###
|
normal
|
{
"blob_id": "7badb7c9f1e00dfc379468b7bd73a3f09bffe6de",
"index": 1191,
"step-1": "<mask token>\n\n\ndef downgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(65535))\n op.alter_column('auto_result', 'skip', type_=ty.Text())\n op.alter_column('auto_result', 'failure', type_=ty.Text())\n op.alter_column('auto_result', 'comment', type_=ty.Text())\n op.alter_column('manual_result', 'comment', type_=ty.Text())\n\n\ndef downgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n",
"step-3": "<mask token>\nrevision = '6374505f9e6e'\ndown_revision = '9dc91bb7d2ba'\n<mask token>\n\n\ndef upgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(65535))\n op.alter_column('auto_result', 'skip', type_=ty.Text())\n op.alter_column('auto_result', 'failure', type_=ty.Text())\n op.alter_column('auto_result', 'comment', type_=ty.Text())\n op.alter_column('manual_result', 'comment', type_=ty.Text())\n\n\ndef downgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n",
"step-4": "<mask token>\nrevision = '6374505f9e6e'\ndown_revision = '9dc91bb7d2ba'\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy.types as ty\n\n\ndef upgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(65535))\n op.alter_column('auto_result', 'skip', type_=ty.Text())\n op.alter_column('auto_result', 'failure', type_=ty.Text())\n op.alter_column('auto_result', 'comment', type_=ty.Text())\n op.alter_column('manual_result', 'comment', type_=ty.Text())\n\n\ndef downgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n",
"step-5": "\"\"\"empty message\n\nRevision ID: 6374505f9e6e\nRevises: 9dc91bb7d2ba\nCreate Date: 2016-11-14 10:55:08.418923\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '6374505f9e6e'\ndown_revision = '9dc91bb7d2ba'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy.types as ty\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('run', 'polarion_id', type_=ty.String(65535))\n op.alter_column('auto_result', 'skip', type_=ty.Text())\n op.alter_column('auto_result', 'failure', type_=ty.Text())\n op.alter_column('auto_result', 'comment', type_=ty.Text())\n op.alter_column('manual_result', 'comment', type_=ty.Text())\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n ### end Alembic commands ###\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class TestDataEmptyArray(object):
@staticmethod
def get_array():
return []
class TestDataUniqueValues(object):
@staticmethod
def get_array():
return [5, 3, 2]
@staticmethod
def get_expected_result():
return 2
class TestDataExactlyTwoDifferentMinimums(object):
@staticmethod
def get_array():
return [5, 3, 2, 2, 9]
@staticmethod
def get_expected_result():
return 2
<|reserved_special_token_0|>
def TestWithUniqueValues():
seq = TestDataUniqueValues.get_array()
assert len(seq) >= 2
assert len(list(set(seq))) == len(seq)
expected_result = TestDataUniqueValues.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestDataEmptyArray(object):
@staticmethod
def get_array():
return []
class TestDataUniqueValues(object):
@staticmethod
def get_array():
return [5, 3, 2]
@staticmethod
def get_expected_result():
return 2
class TestDataExactlyTwoDifferentMinimums(object):
@staticmethod
def get_array():
return [5, 3, 2, 2, 9]
@staticmethod
def get_expected_result():
return 2
<|reserved_special_token_0|>
def TestWithUniqueValues():
seq = TestDataUniqueValues.get_array()
assert len(seq) >= 2
assert len(list(set(seq))) == len(seq)
expected_result = TestDataUniqueValues.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
def TestWithExactyTwoDifferentMinimums():
seq = TestDataExactlyTwoDifferentMinimums.get_array()
assert len(seq) >= 2
tmp = sorted(seq)
assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])
expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestDataEmptyArray(object):
@staticmethod
def get_array():
return []
class TestDataUniqueValues(object):
@staticmethod
def get_array():
return [5, 3, 2]
@staticmethod
def get_expected_result():
return 2
class TestDataExactlyTwoDifferentMinimums(object):
@staticmethod
def get_array():
return [5, 3, 2, 2, 9]
@staticmethod
def get_expected_result():
return 2
def TestWithEmptyArray():
try:
seq = TestDataEmptyArray.get_array()
minimum_index(seq)
except ValueError:
pass
else:
assert False
def TestWithUniqueValues():
seq = TestDataUniqueValues.get_array()
assert len(seq) >= 2
assert len(list(set(seq))) == len(seq)
expected_result = TestDataUniqueValues.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
def TestWithExactyTwoDifferentMinimums():
seq = TestDataExactlyTwoDifferentMinimums.get_array()
assert len(seq) >= 2
tmp = sorted(seq)
assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])
expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def minimum_index(seq):
if len(seq) == 0:
raise ValueError(
'Cannot get the minimum value index from an empty sequence')
min_idx = 0
for i in range(1, len(seq)):
if seq[i] < seq[min_idx]:
min_idx = i
return min_idx
class TestDataEmptyArray(object):
@staticmethod
def get_array():
return []
class TestDataUniqueValues(object):
@staticmethod
def get_array():
return [5, 3, 2]
@staticmethod
def get_expected_result():
return 2
class TestDataExactlyTwoDifferentMinimums(object):
@staticmethod
def get_array():
return [5, 3, 2, 2, 9]
@staticmethod
def get_expected_result():
return 2
def TestWithEmptyArray():
try:
seq = TestDataEmptyArray.get_array()
minimum_index(seq)
except ValueError:
pass
else:
assert False
def TestWithUniqueValues():
seq = TestDataUniqueValues.get_array()
assert len(seq) >= 2
assert len(list(set(seq))) == len(seq)
expected_result = TestDataUniqueValues.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
def TestWithExactyTwoDifferentMinimums():
seq = TestDataExactlyTwoDifferentMinimums.get_array()
assert len(seq) >= 2
tmp = sorted(seq)
assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])
expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
TestWithEmptyArray()
TestWithUniqueValues()
TestWithExactyTwoDifferentMinimums()
print('OK')
<|reserved_special_token_1|>
"""
OO 05-18-2020
Task
----------------------------------------------------------------------------------------------------------
Your company needs a function that meets the following requirements:
- For a given array of 'n' integers, the function returns the index of the element with the minimum value
in the array. If there is more than one element with the minimum value, the returned index should be
the smallest one.
- If an empty array is passed to the function, it should raise an Exception.
    A colleague has written that function, and your task is to design 3 separate unit tests, testing if the
function behaves correctly. The implementation in Python is listed below (Implementations in other
languages can be found in the code template):
def minimum_index(seq):
if len(seq) == 0:
raise ValueError("Cannot get the minimum value index from an empty sequence")
min_idx = 0
for i in range(1, len(seq)):
            if seq[i] < seq[min_idx]:
min_idx = i
return min_idx
Another co-worker has prepared functions that will perform the testing and validate returned results with
expectations. Your task is to implement 3 classes that will produce test data and the expected results for
the testing functions. More specifically: function 'get_array()' in 'TestDataEmptyArray' class and
functions 'get_array()' and 'get_expected_result()' in classes 'TestDataUniqueValues' and
'TestDataExactlyTwoDifferentMinimums' following the below specifications:
- get_array() method in class TestDataEmptyArray has to return an empty array.
- get_array() method in class TestDataUniqueValues has to return an array of size at least 2 with all
unique elements, while method get_expected_result() of this class has to return the expected minimum
value index for this array.
- get_array() method in class TestDataExactlyTwoDifferentMinimums has to return an array where there are
exactly two different minimum values, while method get_expected_result() of this class has to return
the expected minimum value index for this array.
"""
def minimum_index(seq):
if len(seq) == 0:
raise ValueError("Cannot get the minimum value index from an empty sequence")
min_idx = 0
for i in range(1, len(seq)):
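        # strict '<' never updates on a tie, so the first (smallest) index of the minimum is kept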
if seq[i] < seq[min_idx]:
min_idx = i
return min_idx
class TestDataEmptyArray(object):
@staticmethod
def get_array():
return []
class TestDataUniqueValues(object):
@staticmethod
def get_array():
return [5, 3, 2]
@staticmethod
def get_expected_result():
return 2
class TestDataExactlyTwoDifferentMinimums(object):
@staticmethod
def get_array():
return [5, 3, 2, 2, 9]
@staticmethod
def get_expected_result():
return 2
def TestWithEmptyArray():
try:
seq = TestDataEmptyArray.get_array()
minimum_index(seq)
except ValueError:
pass
else:
assert False
def TestWithUniqueValues():
seq = TestDataUniqueValues.get_array()
assert len(seq) >= 2
assert len(list(set(seq))) == len(seq)
expected_result = TestDataUniqueValues.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
def TestWithExactyTwoDifferentMinimums():
seq = TestDataExactlyTwoDifferentMinimums.get_array()
assert len(seq) >= 2
tmp = sorted(seq)
assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])
expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
TestWithEmptyArray()
TestWithUniqueValues()
TestWithExactyTwoDifferentMinimums()
print("OK")
|
flexible
|
{
"blob_id": "8fdc9a52b00686e10c97fa61e43ddbbccb64741b",
"index": 8946,
"step-1": "<mask token>\n\n\nclass TestDataEmptyArray(object):\n\n @staticmethod\n def get_array():\n return []\n\n\nclass TestDataUniqueValues(object):\n\n @staticmethod\n def get_array():\n return [5, 3, 2]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\nclass TestDataExactlyTwoDifferentMinimums(object):\n\n @staticmethod\n def get_array():\n return [5, 3, 2, 2, 9]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\n<mask token>\n\n\ndef TestWithUniqueValues():\n seq = TestDataUniqueValues.get_array()\n assert len(seq) >= 2\n assert len(list(set(seq))) == len(seq)\n expected_result = TestDataUniqueValues.get_expected_result()\n result = minimum_index(seq)\n assert result == expected_result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDataEmptyArray(object):\n\n @staticmethod\n def get_array():\n return []\n\n\nclass TestDataUniqueValues(object):\n\n @staticmethod\n def get_array():\n return [5, 3, 2]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\nclass TestDataExactlyTwoDifferentMinimums(object):\n\n @staticmethod\n def get_array():\n return [5, 3, 2, 2, 9]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\n<mask token>\n\n\ndef TestWithUniqueValues():\n seq = TestDataUniqueValues.get_array()\n assert len(seq) >= 2\n assert len(list(set(seq))) == len(seq)\n expected_result = TestDataUniqueValues.get_expected_result()\n result = minimum_index(seq)\n assert result == expected_result\n\n\ndef TestWithExactyTwoDifferentMinimums():\n seq = TestDataExactlyTwoDifferentMinimums.get_array()\n assert len(seq) >= 2\n tmp = sorted(seq)\n assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])\n expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()\n result = minimum_index(seq)\n assert result == expected_result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestDataEmptyArray(object):\n\n @staticmethod\n def get_array():\n return []\n\n\nclass TestDataUniqueValues(object):\n\n @staticmethod\n def get_array():\n return [5, 3, 2]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\nclass TestDataExactlyTwoDifferentMinimums(object):\n\n @staticmethod\n def get_array():\n return [5, 3, 2, 2, 9]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\ndef TestWithEmptyArray():\n try:\n seq = TestDataEmptyArray.get_array()\n minimum_index(seq)\n except ValueError:\n pass\n else:\n assert False\n\n\ndef TestWithUniqueValues():\n seq = TestDataUniqueValues.get_array()\n assert len(seq) >= 2\n assert len(list(set(seq))) == len(seq)\n expected_result = TestDataUniqueValues.get_expected_result()\n result = minimum_index(seq)\n assert result == expected_result\n\n\ndef TestWithExactyTwoDifferentMinimums():\n seq = TestDataExactlyTwoDifferentMinimums.get_array()\n assert len(seq) >= 2\n tmp = sorted(seq)\n assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])\n expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()\n result = minimum_index(seq)\n assert result == expected_result\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef minimum_index(seq):\n if len(seq) == 0:\n raise ValueError(\n 'Cannot get the minimum value index from an empty sequence')\n min_idx = 0\n for i in range(1, len(seq)):\n if seq[i] < seq[min_idx]:\n min_idx = i\n return min_idx\n\n\nclass TestDataEmptyArray(object):\n\n @staticmethod\n def get_array():\n return []\n\n\nclass TestDataUniqueValues(object):\n\n @staticmethod\n def get_array():\n return [5, 3, 2]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\nclass TestDataExactlyTwoDifferentMinimums(object):\n\n @staticmethod\n def get_array():\n return [5, 3, 2, 2, 9]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\ndef TestWithEmptyArray():\n try:\n seq = TestDataEmptyArray.get_array()\n minimum_index(seq)\n except ValueError:\n pass\n else:\n assert False\n\n\ndef TestWithUniqueValues():\n seq = TestDataUniqueValues.get_array()\n assert len(seq) >= 2\n assert len(list(set(seq))) == len(seq)\n expected_result = TestDataUniqueValues.get_expected_result()\n result = minimum_index(seq)\n assert result == expected_result\n\n\ndef TestWithExactyTwoDifferentMinimums():\n seq = TestDataExactlyTwoDifferentMinimums.get_array()\n assert len(seq) >= 2\n tmp = sorted(seq)\n assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])\n expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()\n result = minimum_index(seq)\n assert result == expected_result\n\n\nTestWithEmptyArray()\nTestWithUniqueValues()\nTestWithExactyTwoDifferentMinimums()\nprint('OK')\n",
"step-5": "\"\"\"\n OO 05-18-2020\n\n Task\n ----------------------------------------------------------------------------------------------------------\n Your company needs a function that meets the following requirements:\n\n - For a given array of 'n' integers, the function returns the index of the element with the minimum value\n in the array. If there is more than one element with the minimum value, the returned index should be\n the smallest one.\n\n - If an empty array is passed to the function, it should raise an Exception.\n\n A colleague has written that function, and your task is to design 3 separated unit tests, testing if the\n function behaves correctly. The implementation in Python is listed below (Implementations in other\n languages can be found in the code template):\n \n def minimum_index(seq):\n if len(seq) == 0:\n raise ValueError(\"Cannot get the minimum value index from an empty sequence\")\n min_idx = 0\n for i in range(1, len(seq)):\n if a[i] < a[min_idx]:\n min_idx = i\n return min_idx\n\n Another co-worker has prepared functions that will perform the testing and validate returned results with\n expectations. Your task is to implement 3 classes that will produce test data and the expected results for\n the testing functions. More specifically: function 'get_array()' in 'TestDataEmptyArray' class and\n functions 'get_array()' and 'get_expected_result()' in classes 'TestDataUniqueValues' and\n 'TestDataExactlyTwoDifferentMinimums' following the below specifications:\n\n - get_array() method in class TestDataEmptyArray has to return an empty array.\n - get_array() method in class TestDataUniqueValues has to return an array of size at least 2 with all\n unique elements, while method get_expected_result() of this class has to return the expected minimum\n value index for this array.\n - get_array() method in class TestDataExactlyTwoDifferentMinimums has to return an array where there are\n exactly two different minimum values, while method get_expected_result() of this class has to return\n the expected minimum value index for this array.\n\n\"\"\"\n\n\ndef minimum_index(seq):\n if len(seq) == 0:\n raise ValueError(\"Cannot get the minimum value index from an empty sequence\")\n\n min_idx = 0\n for i in range(1, len(seq)):\n if seq[i] < seq[min_idx]:\n min_idx = i\n\n return min_idx\n\n\nclass TestDataEmptyArray(object):\n @staticmethod\n def get_array():\n return []\n\n\nclass TestDataUniqueValues(object):\n @staticmethod\n def get_array():\n return [5, 3, 2]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\nclass TestDataExactlyTwoDifferentMinimums(object):\n @staticmethod\n def get_array():\n return [5, 3, 2, 2, 9]\n\n @staticmethod\n def get_expected_result():\n return 2\n\n\ndef TestWithEmptyArray():\n try:\n seq = TestDataEmptyArray.get_array()\n minimum_index(seq)\n except ValueError:\n pass\n else:\n assert False\n\n\ndef TestWithUniqueValues():\n seq = TestDataUniqueValues.get_array()\n\n assert len(seq) >= 2\n assert len(list(set(seq))) == len(seq)\n\n expected_result = TestDataUniqueValues.get_expected_result()\n result = minimum_index(seq)\n\n assert result == expected_result\n\n\ndef TestWithExactyTwoDifferentMinimums():\n seq = TestDataExactlyTwoDifferentMinimums.get_array()\n\n assert len(seq) >= 2\n\n tmp = sorted(seq)\n\n assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])\n\n expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()\n result = minimum_index(seq)\n\n assert result == 
expected_result\n\n\nTestWithEmptyArray()\nTestWithUniqueValues()\nTestWithExactyTwoDifferentMinimums()\nprint(\"OK\")\n",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
"Unit tests for reverse URL lookup"
from django.core.urlresolvers import reverse_helper, NoReverseMatch
import re, unittest
test_data = (
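    # each row: (URL regex, expected reversal or NoReverseMatch, positional args, keyword args)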
('^places/(\d+)/$', 'places/3/', [3], {}),
('^places/(\d+)/$', 'places/3/', ['3'], {}),
('^places/(\d+)/$', NoReverseMatch, ['a'], {}),
('^places/(\d+)/$', NoReverseMatch, [], {}),
('^places/(?P<id>\d+)/$', 'places/3/', [], {'id': 3}),
('^people/(?P<name>\w+)/$', 'people/adrian/', ['adrian'], {}),
('^people/(?P<name>\w+)/$', 'people/adrian/', [], {'name': 'adrian'}),
('^people/(?P<name>\w+)/$', NoReverseMatch, ['name with spaces'], {}),
('^people/(?P<name>\w+)/$', NoReverseMatch, [], {'name': 'name with spaces'}),
('^people/(?P<name>\w+)/$', NoReverseMatch, [], {}),
('^hardcoded/$', 'hardcoded/', [], {}),
('^hardcoded/$', 'hardcoded/', ['any arg'], {}),
('^hardcoded/$', 'hardcoded/', [], {'kwarg': 'foo'}),
('^people/(?P<state>\w\w)/(?P<name>\w+)/$', 'people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('^people/(?P<state>\w\w)/(?P<name>\d)/$', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('^people/(?P<state>\w\w)/(?P<name>\w+)/$', NoReverseMatch, [], {'state': 'il'}),
('^people/(?P<state>\w\w)/(?P<name>\w+)/$', NoReverseMatch, [], {'name': 'adrian'}),
('^people/(?P<state>\w\w)/(\w+)/$', NoReverseMatch, ['il'], {'name': 'adrian'}),
('^people/(?P<state>\w\w)/(\w+)/$', 'people/il/adrian/', ['adrian'], {'state': 'il'}),
)
class URLPatternReverse(unittest.TestCase):
def test_urlpattern_reverse(self):
for regex, expected, args, kwargs in test_data:
try:
got = reverse_helper(re.compile(regex), *args, **kwargs)
            except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
                self.assertEqual(got, expected)
if __name__ == "__main__":
    unittest.main()
|
normal
|
{
"blob_id": "b7ccb41c43a0db6f1bf9e6ba5cef1b9b1417e297",
"index": 633,
"step-1": "\"Unit tests for reverse URL lookup\"\n\nfrom django.core.urlresolvers import reverse_helper, NoReverseMatch\nimport re, unittest\n\ntest_data = (\n ('^places/(\\d+)/$', 'places/3/', [3], {}),\n ('^places/(\\d+)/$', 'places/3/', ['3'], {}),\n ('^places/(\\d+)/$', NoReverseMatch, ['a'], {}),\n ('^places/(\\d+)/$', NoReverseMatch, [], {}),\n ('^places/(?P<id>\\d+)/$', 'places/3/', [], {'id': 3}),\n ('^people/(?P<name>\\w+)/$', 'people/adrian/', ['adrian'], {}),\n ('^people/(?P<name>\\w+)/$', 'people/adrian/', [], {'name': 'adrian'}),\n ('^people/(?P<name>\\w+)/$', NoReverseMatch, ['name with spaces'], {}),\n ('^people/(?P<name>\\w+)/$', NoReverseMatch, [], {'name': 'name with spaces'}),\n ('^people/(?P<name>\\w+)/$', NoReverseMatch, [], {}),\n ('^hardcoded/$', 'hardcoded/', [], {}),\n ('^hardcoded/$', 'hardcoded/', ['any arg'], {}),\n ('^hardcoded/$', 'hardcoded/', [], {'kwarg': 'foo'}),\n ('^people/(?P<state>\\w\\w)/(?P<name>\\w+)/$', 'people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),\n ('^people/(?P<state>\\w\\w)/(?P<name>\\d)/$', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),\n ('^people/(?P<state>\\w\\w)/(?P<name>\\w+)/$', NoReverseMatch, [], {'state': 'il'}),\n ('^people/(?P<state>\\w\\w)/(?P<name>\\w+)/$', NoReverseMatch, [], {'name': 'adrian'}),\n ('^people/(?P<state>\\w\\w)/(\\w+)/$', NoReverseMatch, ['il'], {'name': 'adrian'}),\n ('^people/(?P<state>\\w\\w)/(\\w+)/$', 'people/il/adrian/', ['adrian'], {'state': 'il'}),\n)\n\nclass URLPatternReverse(unittest.TestCase):\n def test_urlpattern_reverse(self):\n for regex, expected, args, kwargs in test_data:\n try:\n got = reverse_helper(re.compile(regex), *args, **kwargs)\n except NoReverseMatch, e:\n self.assertEqual(expected, NoReverseMatch)\n else:\n self.assertEquals(got, expected)\n\nif __name__ == \"__main__\":\n run_tests(1)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--header-mutate-level', type=int, choices=range(11
), nargs='?', help=
'Set the mutation level for the headers (0-10). Default = 5', default=5
)
parser.add_argument('--body-mutate-level', type=int, choices=range(11),
nargs='?', help=
'Set the mutation level for the body (0-10). Default = 5', default=5)
parser.add_argument('--request-mutate-level', type=int, choices=range(
11), nargs='?', help=
'Set the mutation level for the request line (0-10). Default = 5',
default=5)
parser.add_argument('--body-type', type=str, choices=['json', 'junk',
'rand'], help=
'Set the data generated in the request body. Default = rand',
default='rand')
parser.add_argument('--num-headers', type=int, help=
'Sets the maximum number of headers. Default = number of available headers'
, default=-1)
parser.add_argument('--generate-num', type=int, help=
'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'
, default=1)
parser.add_argument('-v', '--version', action='version', version=
'HTTPFuzz Version: 1.0.1')
args = parser.parse_args()
if args.generate_num > 1:
try:
os.mkdir('output')
for i in range(args.generate_num):
with open('output/{}.txt'.format(i + 1), 'w') as f:
request_frame = RequestFrame(args)
request_frame.generate()
f.write(request_frame.request)
print('[+] Wrote request to /output/{}.txt'.format(i + 1))
exit('[+] Finished creating requests')
except:
exit(
"[-] Couldn't make the output directory. It might already exist."
)
request_frame = RequestFrame(args)
request_frame.generate()
exit(request_frame.request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.dont_write_bytecode = True
<|reserved_special_token_0|>
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--header-mutate-level', type=int, choices=range(11
), nargs='?', help=
'Set the mutation level for the headers (0-10). Default = 5', default=5
)
parser.add_argument('--body-mutate-level', type=int, choices=range(11),
nargs='?', help=
'Set the mutation level for the body (0-10). Default = 5', default=5)
parser.add_argument('--request-mutate-level', type=int, choices=range(
11), nargs='?', help=
'Set the mutation level for the request line (0-10). Default = 5',
default=5)
parser.add_argument('--body-type', type=str, choices=['json', 'junk',
'rand'], help=
'Set the data generated in the request body. Default = rand',
default='rand')
parser.add_argument('--num-headers', type=int, help=
'Sets the maximum number of headers. Default = number of available headers'
, default=-1)
parser.add_argument('--generate-num', type=int, help=
'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'
, default=1)
parser.add_argument('-v', '--version', action='version', version=
'HTTPFuzz Version: 1.0.1')
args = parser.parse_args()
if args.generate_num > 1:
try:
os.mkdir('output')
for i in range(args.generate_num):
with open('output/{}.txt'.format(i + 1), 'w') as f:
request_frame = RequestFrame(args)
request_frame.generate()
f.write(request_frame.request)
print('[+] Wrote request to /output/{}.txt'.format(i + 1))
exit('[+] Finished creating requests')
except:
exit(
"[-] Couldn't make the output directory. It might already exist."
)
request_frame = RequestFrame(args)
request_frame.generate()
exit(request_frame.request)
<|reserved_special_token_1|>
import sys, os
sys.dont_write_bytecode = True
import argparse, socket
from requestframe import RequestFrame
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--header-mutate-level', type=int, choices=range(11
), nargs='?', help=
'Set the mutation level for the headers (0-10). Default = 5', default=5
)
parser.add_argument('--body-mutate-level', type=int, choices=range(11),
nargs='?', help=
'Set the mutation level for the body (0-10). Default = 5', default=5)
parser.add_argument('--request-mutate-level', type=int, choices=range(
11), nargs='?', help=
'Set the mutation level for the request line (0-10). Default = 5',
default=5)
parser.add_argument('--body-type', type=str, choices=['json', 'junk',
'rand'], help=
'Set the data generated in the request body. Default = rand',
default='rand')
parser.add_argument('--num-headers', type=int, help=
'Sets the maximum number of headers. Default = number of available headers'
, default=-1)
parser.add_argument('--generate-num', type=int, help=
'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'
, default=1)
parser.add_argument('-v', '--version', action='version', version=
'HTTPFuzz Version: 1.0.1')
args = parser.parse_args()
if args.generate_num > 1:
try:
os.mkdir('output')
for i in range(args.generate_num):
with open('output/{}.txt'.format(i + 1), 'w') as f:
request_frame = RequestFrame(args)
request_frame.generate()
f.write(request_frame.request)
print('[+] Wrote request to /output/{}.txt'.format(i + 1))
exit('[+] Finished creating requests')
except:
exit(
"[-] Couldn't make the output directory. It might already exist."
)
request_frame = RequestFrame(args)
request_frame.generate()
exit(request_frame.request)
<|reserved_special_token_1|>
# Testing
import sys, os
sys.dont_write_bytecode = True
import argparse, socket
from requestframe import RequestFrame
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--header-mutate-level", type=int, choices=range(11), nargs='?', help="Set the mutation level for the headers (0-10). Default = 5", default=5)
parser.add_argument("--body-mutate-level", type=int, choices=range(11), nargs='?', help="Set the mutation level for the body (0-10). Default = 5", default=5)
parser.add_argument("--request-mutate-level", type=int, choices=range(11), nargs='?', help="Set the mutation level for the request line (0-10). Default = 5", default=5)
parser.add_argument("--body-type", type=str, choices=['json', 'junk', 'rand'], help="Set the data generated in the request body. Default = rand", default='rand')
parser.add_argument("--num-headers", type=int, help="Sets the maximum number of headers. Default = number of available headers", default=-1)
parser.add_argument("--generate-num", type=int, help="Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1", default=1)
parser.add_argument('-v', '--version', action='version', version='HTTPFuzz Version: 1.0.1')
args = parser.parse_args()
if args.generate_num > 1:
try:
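            # mkdir, request generation and file writes all run inside this try, so any failure ends up in the bare except below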
os.mkdir("output")
for i in range(args.generate_num):
with open("output/{}.txt".format(i + 1), 'w') as f:
request_frame = RequestFrame(args)
request_frame.generate()
f.write(request_frame.request)
print("[+] Wrote request to /output/{}.txt".format(i + 1))
exit("[+] Finished creating requests")
except:
exit("[-] Couldn't make the output directory. It might already exist.")
request_frame = RequestFrame(args)
request_frame.generate()
exit(request_frame.request)
|
flexible
|
{
"blob_id": "350a79d6cead6814ad48292b14a204e753dc938c",
"index": 4363,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--header-mutate-level', type=int, choices=range(11\n ), nargs='?', help=\n 'Set the mutation level for the headers (0-10). Default = 5', default=5\n )\n parser.add_argument('--body-mutate-level', type=int, choices=range(11),\n nargs='?', help=\n 'Set the mutation level for the body (0-10). Default = 5', default=5)\n parser.add_argument('--request-mutate-level', type=int, choices=range(\n 11), nargs='?', help=\n 'Set the mutation level for the request line (0-10). Default = 5',\n default=5)\n parser.add_argument('--body-type', type=str, choices=['json', 'junk',\n 'rand'], help=\n 'Set the data generated in the request body. Default = rand',\n default='rand')\n parser.add_argument('--num-headers', type=int, help=\n 'Sets the maximum number of headers. Default = number of available headers'\n , default=-1)\n parser.add_argument('--generate-num', type=int, help=\n 'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'\n , default=1)\n parser.add_argument('-v', '--version', action='version', version=\n 'HTTPFuzz Version: 1.0.1')\n args = parser.parse_args()\n if args.generate_num > 1:\n try:\n os.mkdir('output')\n for i in range(args.generate_num):\n with open('output/{}.txt'.format(i + 1), 'w') as f:\n request_frame = RequestFrame(args)\n request_frame.generate()\n f.write(request_frame.request)\n print('[+] Wrote request to /output/{}.txt'.format(i + 1))\n exit('[+] Finished creating requests')\n except:\n exit(\n \"[-] Couldn't make the output directory. It might already exist.\"\n )\n request_frame = RequestFrame(args)\n request_frame.generate()\n exit(request_frame.request)\n",
"step-3": "<mask token>\nsys.dont_write_bytecode = True\n<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--header-mutate-level', type=int, choices=range(11\n ), nargs='?', help=\n 'Set the mutation level for the headers (0-10). Default = 5', default=5\n )\n parser.add_argument('--body-mutate-level', type=int, choices=range(11),\n nargs='?', help=\n 'Set the mutation level for the body (0-10). Default = 5', default=5)\n parser.add_argument('--request-mutate-level', type=int, choices=range(\n 11), nargs='?', help=\n 'Set the mutation level for the request line (0-10). Default = 5',\n default=5)\n parser.add_argument('--body-type', type=str, choices=['json', 'junk',\n 'rand'], help=\n 'Set the data generated in the request body. Default = rand',\n default='rand')\n parser.add_argument('--num-headers', type=int, help=\n 'Sets the maximum number of headers. Default = number of available headers'\n , default=-1)\n parser.add_argument('--generate-num', type=int, help=\n 'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'\n , default=1)\n parser.add_argument('-v', '--version', action='version', version=\n 'HTTPFuzz Version: 1.0.1')\n args = parser.parse_args()\n if args.generate_num > 1:\n try:\n os.mkdir('output')\n for i in range(args.generate_num):\n with open('output/{}.txt'.format(i + 1), 'w') as f:\n request_frame = RequestFrame(args)\n request_frame.generate()\n f.write(request_frame.request)\n print('[+] Wrote request to /output/{}.txt'.format(i + 1))\n exit('[+] Finished creating requests')\n except:\n exit(\n \"[-] Couldn't make the output directory. It might already exist.\"\n )\n request_frame = RequestFrame(args)\n request_frame.generate()\n exit(request_frame.request)\n",
"step-4": "import sys, os\nsys.dont_write_bytecode = True\nimport argparse, socket\nfrom requestframe import RequestFrame\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--header-mutate-level', type=int, choices=range(11\n ), nargs='?', help=\n 'Set the mutation level for the headers (0-10). Default = 5', default=5\n )\n parser.add_argument('--body-mutate-level', type=int, choices=range(11),\n nargs='?', help=\n 'Set the mutation level for the body (0-10). Default = 5', default=5)\n parser.add_argument('--request-mutate-level', type=int, choices=range(\n 11), nargs='?', help=\n 'Set the mutation level for the request line (0-10). Default = 5',\n default=5)\n parser.add_argument('--body-type', type=str, choices=['json', 'junk',\n 'rand'], help=\n 'Set the data generated in the request body. Default = rand',\n default='rand')\n parser.add_argument('--num-headers', type=int, help=\n 'Sets the maximum number of headers. Default = number of available headers'\n , default=-1)\n parser.add_argument('--generate-num', type=int, help=\n 'Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1'\n , default=1)\n parser.add_argument('-v', '--version', action='version', version=\n 'HTTPFuzz Version: 1.0.1')\n args = parser.parse_args()\n if args.generate_num > 1:\n try:\n os.mkdir('output')\n for i in range(args.generate_num):\n with open('output/{}.txt'.format(i + 1), 'w') as f:\n request_frame = RequestFrame(args)\n request_frame.generate()\n f.write(request_frame.request)\n print('[+] Wrote request to /output/{}.txt'.format(i + 1))\n exit('[+] Finished creating requests')\n except:\n exit(\n \"[-] Couldn't make the output directory. It might already exist.\"\n )\n request_frame = RequestFrame(args)\n request_frame.generate()\n exit(request_frame.request)\n",
"step-5": "# Testing\nimport sys, os\nsys.dont_write_bytecode = True\n\nimport argparse, socket\nfrom requestframe import RequestFrame\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--header-mutate-level\", type=int, choices=range(11), nargs='?', help=\"Set the mutation level for the headers (0-10). Default = 5\", default=5)\n parser.add_argument(\"--body-mutate-level\", type=int, choices=range(11), nargs='?', help=\"Set the mutation level for the body (0-10). Default = 5\", default=5)\n parser.add_argument(\"--request-mutate-level\", type=int, choices=range(11), nargs='?', help=\"Set the mutation level for the request line (0-10). Default = 5\", default=5)\n parser.add_argument(\"--body-type\", type=str, choices=['json', 'junk', 'rand'], help=\"Set the data generated in the request body. Default = rand\", default='rand')\n parser.add_argument(\"--num-headers\", type=int, help=\"Sets the maximum number of headers. Default = number of available headers\", default=-1)\n parser.add_argument(\"--generate-num\", type=int, help=\"Number of requests to generate. Any more than 1 generated request will output to a new folder called output/. Default = 1\", default=1)\n parser.add_argument('-v', '--version', action='version', version='HTTPFuzz Version: 1.0.1')\n args = parser.parse_args()\n if args.generate_num > 1:\n try:\n os.mkdir(\"output\")\n for i in range(args.generate_num):\n with open(\"output/{}.txt\".format(i + 1), 'w') as f:\n request_frame = RequestFrame(args)\n request_frame.generate()\n f.write(request_frame.request)\n print(\"[+] Wrote request to /output/{}.txt\".format(i + 1))\n exit(\"[+] Finished creating requests\")\n except:\n exit(\"[-] Couldn't make the output directory. It might already exist.\")\n request_frame = RequestFrame(args)\n request_frame.generate()\n exit(request_frame.request)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import cv2
FRAME_WIDTH = 320
FRAME_HEIGHT = 240
cv2.namedWindow('Measure Angle with centerline')
# WebCam Initialize
vidCapture = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))
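# NOTE: frames passed to out.write() must match the (640, 480) size declared here, or OpenCV can silently drop them and produce an unplayable file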
while True:
# key = cv2.waitKey(1) & 0xFF
# if key == 27:
# break
ret, frame = vidCapture.read()
if ret==True:
# frame = cv2.flip(frame,0)
# write the flipped frame
out.write(frame)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
# img = np.zeros((512, 512, 3), np.uint8)
# cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), 2)
# cv2.line(frame, (0, 120), (320, 120), (255, 0, 0), 2)
# cv2.imshow('frame', frame)
vidCapture.release()
out.release()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "500d6f473f07b35bf2d075d3061ac2e54eab702a",
"index": 4156,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.namedWindow('Measure Angle with centerline')\n<mask token>\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nFRAME_WIDTH = 320\nFRAME_HEIGHT = 240\ncv2.namedWindow('Measure Angle with centerline')\nvidCapture = cv2.VideoCapture(1)\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nFRAME_WIDTH = 320\nFRAME_HEIGHT = 240\ncv2.namedWindow('Measure Angle with centerline')\nvidCapture = cv2.VideoCapture(1)\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480))\nwhile True:\n ret, frame = vidCapture.read()\n if ret == True:\n out.write(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\nvidCapture.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-5": "import numpy as np\r\nimport cv2\r\n\r\nFRAME_WIDTH = 320\r\nFRAME_HEIGHT = 240\r\n\r\ncv2.namedWindow('Measure Angle with centerline')\r\n\r\n# WebCam Initialize\r\nvidCapture = cv2.VideoCapture(1)\r\n\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID') \r\nout = cv2.VideoWriter('webcam_record.avi', fourcc, 20.0, (640, 480)) \r\n\r\nwhile True:\r\n\r\n\t# key = cv2.waitKey(1) & 0xFF\r\n\t# if key == 27:\r\n\t# \tbreak\r\n\r\n\tret, frame = vidCapture.read()\r\n\t\r\n\tif ret==True:\r\n\t\t# frame = cv2.flip(frame,0)\r\n\r\n # write the flipped frame\r\n\t\tout.write(frame)\r\n\r\n\t\tcv2.imshow('frame',frame)\r\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\r\n\t\t\tbreak\r\n\telse:\r\n\t\tbreak\r\n\t# img = np.zeros((512, 512, 3), np.uint8)\r\n\t# cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), 2)\r\n\t# cv2.line(frame, (0, 120), (320, 120), (255, 0, 0), 2)\r\n\r\n\t# cv2.imshow('frame', frame)\r\n\r\nvidCapture.release()\r\nout.release()\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import urllib.request
import urllib.parse
import json
content = input("请输入需要翻译的内容:")
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
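# The form fields below mirror a request captured from the Youdao web client; salt, sign and ts are session-specific values the server may reject once stale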
data = {}
data['action'] = 'FY_BY_CLICKBUTTION'
data['bv'] = '1ca13a5465c2ab126e616ee8d6720cc3'
data['client'] = 'fanyideskweb'
data['doctype'] = 'json'
data['from'] = 'AUTO'
data['i'] = content
data['keyfrom'] = 'fanyi.web'
data['salt'] = '15708737847078'
data['sign'] = '64037c1dd211ea7bd98321a3bd8ab45a'
data['smartresult'] = 'dict'
data['to'] = 'AUTO'
data['ts'] = '1570873784707'
data['version'] = '2.1'
data = urllib.parse.urlencode(data).encode('utf-8')
response = urllib.request.urlopen(url,data)
html = response.read().decode('utf-8')
target = json.loads(html)
print("翻译结果:%s" % (target['translateResult'][0][0]['tgt']))
|
normal
|
{
"blob_id": "e01b1f57a572571619d6c0981370030dc6105fd2",
"index": 8636,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('翻译结果:%s' % target['translateResult'][0][0]['tgt'])\n",
"step-3": "<mask token>\ncontent = input('请输入需要翻译的内容:')\nurl = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\ndata = {}\ndata['action'] = 'FY_BY_CLICKBUTTION'\ndata['bv'] = '1ca13a5465c2ab126e616ee8d6720cc3'\ndata['client'] = 'fanyideskweb'\ndata['doctype'] = 'json'\ndata['from'] = 'AUTO'\ndata['i'] = content\ndata['keyfrom'] = 'fanyi.web'\ndata['salt'] = '15708737847078'\ndata['sign'] = '64037c1dd211ea7bd98321a3bd8ab45a'\ndata['smartresult'] = 'dict'\ndata['to'] = 'AUTO'\ndata['ts'] = '1570873784707'\ndata['version'] = '2.1'\ndata = urllib.parse.urlencode(data).encode('utf-8')\nresponse = urllib.request.urlopen(url, data)\nhtml = response.read().decode('utf-8')\ntarget = json.loads(html)\nprint('翻译结果:%s' % target['translateResult'][0][0]['tgt'])\n",
"step-4": "import urllib.request\nimport urllib.parse\nimport json\ncontent = input('请输入需要翻译的内容:')\nurl = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\ndata = {}\ndata['action'] = 'FY_BY_CLICKBUTTION'\ndata['bv'] = '1ca13a5465c2ab126e616ee8d6720cc3'\ndata['client'] = 'fanyideskweb'\ndata['doctype'] = 'json'\ndata['from'] = 'AUTO'\ndata['i'] = content\ndata['keyfrom'] = 'fanyi.web'\ndata['salt'] = '15708737847078'\ndata['sign'] = '64037c1dd211ea7bd98321a3bd8ab45a'\ndata['smartresult'] = 'dict'\ndata['to'] = 'AUTO'\ndata['ts'] = '1570873784707'\ndata['version'] = '2.1'\ndata = urllib.parse.urlencode(data).encode('utf-8')\nresponse = urllib.request.urlopen(url, data)\nhtml = response.read().decode('utf-8')\ntarget = json.loads(html)\nprint('翻译结果:%s' % target['translateResult'][0][0]['tgt'])\n",
"step-5": "import urllib.request\nimport urllib.parse\nimport json\n\ncontent = input(\"请输入需要翻译的内容:\")\n\nurl = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\n\ndata = {}\ndata['action'] = 'FY_BY_CLICKBUTTION'\ndata['bv'] = '1ca13a5465c2ab126e616ee8d6720cc3'\ndata['client'] = 'fanyideskweb'\ndata['doctype'] = 'json'\ndata['from'] = 'AUTO'\ndata['i'] = content\ndata['keyfrom'] = 'fanyi.web'\ndata['salt'] = '15708737847078'\ndata['sign'] = '64037c1dd211ea7bd98321a3bd8ab45a'\ndata['smartresult'] = 'dict'\ndata['to'] = 'AUTO'\ndata['ts'] = '1570873784707'\ndata['version'] = '2.1'\ndata = urllib.parse.urlencode(data).encode('utf-8')\n\nresponse = urllib.request.urlopen(url,data)\nhtml = response.read().decode('utf-8')\n\ntarget = json.loads(html)\nprint(\"翻译结果:%s\" % (target['translateResult'][0][0]['tgt']))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 20:28:44 2019
@author: nicholustintzaw
"""
####################################################################################################
####################################################################################################
'''
project tite : social pension database - national level
purpose : data migration national social pension data check and summary statistics
developed by : Nicholus Tint Zaw
modified date : 3rd Dec 2019
follow-up action:
'''
####################################################################################################
####################################################################################################
### PLEASE, CHANGE YOUR DIRECTORY BELOW ###
masterdir = r'C:\Users\Age.ing\Dropbox\01_Eligable\_New_QRT_COMBINE_CHECK_Window'
### PLEASE, CHANGE THE CASH TRANSFER BUDGET YEAR QUARTER BELOW ###
qrt = '1st_qrt_2019_2020'
####################################################################################################
####################################################################################################
################ PLEASE, DON'T TOUCH ANY PYTHON CODES BELOW ########################################
####################################################################################################
####################################################################################################
####################################################################################################
### task 1: prepare the directory setting
####################################################################################################
import os
os.chdir(masterdir)
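# Each step script below is exec'd in this interpreter, so all of them share the masterdir and qrt settings defined above.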
exec(open("01_newqs_directory.py", 'r', encoding="utf8").read())
####################################################################################################
### task 2: combined all completed new quarter files
####################################################################################################
## IN
# 02_new_register
exec(open("02_new_register.py", 'r', encoding="utf8").read())
# 03_moved_in
exec(open("03_moved_in.py", 'r', encoding="utf8").read())
# 04_false_death
exec(open("04_false_death.py", 'r', encoding="utf8").read())
# OUT
# 05_death
exec(open("05_death.py", 'r', encoding="utf8").read())
# 06_moved_out
exec(open("06_moved_out.py", 'r', encoding="utf8").read())
# 07_false_reg
exec(open("07_false_reg.py", 'r', encoding="utf8").read())
# COMBINED REPORT
# State and Region level combined
exec(open("08_combined_report.py", 'r', encoding="utf8").read())
####################################################################################################
|
normal
|
{
"blob_id": "5a2716fc7b4c0a56fbd0de5d45d71fb33320adf0",
"index": 2889,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.chdir(masterdir)\nexec(open('01_newqs_directory.py', 'r', encoding='utf8').read())\nexec(open('02_new_register.py', 'r', encoding='utf8').read())\nexec(open('03_moved_in.py', 'r', encoding='utf8').read())\nexec(open('04_false_death.py', 'r', encoding='utf8').read())\nexec(open('05_death.py', 'r', encoding='utf8').read())\nexec(open('06_moved_out.py', 'r', encoding='utf8').read())\nexec(open('07_false_reg.py', 'r', encoding='utf8').read())\nexec(open('08_combined_report.py', 'r', encoding='utf8').read())\n",
"step-3": "<mask token>\nmasterdir = (\n 'C:\\\\Users\\\\Age.ing\\\\Dropbox\\\\01_Eligable\\\\_New_QRT_COMBINE_CHECK_Window')\nqrt = '1st_qrt_2019_2020'\n<mask token>\nos.chdir(masterdir)\nexec(open('01_newqs_directory.py', 'r', encoding='utf8').read())\nexec(open('02_new_register.py', 'r', encoding='utf8').read())\nexec(open('03_moved_in.py', 'r', encoding='utf8').read())\nexec(open('04_false_death.py', 'r', encoding='utf8').read())\nexec(open('05_death.py', 'r', encoding='utf8').read())\nexec(open('06_moved_out.py', 'r', encoding='utf8').read())\nexec(open('07_false_reg.py', 'r', encoding='utf8').read())\nexec(open('08_combined_report.py', 'r', encoding='utf8').read())\n",
"step-4": "<mask token>\nmasterdir = (\n 'C:\\\\Users\\\\Age.ing\\\\Dropbox\\\\01_Eligable\\\\_New_QRT_COMBINE_CHECK_Window')\nqrt = '1st_qrt_2019_2020'\nimport os\nos.chdir(masterdir)\nexec(open('01_newqs_directory.py', 'r', encoding='utf8').read())\nexec(open('02_new_register.py', 'r', encoding='utf8').read())\nexec(open('03_moved_in.py', 'r', encoding='utf8').read())\nexec(open('04_false_death.py', 'r', encoding='utf8').read())\nexec(open('05_death.py', 'r', encoding='utf8').read())\nexec(open('06_moved_out.py', 'r', encoding='utf8').read())\nexec(open('07_false_reg.py', 'r', encoding='utf8').read())\nexec(open('08_combined_report.py', 'r', encoding='utf8').read())\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 4 20:28:44 2019\n\n@author: nicholustintzaw\n\"\"\"\n\n\n####################################################################################################\n####################################################################################################\n'''\nproject tite : social pension database - national level\npurpose : data migration national social pension data check and summary statistics\ndeveloped by : Nicholus Tint Zaw \nmodified date : 3rd Dec 2019\n\nfollow-up action:\n \n'''\n####################################################################################################\n####################################################################################################\n\n\n### PLEASE, CHANGE YOUR DIRECTORY BELOW ###\nmasterdir = r'C:\\Users\\Age.ing\\Dropbox\\01_Eligable\\_New_QRT_COMBINE_CHECK_Window'\n\n\n### PLEASE, CHANGE THE CASH TRANSFER BUDGET YEAR QUARTER BELOW ###\nqrt = '1st_qrt_2019_2020'\n\n\n\n\n####################################################################################################\n####################################################################################################\n################ PLEASE, DON'T TOUCH ANY PYTHON CODES BELOW ########################################\n####################################################################################################\n####################################################################################################\n\n\n\n\n####################################################################################################\n### task 1: prepare the directory setting\n####################################################################################################\n\nimport os\nos.chdir(masterdir)\n\nexec(open(\"01_newqs_directory.py\", 'r', encoding=\"utf8\").read())\n\n\n\n####################################################################################################\n### task 2: combined all completed new quarter files\n####################################################################################################\n\n \n## IN\n\n# 02_new_register\nexec(open(\"02_new_register.py\", 'r', encoding=\"utf8\").read())\n\n# 03_moved_in\nexec(open(\"03_moved_in.py\", 'r', encoding=\"utf8\").read())\n\n# 04_false_death\nexec(open(\"04_false_death.py\", 'r', encoding=\"utf8\").read())\n\n\n\n# OUT\n# 05_death\nexec(open(\"05_death.py\", 'r', encoding=\"utf8\").read())\n\n# 06_moved_out\nexec(open(\"06_moved_out.py\", 'r', encoding=\"utf8\").read())\n\n# 07_false_reg\nexec(open(\"07_false_reg.py\", 'r', encoding=\"utf8\").read())\n\n\n# COMBINED REPORT\n# State and Region level combined\nexec(open(\"08_combined_report.py\", 'r', encoding=\"utf8\").read())\n\n\n####################################################################################################\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestDocument(unittest.TestCase):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

    def test_get_texts(self):
        texts = self.doc.get_texts()
        self.assertEqual(2, len(texts))

    def test_get_term_data(self):
        term_data = self.doc.get_term_data()
        self.assertIsNotNone(term_data)


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestDocument(unittest.TestCase):

    def setUp(self):
        self.filepath = 'tests/corpus/a.xml'
        self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.
            WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),
            '//tei:body/tei:div[@type = "dummy"]')

    def test_get_text_count(self):
        self.assertEqual(2, self.doc.get_text_count())

    def test_get_texts(self):
        texts = self.doc.get_texts()
        self.assertEqual(2, len(texts))

    def test_get_term_data(self):
        term_data = self.doc.get_term_data()
        self.assertIsNotNone(term_data)


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestDocument(unittest.TestCase):

    def setUp(self):
        self.filepath = 'tests/corpus/a.xml'
        self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.
            WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),
            '//tei:body/tei:div[@type = "dummy"]')

    def test_get_text_count(self):
        self.assertEqual(2, self.doc.get_text_count())

    def test_get_texts(self):
        texts = self.doc.get_texts()
        self.assertEqual(2, len(texts))

    def test_get_term_data(self):
        term_data = self.doc.get_term_data()
        self.assertIsNotNone(term_data)


if __name__ == '__main__':
    unittest.main()
<|reserved_special_token_1|>
from bacalhau.tei_document import TEIDocument
import nltk
import unittest
class TestDocument(unittest.TestCase):

    def setUp(self):
        self.filepath = 'tests/corpus/a.xml'
        self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.
            WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),
            '//tei:body/tei:div[@type = "dummy"]')

    def test_get_text_count(self):
        self.assertEqual(2, self.doc.get_text_count())

    def test_get_texts(self):
        texts = self.doc.get_texts()
        self.assertEqual(2, len(texts))

    def test_get_term_data(self):
        term_data = self.doc.get_term_data()
        self.assertIsNotNone(term_data)


if __name__ == '__main__':
    unittest.main()
|
flexible
|
{
"blob_id": "f86d01c4b980ac44dcdb1b0008493e1dbda25971",
"index": 4544,
"step-1": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "from bacalhau.tei_document import TEIDocument\nimport nltk\nimport unittest\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
Package for haasplugin.
"""
|
flexible
|
{
"blob_id": "20518302b6a67f8f1ac01f1adf4fe06ab2eaf280",
"index": 3098,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\nPackage for haasplugin.\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
'''Lab01 ex4
	E/16/319 Rathnayake R.P.V.N'''
from dataclasses import asdict
from json import dumps
from dataclasses import dataclass
from typing import List, Dict
import json
import ex1 #import the ex1 to get the lord_course_registraion function
s1=ex1.load_course_registrations("data.txt") #lord the list of Student object in to the s1
s1=(map(asdict,s1)) #aply asdict() to s1 my useng the map function
e=json.dumps(list(s1)) #convert into jsom=n string
#print(e)
with open("student_registrations.json","w") as f: #open json file and write on it
	f.write(e)
|
normal
|
{
"blob_id": "8a5ade450485f9114fa91c00c7588535ccbaf0e6",
"index": 1923,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('student_registrations.json', 'w') as f:\n f.write(e)\n",
"step-3": "<mask token>\ns1 = ex1.load_course_registrations('data.txt')\ns1 = map(asdict, s1)\ne = json.dumps(list(s1))\nwith open('student_registrations.json', 'w') as f:\n f.write(e)\n",
"step-4": "<mask token>\nfrom dataclasses import asdict\nfrom json import dumps\nfrom dataclasses import dataclass\nfrom typing import List, Dict\nimport json\nimport ex1\ns1 = ex1.load_course_registrations('data.txt')\ns1 = map(asdict, s1)\ne = json.dumps(list(s1))\nwith open('student_registrations.json', 'w') as f:\n f.write(e)\n",
"step-5": "'''Lab01 ex4\n\tE/16/319 Rathnayake R.P.V.N'''\nfrom dataclasses import asdict\nfrom json import dumps\nfrom dataclasses import dataclass\nfrom typing import List, Dict\nimport json\nimport ex1\t\t#import the ex1 to get the lord_course_registraion function\n\n\ns1=ex1.load_course_registrations(\"data.txt\")\t#lord the list of Student object in to the s1\ns1=(map(asdict,s1))\t\t\t\t\t\t\t\t#aply asdict() to s1 my useng the map function\n\ne=json.dumps(list(s1))\t\t\t\t\t\t\t#convert into jsom=n string\n#print(e)\nwith open(\"student_registrations.json\",\"w\") as f:\t\t#open json file and write on it\n\tf.write(e)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .net import *
|
flexible
|
{
"blob_id": "73337246bd54df53842360510148f3a6f4763ace",
"index": 6251,
"step-1": "<mask token>\n",
"step-2": "from .net import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from packer.utils import hello_world
|
flexible
|
{
"blob_id": "d549303228e860ae278a5a9497a4a3a68989aeca",
"index": 6097,
"step-1": "<mask token>\n",
"step-2": "from packer.utils import hello_world\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class YumiConstants:
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class YumiConstants:
    T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],
        [0, 0, -1]], from_frame='gripper', to_frame='obj')
    T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,
        1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=
        'home', to_frame='yumi')
    T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,
        1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],
        from_frame='home', to_frame='yumi')
    T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,
        1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05
        ], from_frame='home', to_frame='yumi')
    T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,
        1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=
        'home', to_frame='yumi')
    T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,
        1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2,
        0.35720003], from_frame='home', to_frame='yumi')
    T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],
        from_frame='board', to_frame='yumi')
    board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,
        -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',
        to_frame='world')
    T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0,
        0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=
        'home', to_frame='yumi')
    T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,
        -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=
        'home', to_frame='yumi')
    right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,
        -26.22, -76.76])
    left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -
        169.18, 50.61])
    right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91,
        4.83, -26.93])
    left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -
        8.73, 42.77])
<|reserved_special_token_1|>
from autolab_core import rigid_transformations as rt
from yumipy import YuMiState
class YumiConstants:
    T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],
        [0, 0, -1]], from_frame='gripper', to_frame='obj')
    T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,
        1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=
        'home', to_frame='yumi')
    T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,
        1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],
        from_frame='home', to_frame='yumi')
    T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,
        1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05
        ], from_frame='home', to_frame='yumi')
    T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,
        1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=
        'home', to_frame='yumi')
    T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,
        1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2,
        0.35720003], from_frame='home', to_frame='yumi')
    T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],
        from_frame='board', to_frame='yumi')
    board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,
        -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',
        to_frame='world')
    T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0,
        0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=
        'home', to_frame='yumi')
    T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,
        -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=
        'home', to_frame='yumi')
    right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,
        -26.22, -76.76])
    left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -
        169.18, 50.61])
    right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91,
        4.83, -26.93])
    left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -
        8.73, 42.77])
<|reserved_special_token_1|>
# import visual_servoing_utils_main as utils
from autolab_core import rigid_transformations as rt
from yumipy import YuMiState
class YumiConstants:

    T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
                                           from_frame='gripper', to_frame='obj')

    T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
                                        translation=[0.6256, -0.15060002, 0.3616],
                                        from_frame='home', to_frame='yumi')

    T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
                                        translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],
                                        from_frame='home', to_frame='yumi')

    T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
                                        translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05],
                                        from_frame='home', to_frame='yumi')

    T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
                                       translation=[0.52070004, 0.07340001, 0.3574],
                                       from_frame='home', to_frame='yumi')

    T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
                                       translation=[0.67080003 - 0.15, -0.12650001 + 0.2, 0.35720003],
                                       from_frame='home', to_frame='yumi')

    T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],
                                     from_frame='board', to_frame='yumi')

    board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
                                     translation=[0.42971, -0.004, -0.057],
                                     from_frame='yumi', to_frame='world')

    T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
                                      translation=[0.3984, 0 - 8 * 0.0375, 0.0837],
                                      from_frame='home', to_frame='yumi')

    T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
                                     translation=[0.3984, 0 + 8 * 0.0375, 0.0837],
                                     # translation=[0.3984, 0 + 8*0.0375, 0.0837],
                                     from_frame='home', to_frame='yumi')

    right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32, -26.22, -76.76])
    left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -169.18, 50.61])

    right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, 4.83, -26.93])
    left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -8.73, 42.77])
|
flexible
|
{
"blob_id": "34c81b9318d978305748d413c869a86ee6709e2c",
"index": 996,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass YumiConstants:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass YumiConstants:\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],\n [0, 0, -1]], from_frame='gripper', to_frame='obj')\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=\n 'home', to_frame='yumi')\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05\n ], from_frame='home', to_frame='yumi')\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2, \n 0.35720003], from_frame='home', to_frame='yumi')\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',\n to_frame='world')\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, \n 0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,\n -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -\n 169.18, 50.61])\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, \n 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -\n 8.73, 42.77])\n",
"step-4": "from autolab_core import rigid_transformations as rt\nfrom yumipy import YuMiState\n\n\nclass YumiConstants:\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],\n [0, 0, -1]], from_frame='gripper', to_frame='obj')\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=\n 'home', to_frame='yumi')\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05\n ], from_frame='home', to_frame='yumi')\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2, \n 0.35720003], from_frame='home', to_frame='yumi')\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',\n to_frame='world')\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, \n 0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,\n -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -\n 169.18, 50.61])\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, \n 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -\n 8.73, 42.77])\n",
"step-5": "# import visual_servoing_utils_main as utils\nfrom autolab_core import rigid_transformations as rt\nfrom yumipy import YuMiState\n\nclass YumiConstants:\n\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n from_frame='gripper', to_frame='obj')\n\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256, -0.15060002, 0.3616],\n from_frame='home', to_frame='yumi')\n\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n translation=[0.52070004, 0.07340001, 0.3574],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n translation=[0.67080003 - 0.15, -0.12650001 + 0.2, 0.35720003],\n from_frame='home', to_frame='yumi')\n\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n\n\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.42971, -0.004, -0.057],\n from_frame='yumi', to_frame='world')\n\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.3984, 0 - 8 * 0.0375, 0.0837],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.3984, 0 + 8 * 0.0375, 0.0837],\n # translation=[0.3984, 0 + 8*0.0375, 0.0837],\n from_frame='home', to_frame='yumi')\n\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32, -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -169.18, 50.61])\n\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -8.73, 42.77])\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for letter in input_string:
    if letter in input_string:
        output_string += ALPHABET[(ALPHABET.index(letter) + key) % 26]
    else:
        output_string += letter
print(f'Encoded String is {output_string}')
<|reserved_special_token_1|>
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = [i for i in ALPHABET]
output_string = ''
input_string = input('Enter a String : ')
key = int(input('Enter the key: '))
for letter in input_string:
    if letter in input_string:
        output_string += ALPHABET[(ALPHABET.index(letter) + key) % 26]
    else:
        output_string += letter
print(f'Encoded String is {output_string}')
<|reserved_special_token_1|>
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
# Convert the ALPHABET to list
ALPHABET = [i for i in ALPHABET]
output_string = ''
input_string = input('Enter a String : ')
key = int(input('Enter the key: '))
for letter in input_string:
    if letter in input_string:
        # ALPHABET.index(letter) returns the index of that letter in the ALPHABET list
        # then we can add the key to that index to get the letter
        # then we take the mod of that so if the letter is x and 10 it cycle back to the beginning of the list
        output_string += ALPHABET[(ALPHABET.index(letter)+key) % 26]
    else:
        output_string += letter

print(f'Encoded String is {output_string}')
|
flexible
|
{
"blob_id": "b2db622596d0dff970e44759d25360a62f5fea83",
"index": 4725,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor letter in input_string:\n if letter in input_string:\n output_string += ALPHABET[(ALPHABET.index(letter) + key) % 26]\n else:\n output_string += letter\nprint(f'Encoded String is {output_string}')\n",
"step-3": "ALPHABET = 'abcdefghijklmnopqrstuvwxyz'\nALPHABET = [i for i in ALPHABET]\noutput_string = ''\ninput_string = input('Enter a String : ')\nkey = int(input('Enter the key: '))\nfor letter in input_string:\n if letter in input_string:\n output_string += ALPHABET[(ALPHABET.index(letter) + key) % 26]\n else:\n output_string += letter\nprint(f'Encoded String is {output_string}')\n",
"step-4": "ALPHABET = 'abcdefghijklmnopqrstuvwxyz'\n# Convert the ALPHABET to list\nALPHABET = [i for i in ALPHABET]\noutput_string = ''\ninput_string = input('Enter a String : ')\n\nkey = int(input('Enter the key: '))\n\nfor letter in input_string:\n if letter in input_string:\n # ALPHABET.index(letter) returns the index of that letter in the ALPHABET list\n # then we can add the key to that index to get the letter\n # then we take the mod of that so if the letter is x and 10 it cycle back to the beginning of the list\n output_string += ALPHABET[(ALPHABET.index(letter)+key) % 26]\n else:\n output_string += letter\n\nprint(f'Encoded String is {output_string}')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |